author    Michaël Zasso <targos@protonmail.com>  2021-10-10 11:10:43 +0200
committer Michaël Zasso <targos@protonmail.com>  2021-10-12 08:07:50 +0200
commit    62719c5fd2ab7dee1ac4019c1715061d556ac457 (patch)
tree      356fed3842e577ab58fd51d5cc02f071cf7ee216 /deps
parent    a784258444b052dfd31cca90db57b21dc38bb1eb (diff)
download  node-new-62719c5fd2ab7dee1ac4019c1715061d556ac457.tar.gz
deps: update V8 to 9.5.172.19
PR-URL: https://github.com/nodejs/node/pull/40178
Reviewed-By: Antoine du Hamel <duhamelantoine1995@gmail.com>
Reviewed-By: Jiawen Geng <technicalcute@gmail.com>
Reviewed-By: Colin Ihrig <cjihrig@gmail.com>
Diffstat (limited to 'deps')
-rw-r--r-- deps/v8/.flake8 | 1
-rw-r--r-- deps/v8/.gitignore | 1
-rw-r--r-- deps/v8/AUTHORS | 2
-rw-r--r-- deps/v8/BUILD.bazel | 62
-rw-r--r-- deps/v8/BUILD.gn | 207
-rw-r--r-- deps/v8/COMMON_OWNERS | 3
-rw-r--r-- deps/v8/DEPS | 42
-rw-r--r-- deps/v8/ENG_REVIEW_OWNERS | 1
-rw-r--r-- deps/v8/LOONG_OWNERS | 3
-rw-r--r-- deps/v8/MIPS_OWNERS | 1
-rw-r--r-- deps/v8/OWNERS | 5
-rw-r--r-- deps/v8/WATCHLISTS | 12
-rw-r--r-- deps/v8/base/trace_event/common/trace_event_common.h | 6
-rw-r--r-- deps/v8/gni/snapshot_toolchain.gni | 2
-rw-r--r-- deps/v8/gni/v8.gni | 10
-rw-r--r-- deps/v8/include/OWNERS | 3
-rw-r--r-- deps/v8/include/cppgc/README.md | 17
-rw-r--r-- deps/v8/include/cppgc/allocation.h | 16
-rw-r--r-- deps/v8/include/cppgc/cross-thread-persistent.h | 104
-rw-r--r-- deps/v8/include/cppgc/internal/caged-heap-local-data.h | 4
-rw-r--r-- deps/v8/include/cppgc/internal/finalizer-trait.h | 2
-rw-r--r-- deps/v8/include/cppgc/internal/gc-info.h | 100
-rw-r--r-- deps/v8/include/cppgc/internal/name-trait.h | 11
-rw-r--r-- deps/v8/include/cppgc/prefinalizer.h | 2
-rw-r--r-- deps/v8/include/js_protocol.pdl | 44
-rw-r--r-- deps/v8/include/v8-array-buffer.h | 433
-rw-r--r-- deps/v8/include/v8-callbacks.h | 400
-rw-r--r-- deps/v8/include/v8-container.h | 129
-rw-r--r-- deps/v8/include/v8-context.h | 418
-rw-r--r-- deps/v8/include/v8-cppgc.h | 7
-rw-r--r-- deps/v8/include/v8-data.h | 65
-rw-r--r-- deps/v8/include/v8-date.h | 43
-rw-r--r-- deps/v8/include/v8-debug.h | 151
-rw-r--r-- deps/v8/include/v8-embedder-heap.h | 238
-rw-r--r-- deps/v8/include/v8-exception.h | 224
-rw-r--r-- deps/v8/include/v8-extension.h | 62
-rw-r--r-- deps/v8/include/v8-external.h | 37
-rw-r--r-- deps/v8/include/v8-fast-api-calls.h | 10
-rw-r--r-- deps/v8/include/v8-forward.h | 79
-rw-r--r-- deps/v8/include/v8-function-callback.h | 475
-rw-r--r-- deps/v8/include/v8-function.h | 122
-rw-r--r-- deps/v8/include/v8-initialization.h | 266
-rw-r--r-- deps/v8/include/v8-inspector.h | 32
-rw-r--r-- deps/v8/include/v8-internal.h | 71
-rw-r--r-- deps/v8/include/v8-isolate.h | 1669
-rw-r--r-- deps/v8/include/v8-json.h | 47
-rw-r--r-- deps/v8/include/v8-local-handle.h | 459
-rw-r--r-- deps/v8/include/v8-locker.h | 143
-rw-r--r-- deps/v8/include/v8-maybe.h | 137
-rw-r--r-- deps/v8/include/v8-memory-span.h | 43
-rw-r--r-- deps/v8/include/v8-message.h | 234
-rw-r--r-- deps/v8/include/v8-metrics.h | 13
-rw-r--r-- deps/v8/include/v8-microtask-queue.h | 152
-rw-r--r-- deps/v8/include/v8-microtask.h | 28
-rw-r--r-- deps/v8/include/v8-object.h | 770
-rw-r--r-- deps/v8/include/v8-persistent-handle.h | 590
-rw-r--r-- deps/v8/include/v8-platform.h | 20
-rw-r--r-- deps/v8/include/v8-primitive-object.h | 118
-rw-r--r-- deps/v8/include/v8-primitive.h | 858
-rw-r--r-- deps/v8/include/v8-profiler.h | 5
-rw-r--r-- deps/v8/include/v8-promise.h | 174
-rw-r--r-- deps/v8/include/v8-proxy.h | 50
-rw-r--r-- deps/v8/include/v8-regexp.h | 105
-rw-r--r-- deps/v8/include/v8-script.h | 771
-rw-r--r-- deps/v8/include/v8-snapshot.h | 198
-rw-r--r-- deps/v8/include/v8-statistics.h | 215
-rw-r--r-- deps/v8/include/v8-template.h | 1051
-rw-r--r-- deps/v8/include/v8-traced-handle.h | 605
-rw-r--r-- deps/v8/include/v8-typed-array.h | 282
-rw-r--r-- deps/v8/include/v8-unwinder-state.h | 7
-rw-r--r-- deps/v8/include/v8-unwinder.h | 129
-rw-r--r-- deps/v8/include/v8-util.h | 8
-rw-r--r-- deps/v8/include/v8-value-serializer.h | 249
-rw-r--r-- deps/v8/include/v8-value.h | 526
-rw-r--r-- deps/v8/include/v8-version.h | 6
-rw-r--r-- deps/v8/include/v8-wasm.h | 245
-rw-r--r-- deps/v8/include/v8-weak-callback-info.h | 73
-rw-r--r-- deps/v8/include/v8.h | 12339
-rw-r--r-- deps/v8/infra/mb/mb_config.pyl | 25
-rw-r--r-- deps/v8/infra/testing/builders.pyl | 346
-rw-r--r-- deps/v8/samples/cppgc/hello-world.cc (renamed from deps/v8/samples/cppgc/cppgc-sample.cc) | 0
-rw-r--r-- deps/v8/samples/hello-world.cc | 7
-rw-r--r-- deps/v8/samples/process.cc | 21
-rw-r--r-- deps/v8/samples/shell.cc | 13
-rw-r--r-- deps/v8/src/DEPS | 5
-rw-r--r-- deps/v8/src/api/api-arguments.h | 1
-rw-r--r-- deps/v8/src/api/api-natives.h | 2
-rw-r--r-- deps/v8/src/api/api.cc | 276
-rw-r--r-- deps/v8/src/api/api.h | 13
-rw-r--r-- deps/v8/src/asmjs/asm-parser.cc | 8
-rw-r--r-- deps/v8/src/ast/prettyprinter.cc | 29
-rw-r--r-- deps/v8/src/ast/prettyprinter.h | 1
-rw-r--r-- deps/v8/src/base/atomicops.h | 25
-rw-r--r-- deps/v8/src/base/bounded-page-allocator.cc | 58
-rw-r--r-- deps/v8/src/base/bounded-page-allocator.h | 2
-rw-r--r-- deps/v8/src/base/build_config.h | 12
-rw-r--r-- deps/v8/src/base/compiler-specific.h | 12
-rw-r--r-- deps/v8/src/base/flags.h | 66
-rw-r--r-- deps/v8/src/base/optional.h | 2
-rw-r--r-- deps/v8/src/base/page-allocator.cc | 4
-rw-r--r-- deps/v8/src/base/page-allocator.h | 2
-rw-r--r-- deps/v8/src/base/platform/platform-fuchsia.cc | 5
-rw-r--r-- deps/v8/src/base/platform/platform-posix.cc | 56
-rw-r--r-- deps/v8/src/base/platform/platform-win32.cc | 15
-rw-r--r-- deps/v8/src/base/platform/platform.h | 2
-rw-r--r-- deps/v8/src/base/region-allocator.cc | 29
-rw-r--r-- deps/v8/src/base/region-allocator.h | 5
-rw-r--r-- deps/v8/src/base/sanitizer/asan.h | 7
-rw-r--r-- deps/v8/src/base/sanitizer/tsan.h | 20
-rw-r--r-- deps/v8/src/base/win32-headers.h | 6
-rw-r--r-- deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h | 13
-rw-r--r-- deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h | 12
-rw-r--r-- deps/v8/src/baseline/baseline-assembler-inl.h | 20
-rw-r--r-- deps/v8/src/baseline/baseline-assembler.h | 15
-rw-r--r-- deps/v8/src/baseline/baseline-batch-compiler.cc | 7
-rw-r--r-- deps/v8/src/baseline/baseline-compiler.cc | 90
-rw-r--r-- deps/v8/src/baseline/baseline-compiler.h | 1
-rw-r--r-- deps/v8/src/baseline/baseline.cc | 7
-rw-r--r-- deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h | 15
-rw-r--r-- deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h | 503
-rw-r--r-- deps/v8/src/baseline/loong64/baseline-compiler-loong64-inl.h | 77
-rw-r--r-- deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h | 6
-rw-r--r-- deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h | 6
-rw-r--r-- deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h | 270
-rw-r--r-- deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h | 64
-rw-r--r-- deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h | 15
-rw-r--r-- deps/v8/src/bigint/bigint-internal.h | 3
-rw-r--r-- deps/v8/src/bigint/bigint.h | 67
-rw-r--r-- deps/v8/src/bigint/fromstring.cc | 263
-rw-r--r-- deps/v8/src/builtins/accessors.cc | 1
-rw-r--r-- deps/v8/src/builtins/accessors.h | 2
-rw-r--r-- deps/v8/src/builtins/arm/builtins-arm.cc | 316
-rw-r--r-- deps/v8/src/builtins/arm64/builtins-arm64.cc | 283
-rw-r--r-- deps/v8/src/builtins/array-concat.tq | 2
-rw-r--r-- deps/v8/src/builtins/array-shift.tq | 2
-rw-r--r-- deps/v8/src/builtins/array-unshift.tq | 2
-rw-r--r-- deps/v8/src/builtins/builtins-array-gen.cc | 61
-rw-r--r-- deps/v8/src/builtins/builtins-array-gen.h | 2
-rw-r--r-- deps/v8/src/builtins/builtins-async-function-gen.cc | 5
-rw-r--r-- deps/v8/src/builtins/builtins-async-iterator-gen.cc | 6
-rw-r--r-- deps/v8/src/builtins/builtins-call-gen.cc | 7
-rw-r--r-- deps/v8/src/builtins/builtins-constructor-gen.cc | 5
-rw-r--r-- deps/v8/src/builtins/builtins-dataview.cc | 1
-rw-r--r-- deps/v8/src/builtins/builtins-date.cc | 79
-rw-r--r-- deps/v8/src/builtins/builtins-definitions.h | 223
-rw-r--r-- deps/v8/src/builtins/builtins-descriptors.h | 27
-rw-r--r-- deps/v8/src/builtins/builtins-generator-gen.cc | 18
-rw-r--r-- deps/v8/src/builtins/builtins-internal-gen.cc | 103
-rw-r--r-- deps/v8/src/builtins/builtins-intl.cc | 9
-rw-r--r-- deps/v8/src/builtins/builtins-lazy-gen.cc | 3
-rw-r--r-- deps/v8/src/builtins/builtins-object-gen.cc | 9
-rw-r--r-- deps/v8/src/builtins/builtins-proxy-gen.cc | 16
-rw-r--r-- deps/v8/src/builtins/builtins-regexp-gen.cc | 88
-rw-r--r-- deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc | 45
-rw-r--r-- deps/v8/src/builtins/builtins-string-gen.cc | 6
-rw-r--r-- deps/v8/src/builtins/builtins-string.tq | 2
-rw-r--r-- deps/v8/src/builtins/builtins-typed-array-gen.cc | 11
-rw-r--r-- deps/v8/src/builtins/builtins-typed-array-gen.h | 1
-rw-r--r-- deps/v8/src/builtins/console.tq | 3
-rw-r--r-- deps/v8/src/builtins/convert.tq | 3
-rw-r--r-- deps/v8/src/builtins/frame-arguments.tq | 8
-rw-r--r-- deps/v8/src/builtins/frames.tq | 6
-rw-r--r-- deps/v8/src/builtins/function.tq | 2
-rw-r--r-- deps/v8/src/builtins/ia32/builtins-ia32.cc | 361
-rw-r--r-- deps/v8/src/builtins/loong64/builtins-loong64.cc | 3755
-rw-r--r-- deps/v8/src/builtins/mips/builtins-mips.cc | 42
-rw-r--r-- deps/v8/src/builtins/mips64/builtins-mips64.cc | 43
-rw-r--r-- deps/v8/src/builtins/ppc/builtins-ppc.cc | 9
-rw-r--r-- deps/v8/src/builtins/riscv64/builtins-riscv64.cc | 128
-rw-r--r-- deps/v8/src/builtins/s390/builtins-s390.cc | 9
-rw-r--r-- deps/v8/src/builtins/setup-builtins-internal.cc | 15
-rw-r--r-- deps/v8/src/builtins/typed-array-createtypedarray.tq | 1
-rw-r--r-- deps/v8/src/builtins/typed-array-every.tq | 25
-rw-r--r-- deps/v8/src/builtins/typed-array-filter.tq | 12
-rw-r--r-- deps/v8/src/builtins/typed-array-find.tq | 35
-rw-r--r-- deps/v8/src/builtins/typed-array-findindex.tq | 28
-rw-r--r-- deps/v8/src/builtins/typed-array-findlast.tq | 56
-rw-r--r-- deps/v8/src/builtins/typed-array-findlastindex.tq | 57
-rw-r--r-- deps/v8/src/builtins/typed-array-foreach.tq | 23
-rw-r--r-- deps/v8/src/builtins/typed-array-reduce.tq | 12
-rw-r--r-- deps/v8/src/builtins/typed-array-reduceright.tq | 12
-rw-r--r-- deps/v8/src/builtins/typed-array-set.tq | 12
-rw-r--r-- deps/v8/src/builtins/typed-array-some.tq | 28
-rw-r--r-- deps/v8/src/builtins/typed-array.tq | 4
-rw-r--r-- deps/v8/src/builtins/wasm.tq | 10
-rw-r--r-- deps/v8/src/builtins/x64/builtins-x64.cc | 347
-rw-r--r-- deps/v8/src/codegen/OWNERS | 3
-rw-r--r-- deps/v8/src/codegen/arm/assembler-arm.cc | 5
-rw-r--r-- deps/v8/src/codegen/arm/macro-assembler-arm.cc | 59
-rw-r--r-- deps/v8/src/codegen/arm/macro-assembler-arm.h | 2
-rw-r--r-- deps/v8/src/codegen/arm/register-arm.h | 1
-rw-r--r-- deps/v8/src/codegen/arm64/macro-assembler-arm64.cc | 89
-rw-r--r-- deps/v8/src/codegen/arm64/macro-assembler-arm64.h | 68
-rw-r--r-- deps/v8/src/codegen/arm64/register-arm64.h | 4
-rw-r--r-- deps/v8/src/codegen/assembler-arch.h | 2
-rw-r--r-- deps/v8/src/codegen/assembler-inl.h | 2
-rw-r--r-- deps/v8/src/codegen/assembler.cc | 6
-rw-r--r-- deps/v8/src/codegen/assembler.h | 10
-rw-r--r-- deps/v8/src/codegen/atomic-memory-order.h | 35
-rw-r--r-- deps/v8/src/codegen/code-factory.cc | 57
-rw-r--r-- deps/v8/src/codegen/code-factory.h | 3
-rw-r--r-- deps/v8/src/codegen/code-stub-assembler.cc | 123
-rw-r--r-- deps/v8/src/codegen/code-stub-assembler.h | 92
-rw-r--r-- deps/v8/src/codegen/compiler.cc | 111
-rw-r--r-- deps/v8/src/codegen/compiler.h | 35
-rw-r--r-- deps/v8/src/codegen/constant-pool.cc | 3
-rw-r--r-- deps/v8/src/codegen/constants-arch.h | 2
-rw-r--r-- deps/v8/src/codegen/cpu-features.h | 3
-rw-r--r-- deps/v8/src/codegen/external-reference.cc | 69
-rw-r--r-- deps/v8/src/codegen/external-reference.h | 24
-rw-r--r-- deps/v8/src/codegen/ia32/assembler-ia32.cc | 10
-rw-r--r-- deps/v8/src/codegen/ia32/assembler-ia32.h | 21
-rw-r--r-- deps/v8/src/codegen/ia32/macro-assembler-ia32.cc | 402
-rw-r--r-- deps/v8/src/codegen/ia32/macro-assembler-ia32.h | 50
-rw-r--r-- deps/v8/src/codegen/ia32/register-ia32.h | 3
-rw-r--r-- deps/v8/src/codegen/ia32/sse-instr.h | 6
-rw-r--r-- deps/v8/src/codegen/interface-descriptors-inl.h | 11
-rw-r--r-- deps/v8/src/codegen/interface-descriptors.h | 16
-rw-r--r-- deps/v8/src/codegen/loong64/assembler-loong64-inl.h | 249
-rw-r--r-- deps/v8/src/codegen/loong64/assembler-loong64.cc | 2405
-rw-r--r-- deps/v8/src/codegen/loong64/assembler-loong64.h | 1129
-rw-r--r-- deps/v8/src/codegen/loong64/constants-loong64.cc | 100
-rw-r--r-- deps/v8/src/codegen/loong64/constants-loong64.h | 1291
-rw-r--r-- deps/v8/src/codegen/loong64/cpu-loong64.cc | 38
-rw-r--r-- deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h | 278
-rw-r--r-- deps/v8/src/codegen/loong64/macro-assembler-loong64.cc | 4107
-rw-r--r-- deps/v8/src/codegen/loong64/macro-assembler-loong64.h | 1062
-rw-r--r-- deps/v8/src/codegen/loong64/register-loong64.h | 288
-rw-r--r-- deps/v8/src/codegen/macro-assembler.h | 3
-rw-r--r-- deps/v8/src/codegen/mips/assembler-mips.cc | 1
-rw-r--r-- deps/v8/src/codegen/mips/macro-assembler-mips.cc | 17
-rw-r--r-- deps/v8/src/codegen/mips/macro-assembler-mips.h | 6
-rw-r--r-- deps/v8/src/codegen/mips/register-mips.h | 1
-rw-r--r-- deps/v8/src/codegen/mips64/macro-assembler-mips64.cc | 17
-rw-r--r-- deps/v8/src/codegen/mips64/macro-assembler-mips64.h | 6
-rw-r--r-- deps/v8/src/codegen/mips64/register-mips64.h | 1
-rw-r--r-- deps/v8/src/codegen/optimized-compilation-info.cc | 36
-rw-r--r-- deps/v8/src/codegen/optimized-compilation-info.h | 44
-rw-r--r-- deps/v8/src/codegen/ppc/assembler-ppc.cc | 9
-rw-r--r-- deps/v8/src/codegen/ppc/assembler-ppc.h | 48
-rw-r--r-- deps/v8/src/codegen/ppc/constants-ppc.h | 35
-rw-r--r-- deps/v8/src/codegen/ppc/macro-assembler-ppc.cc | 72
-rw-r--r-- deps/v8/src/codegen/ppc/macro-assembler-ppc.h | 18
-rw-r--r-- deps/v8/src/codegen/ppc/register-ppc.h | 1
-rw-r--r-- deps/v8/src/codegen/register-arch.h | 2
-rw-r--r-- deps/v8/src/codegen/register-configuration.cc | 42
-rw-r--r-- deps/v8/src/codegen/reloc-info.cc | 2
-rw-r--r-- deps/v8/src/codegen/riscv64/assembler-riscv64.cc | 663
-rw-r--r-- deps/v8/src/codegen/riscv64/assembler-riscv64.h | 286
-rw-r--r-- deps/v8/src/codegen/riscv64/constants-riscv64.cc | 41
-rw-r--r-- deps/v8/src/codegen/riscv64/constants-riscv64.h | 546
-rw-r--r-- deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc | 164
-rw-r--r-- deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h | 50
-rw-r--r-- deps/v8/src/codegen/riscv64/register-riscv64.h | 81
-rw-r--r-- deps/v8/src/codegen/s390/assembler-s390.cc | 1
-rw-r--r-- deps/v8/src/codegen/s390/constants-s390.h | 20
-rw-r--r-- deps/v8/src/codegen/s390/macro-assembler-s390.cc | 246
-rw-r--r-- deps/v8/src/codegen/s390/macro-assembler-s390.h | 184
-rw-r--r-- deps/v8/src/codegen/s390/register-s390.h | 1
-rw-r--r-- deps/v8/src/codegen/script-details.h | 1
-rw-r--r-- deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc | 372
-rw-r--r-- deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h | 442
-rw-r--r-- deps/v8/src/codegen/x64/assembler-x64-inl.h | 6
-rw-r--r-- deps/v8/src/codegen/x64/assembler-x64.cc | 53
-rw-r--r-- deps/v8/src/codegen/x64/assembler-x64.h | 32
-rw-r--r-- deps/v8/src/codegen/x64/fma-instr.h | 8
-rw-r--r-- deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h | 4
-rw-r--r-- deps/v8/src/codegen/x64/macro-assembler-x64.cc | 645
-rw-r--r-- deps/v8/src/codegen/x64/macro-assembler-x64.h | 137
-rw-r--r-- deps/v8/src/codegen/x64/register-x64.h | 54
-rw-r--r-- deps/v8/src/codegen/x64/sse-instr.h | 1
-rw-r--r-- deps/v8/src/common/globals.h | 41
-rw-r--r-- deps/v8/src/common/message-template.h | 5
-rw-r--r-- deps/v8/src/compiler-dispatcher/OWNERS | 1
-rw-r--r-- deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc | 19
-rw-r--r-- deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h | 4
-rw-r--r-- deps/v8/src/compiler/OWNERS | 1
-rw-r--r-- deps/v8/src/compiler/access-builder.cc | 122
-rw-r--r-- deps/v8/src/compiler/access-builder.h | 10
-rw-r--r-- deps/v8/src/compiler/access-info.cc | 14
-rw-r--r-- deps/v8/src/compiler/backend/arm/code-generator-arm.cc | 197
-rw-r--r-- deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc | 207
-rw-r--r-- deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc | 641
-rw-r--r-- deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h | 748
-rw-r--r-- deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc | 196
-rw-r--r-- deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc | 802
-rw-r--r-- deps/v8/src/compiler/backend/code-generator.cc | 104
-rw-r--r-- deps/v8/src/compiler/backend/code-generator.h | 37
-rw-r--r-- deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc | 936
-rw-r--r-- deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h | 144
-rw-r--r-- deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc | 168
-rw-r--r-- deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc | 574
-rw-r--r-- deps/v8/src/compiler/backend/instruction-codes.h | 138
-rw-r--r-- deps/v8/src/compiler/backend/instruction-scheduler.cc | 93
-rw-r--r-- deps/v8/src/compiler/backend/instruction-selector.cc | 149
-rw-r--r-- deps/v8/src/compiler/backend/instruction-selector.h | 40
-rw-r--r-- deps/v8/src/compiler/backend/instruction.cc | 4
-rw-r--r-- deps/v8/src/compiler/backend/instruction.h | 3
-rw-r--r-- deps/v8/src/compiler/backend/jump-threading.cc | 156
-rw-r--r-- deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc | 2636
-rw-r--r-- deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h | 397
-rw-r--r-- deps/v8/src/compiler/backend/loong64/instruction-scheduler-loong64.cc | 26
-rw-r--r-- deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc | 3124
-rw-r--r-- deps/v8/src/compiler/backend/mips/code-generator-mips.cc | 191
-rw-r--r-- deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc | 44
-rw-r--r-- deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc | 67
-rw-r--r-- deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc | 392
-rw-r--r-- deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h | 800
-rw-r--r-- deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc | 66
-rw-r--r-- deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc | 288
-rw-r--r-- deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc | 103
-rw-r--r-- deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h | 11
-rw-r--r-- deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc | 11
-rw-r--r-- deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc | 103
-rw-r--r-- deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc | 1008
-rw-r--r-- deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h | 33
-rw-r--r-- deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc | 71
-rw-r--r-- deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc | 390
-rw-r--r-- deps/v8/src/compiler/backend/s390/code-generator-s390.cc | 440
-rw-r--r-- deps/v8/src/compiler/backend/s390/instruction-codes-s390.h | 44
-rw-r--r-- deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc | 44
-rw-r--r-- deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc | 227
-rw-r--r-- deps/v8/src/compiler/backend/x64/code-generator-x64.cc | 1062
-rw-r--r-- deps/v8/src/compiler/backend/x64/instruction-codes-x64.h | 790
-rw-r--r-- deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc | 42
-rw-r--r-- deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc | 363
-rw-r--r-- deps/v8/src/compiler/branch-elimination.cc | 17
-rw-r--r-- deps/v8/src/compiler/branch-elimination.h | 1
-rw-r--r-- deps/v8/src/compiler/bytecode-graph-builder.cc | 17
-rw-r--r-- deps/v8/src/compiler/c-linkage.cc | 12
-rw-r--r-- deps/v8/src/compiler/code-assembler.cc | 82
-rw-r--r-- deps/v8/src/compiler/code-assembler.h | 63
-rw-r--r-- deps/v8/src/compiler/common-operator.cc | 212
-rw-r--r-- deps/v8/src/compiler/common-operator.h | 62
-rw-r--r-- deps/v8/src/compiler/compilation-dependencies.cc | 183
-rw-r--r-- deps/v8/src/compiler/compilation-dependencies.h | 5
-rw-r--r-- deps/v8/src/compiler/compilation-dependency.h | 37
-rw-r--r-- deps/v8/src/compiler/decompression-optimizer.cc | 7
-rw-r--r-- deps/v8/src/compiler/effect-control-linearizer.cc | 67
-rw-r--r-- deps/v8/src/compiler/effect-control-linearizer.h | 4
-rw-r--r-- deps/v8/src/compiler/frame-states.cc | 7
-rw-r--r-- deps/v8/src/compiler/graph-assembler.cc | 41
-rw-r--r-- deps/v8/src/compiler/graph-assembler.h | 42
-rw-r--r-- deps/v8/src/compiler/heap-refs.cc | 893
-rw-r--r-- deps/v8/src/compiler/heap-refs.h | 125
-rw-r--r-- deps/v8/src/compiler/int64-lowering.cc | 18
-rw-r--r-- deps/v8/src/compiler/js-call-reducer.cc | 58
-rw-r--r-- deps/v8/src/compiler/js-context-specialization.cc | 22
-rw-r--r-- deps/v8/src/compiler/js-create-lowering.cc | 59
-rw-r--r-- deps/v8/src/compiler/js-generic-lowering.cc | 16
-rw-r--r-- deps/v8/src/compiler/js-heap-broker.cc | 35
-rw-r--r-- deps/v8/src/compiler/js-heap-broker.h | 28
-rw-r--r-- deps/v8/src/compiler/js-inlining-heuristic.cc | 77
-rw-r--r-- deps/v8/src/compiler/js-inlining.cc | 7
-rw-r--r-- deps/v8/src/compiler/js-native-context-specialization.cc | 68
-rw-r--r-- deps/v8/src/compiler/js-typed-lowering.cc | 33
-rw-r--r-- deps/v8/src/compiler/linkage.cc | 7
-rw-r--r-- deps/v8/src/compiler/linkage.h | 18
-rw-r--r-- deps/v8/src/compiler/loop-analysis.cc | 19
-rw-r--r-- deps/v8/src/compiler/machine-graph-verifier.cc | 38
-rw-r--r-- deps/v8/src/compiler/machine-operator-reducer.cc | 15
-rw-r--r-- deps/v8/src/compiler/machine-operator.cc | 358
-rw-r--r-- deps/v8/src/compiler/machine-operator.h | 86
-rw-r--r-- deps/v8/src/compiler/memory-lowering.cc | 33
-rw-r--r-- deps/v8/src/compiler/memory-lowering.h | 3
-rw-r--r-- deps/v8/src/compiler/memory-optimizer.cc | 11
-rw-r--r-- deps/v8/src/compiler/memory-optimizer.h | 1
-rw-r--r-- deps/v8/src/compiler/node-matchers.h | 1
-rw-r--r-- deps/v8/src/compiler/opcodes.h | 5
-rw-r--r-- deps/v8/src/compiler/pipeline-statistics.cc | 27
-rw-r--r-- deps/v8/src/compiler/pipeline-statistics.h | 9
-rw-r--r-- deps/v8/src/compiler/pipeline.cc | 103
-rw-r--r-- deps/v8/src/compiler/pipeline.h | 8
-rw-r--r-- deps/v8/src/compiler/property-access-builder.cc | 8
-rw-r--r-- deps/v8/src/compiler/raw-machine-assembler.cc | 23
-rw-r--r-- deps/v8/src/compiler/raw-machine-assembler.h | 74
-rw-r--r-- deps/v8/src/compiler/simplified-lowering.cc | 28
-rw-r--r-- deps/v8/src/compiler/simplified-lowering.h | 3
-rw-r--r-- deps/v8/src/compiler/simplified-operator.cc | 267
-rw-r--r-- deps/v8/src/compiler/simplified-operator.h | 19
-rw-r--r-- deps/v8/src/compiler/typed-optimization.cc | 6
-rw-r--r-- deps/v8/src/compiler/typer.cc | 45
-rw-r--r-- deps/v8/src/compiler/verifier.cc | 8
-rw-r--r-- deps/v8/src/compiler/wasm-compiler.cc | 274
-rw-r--r-- deps/v8/src/compiler/wasm-compiler.h | 23
-rw-r--r-- deps/v8/src/compiler/wasm-inlining.cc | 195
-rw-r--r-- deps/v8/src/compiler/wasm-inlining.h | 77
-rw-r--r-- deps/v8/src/d8/async-hooks-wrapper.cc | 198
-rw-r--r-- deps/v8/src/d8/async-hooks-wrapper.h | 9
-rw-r--r-- deps/v8/src/d8/d8-platforms.cc | 11
-rw-r--r-- deps/v8/src/d8/d8-posix.cc | 2
-rw-r--r-- deps/v8/src/d8/d8-test.cc | 18
-rw-r--r-- deps/v8/src/d8/d8.cc | 97
-rw-r--r-- deps/v8/src/d8/d8.h | 11
-rw-r--r-- deps/v8/src/date/date.cc | 78
-rw-r--r-- deps/v8/src/date/date.h | 11
-rw-r--r-- deps/v8/src/debug/debug-evaluate.cc | 16
-rw-r--r-- deps/v8/src/debug/debug-interface.cc | 17
-rw-r--r-- deps/v8/src/debug/debug-interface.h | 16
-rw-r--r-- deps/v8/src/debug/debug-property-iterator.h | 8
-rw-r--r-- deps/v8/src/debug/debug.cc | 35
-rw-r--r-- deps/v8/src/debug/interface-types.h | 5
-rw-r--r-- deps/v8/src/deoptimizer/deoptimized-frame-info.cc | 10
-rw-r--r-- deps/v8/src/deoptimizer/deoptimizer.cc | 47
-rw-r--r-- deps/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc | 42
-rw-r--r-- deps/v8/src/deoptimizer/translated-state.cc | 32
-rw-r--r-- deps/v8/src/diagnostics/arm/disasm-arm.cc | 13
-rw-r--r-- deps/v8/src/diagnostics/arm/eh-frame-arm.cc | 2
-rw-r--r-- deps/v8/src/diagnostics/arm/unwinder-arm.cc | 2
-rw-r--r-- deps/v8/src/diagnostics/arm64/disasm-arm64.cc | 6
-rw-r--r-- deps/v8/src/diagnostics/arm64/eh-frame-arm64.cc | 2
-rw-r--r-- deps/v8/src/diagnostics/compilation-statistics.cc | 23
-rw-r--r-- deps/v8/src/diagnostics/compilation-statistics.h | 2
-rw-r--r-- deps/v8/src/diagnostics/eh-frame.cc | 2
-rw-r--r-- deps/v8/src/diagnostics/gdb-jit.cc | 206
-rw-r--r-- deps/v8/src/diagnostics/gdb-jit.h | 12
-rw-r--r-- deps/v8/src/diagnostics/ia32/disasm-ia32.cc | 58
-rw-r--r-- deps/v8/src/diagnostics/loong64/disasm-loong64.cc | 1697
-rw-r--r-- deps/v8/src/diagnostics/loong64/unwinder-loong64.cc | 14
-rw-r--r-- deps/v8/src/diagnostics/mips/disasm-mips.cc | 8
-rw-r--r-- deps/v8/src/diagnostics/mips64/disasm-mips64.cc | 9
-rw-r--r-- deps/v8/src/diagnostics/objects-debug.cc | 15
-rw-r--r-- deps/v8/src/diagnostics/objects-printer.cc | 8
-rw-r--r-- deps/v8/src/diagnostics/perf-jit.h | 3
-rw-r--r-- deps/v8/src/diagnostics/ppc/disasm-ppc.cc | 31
-rw-r--r-- deps/v8/src/diagnostics/ppc/eh-frame-ppc.cc | 2
-rw-r--r-- deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc | 1086
-rw-r--r-- deps/v8/src/diagnostics/s390/eh-frame-s390.cc | 2
-rw-r--r-- deps/v8/src/diagnostics/system-jit-win.cc | 6
-rw-r--r-- deps/v8/src/diagnostics/unwinder.cc | 2
-rw-r--r-- deps/v8/src/diagnostics/unwinding-info-win64.cc | 30
-rw-r--r-- deps/v8/src/diagnostics/unwinding-info-win64.h | 4
-rw-r--r-- deps/v8/src/diagnostics/x64/disasm-x64.cc | 507
-rw-r--r-- deps/v8/src/execution/OWNERS | 1
-rw-r--r-- deps/v8/src/execution/arm/simulator-arm.cc | 69
-rw-r--r-- deps/v8/src/execution/arm64/simulator-arm64.cc | 3
-rw-r--r-- deps/v8/src/execution/execution.cc | 4
-rw-r--r-- deps/v8/src/execution/frame-constants.h | 6
-rw-r--r-- deps/v8/src/execution/frames.cc | 41
-rw-r--r-- deps/v8/src/execution/frames.h | 12
-rw-r--r-- deps/v8/src/execution/futex-emulation.h | 4
-rw-r--r-- deps/v8/src/execution/isolate.cc | 49
-rw-r--r-- deps/v8/src/execution/isolate.h | 6
-rw-r--r-- deps/v8/src/execution/loong64/frame-constants-loong64.cc | 32
-rw-r--r-- deps/v8/src/execution/loong64/frame-constants-loong64.h | 76
-rw-r--r-- deps/v8/src/execution/loong64/simulator-loong64.cc | 5538
-rw-r--r-- deps/v8/src/execution/loong64/simulator-loong64.h | 647
-rw-r--r-- deps/v8/src/execution/messages.cc | 87
-rw-r--r-- deps/v8/src/execution/messages.h | 1
-rw-r--r-- deps/v8/src/execution/microtask-queue.h | 3
-rw-r--r-- deps/v8/src/execution/mips/simulator-mips.cc | 23
-rw-r--r-- deps/v8/src/execution/mips64/simulator-mips64.cc | 20
-rw-r--r-- deps/v8/src/execution/mips64/simulator-mips64.h | 4
-rw-r--r-- deps/v8/src/execution/ppc/simulator-ppc.cc | 79
-rw-r--r-- deps/v8/src/execution/riscv64/simulator-riscv64.cc | 1536
-rw-r--r-- deps/v8/src/execution/riscv64/simulator-riscv64.h | 336
-rw-r--r-- deps/v8/src/execution/runtime-profiler.cc | 23
-rw-r--r-- deps/v8/src/execution/s390/simulator-s390.cc | 232
-rw-r--r-- deps/v8/src/execution/s390/simulator-s390.h | 20
-rw-r--r-- deps/v8/src/execution/simulator-base.h | 6
-rw-r--r-- deps/v8/src/execution/simulator.h | 2
-rw-r--r-- deps/v8/src/execution/thread-local-top.h | 9
-rw-r--r-- deps/v8/src/execution/v8threads.cc | 1
-rw-r--r-- deps/v8/src/execution/vm-state.h | 2
-rw-r--r-- deps/v8/src/extensions/cputracemark-extension.cc | 3
-rw-r--r-- deps/v8/src/extensions/cputracemark-extension.h | 6
-rw-r--r-- deps/v8/src/extensions/externalize-string-extension.cc | 1
-rw-r--r-- deps/v8/src/extensions/externalize-string-extension.h | 6
-rw-r--r-- deps/v8/src/extensions/gc-extension.cc | 6
-rw-r--r-- deps/v8/src/extensions/gc-extension.h | 7
-rw-r--r-- deps/v8/src/extensions/ignition-statistics-extension.cc | 9
-rw-r--r-- deps/v8/src/extensions/ignition-statistics-extension.h | 6
-rw-r--r-- deps/v8/src/extensions/statistics-extension.cc | 1
-rw-r--r-- deps/v8/src/extensions/statistics-extension.h | 6
-rw-r--r-- deps/v8/src/extensions/trigger-failure-extension.cc | 1
-rw-r--r-- deps/v8/src/extensions/trigger-failure-extension.h | 6
-rw-r--r-- deps/v8/src/extensions/vtunedomain-support-extension.cc | 4
-rw-r--r-- deps/v8/src/extensions/vtunedomain-support-extension.h | 6
-rw-r--r-- deps/v8/src/flags/flag-definitions.h | 80
-rw-r--r-- deps/v8/src/handles/DIR_METADATA | 4
-rw-r--r-- deps/v8/src/handles/global-handles.cc | 2
-rw-r--r-- deps/v8/src/handles/global-handles.h | 3
-rw-r--r-- deps/v8/src/handles/handles.h | 4
-rw-r--r-- deps/v8/src/heap/DIR_METADATA | 4
-rw-r--r-- deps/v8/src/heap/array-buffer-sweeper.cc | 6
-rw-r--r-- deps/v8/src/heap/base/asm/loong64/push_registers_asm.cc | 48
-rw-r--r-- deps/v8/src/heap/base/stack.cc | 10
-rw-r--r-- deps/v8/src/heap/basic-memory-chunk.cc | 24
-rw-r--r-- deps/v8/src/heap/basic-memory-chunk.h | 104
-rw-r--r-- deps/v8/src/heap/cppgc-js/DEPS | 3
-rw-r--r-- deps/v8/src/heap/cppgc-js/cpp-heap.cc | 25
-rw-r--r-- deps/v8/src/heap/cppgc-js/cpp-heap.h | 4
-rw-r--r-- deps/v8/src/heap/cppgc-js/cpp-snapshot.cc | 77
-rw-r--r-- deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h | 1
-rw-r--r-- deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc | 1
-rw-r--r-- deps/v8/src/heap/cppgc/DEPS | 3
-rw-r--r-- deps/v8/src/heap/cppgc/caged-heap-local-data.cc | 10
-rw-r--r-- deps/v8/src/heap/cppgc/caged-heap.cc | 31
-rw-r--r-- deps/v8/src/heap/cppgc/caged-heap.h | 20
-rw-r--r-- deps/v8/src/heap/cppgc/gc-info.cc | 77
-rw-r--r-- deps/v8/src/heap/cppgc/heap-base.cc | 33
-rw-r--r-- deps/v8/src/heap/cppgc/heap-base.h | 11
-rw-r--r-- deps/v8/src/heap/cppgc/heap-object-header.h | 7
-rw-r--r-- deps/v8/src/heap/cppgc/heap.cc | 14
-rw-r--r-- deps/v8/src/heap/cppgc/marker.cc | 11
-rw-r--r-- deps/v8/src/heap/cppgc/marking-state.h | 24
-rw-r--r-- deps/v8/src/heap/cppgc/marking-verifier.cc | 12
-rw-r--r-- deps/v8/src/heap/cppgc/marking-verifier.h | 3
-rw-r--r-- deps/v8/src/heap/cppgc/memory.cc | 2
-rw-r--r-- deps/v8/src/heap/cppgc/memory.h | 6
-rw-r--r-- deps/v8/src/heap/cppgc/object-allocator.cc | 62
-rw-r--r-- deps/v8/src/heap/cppgc/object-allocator.h | 17
-rw-r--r-- deps/v8/src/heap/cppgc/page-memory.cc | 118
-rw-r--r-- deps/v8/src/heap/cppgc/page-memory.h | 25
-rw-r--r-- deps/v8/src/heap/cppgc/platform.cc | 44
-rw-r--r-- deps/v8/src/heap/cppgc/platform.h | 43
-rw-r--r-- deps/v8/src/heap/cppgc/prefinalizer-handler.cc | 27
-rw-r--r-- deps/v8/src/heap/cppgc/prefinalizer-handler.h | 9
-rw-r--r-- deps/v8/src/heap/cppgc/stats-collector.cc | 22
-rw-r--r-- deps/v8/src/heap/cppgc/stats-collector.h | 7
-rw-r--r-- deps/v8/src/heap/cppgc/visitor.cc | 7
-rw-r--r-- deps/v8/src/heap/cppgc/write-barrier.cc | 4
-rw-r--r-- deps/v8/src/heap/embedder-tracing.h | 3
-rw-r--r-- deps/v8/src/heap/factory-base.cc | 3
-rw-r--r-- deps/v8/src/heap/factory-inl.h | 9
-rw-r--r-- deps/v8/src/heap/factory.cc | 32
-rw-r--r-- deps/v8/src/heap/factory.h | 8
-rw-r--r-- deps/v8/src/heap/heap.cc | 30
-rw-r--r-- deps/v8/src/heap/heap.h | 6
-rw-r--r-- deps/v8/src/heap/large-spaces.cc | 2
-rw-r--r-- deps/v8/src/heap/mark-compact-inl.h | 4
-rw-r--r-- deps/v8/src/heap/mark-compact.cc | 49
-rw-r--r-- deps/v8/src/heap/mark-compact.h | 4
-rw-r--r-- deps/v8/src/heap/marking-visitor-inl.h | 26
-rw-r--r-- deps/v8/src/heap/marking-visitor.h | 2
-rw-r--r-- deps/v8/src/heap/memory-chunk-layout.h | 3
-rw-r--r-- deps/v8/src/heap/memory-chunk.cc | 2
-rw-r--r-- deps/v8/src/heap/memory-chunk.h | 24
-rw-r--r-- deps/v8/src/heap/memory-measurement.cc | 2
-rw-r--r-- deps/v8/src/heap/memory-measurement.h | 1
-rw-r--r-- deps/v8/src/heap/new-spaces.cc | 15
-rw-r--r-- deps/v8/src/heap/new-spaces.h | 2
-rw-r--r-- deps/v8/src/heap/objects-visiting.cc | 6
-rw-r--r-- deps/v8/src/heap/progress-bar.h | 61
-rw-r--r-- deps/v8/src/heap/setup-heap-internal.cc | 2
-rw-r--r-- deps/v8/src/heap/spaces.cc | 5
-rw-r--r-- deps/v8/src/heap/spaces.h | 10
-rw-r--r-- deps/v8/src/heap/third-party/heap-api.h | 4
-rw-r--r-- deps/v8/src/ic/OWNERS | 1
-rw-r--r-- deps/v8/src/init/bootstrapper.cc | 49
-rw-r--r-- deps/v8/src/init/bootstrapper.h | 3
-rw-r--r-- deps/v8/src/init/heap-symbols.h | 11
-rw-r--r-- deps/v8/src/init/isolate-allocator.cc | 24
-rw-r--r-- deps/v8/src/init/startup-data-util.cc | 7
-rw-r--r-- deps/v8/src/init/startup-data-util.h | 2
-rw-r--r-- deps/v8/src/init/v8.cc | 28
-rw-r--r-- deps/v8/src/init/v8.h | 4
-rw-r--r-- deps/v8/src/init/vm-cage.cc | 81
-rw-r--r-- deps/v8/src/init/vm-cage.h | 130
-rw-r--r-- deps/v8/src/inspector/DEPS | 1
-rw-r--r-- deps/v8/src/inspector/custom-preview.cc | 5
-rw-r--r-- deps/v8/src/inspector/injected-script.cc | 11
-rw-r--r-- deps/v8/src/inspector/injected-script.h | 8
-rw-r--r-- deps/v8/src/inspector/inspected-context.cc | 4
-rw-r--r-- deps/v8/src/inspector/inspected-context.h | 8
-rw-r--r-- deps/v8/src/inspector/test-interface.h | 2
-rw-r--r-- deps/v8/src/inspector/v8-console-message.cc | 4
-rw-r--r-- deps/v8/src/inspector/v8-console-message.h | 3
-rw-r--r-- deps/v8/src/inspector/v8-console.cc | 7
-rw-r--r-- deps/v8/src/inspector/v8-console.h | 9
-rw-r--r-- deps/v8/src/inspector/v8-debugger-agent-impl.cc | 3
-rw-r--r-- deps/v8/src/inspector/v8-debugger-script.h | 8
-rw-r--r-- deps/v8/src/inspector/v8-debugger.cc | 11
-rw-r--r-- deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc | 1
-rw-r--r-- deps/v8/src/inspector/v8-heap-profiler-agent-impl.h | 4
-rw-r--r-- deps/v8/src/inspector/v8-inspector-impl.cc | 36
-rw-r--r-- deps/v8/src/inspector/v8-inspector-impl.h | 6
-rw-r--r-- deps/v8/src/inspector/v8-profiler-agent-impl.cc | 120
-rw-r--r-- deps/v8/src/inspector/v8-profiler-agent-impl.h | 15
-rw-r--r-- deps/v8/src/inspector/v8-regex.cc | 8
-rw-r--r-- deps/v8/src/inspector/v8-regex.h | 5
-rw-r--r-- deps/v8/src/inspector/v8-runtime-agent-impl.cc | 9
-rw-r--r-- deps/v8/src/inspector/v8-runtime-agent-impl.h | 8
-rw-r--r-- deps/v8/src/inspector/v8-stack-trace-impl.h | 7
-rw-r--r-- deps/v8/src/inspector/v8-value-utils.cc | 4
-rw-r--r-- deps/v8/src/inspector/v8-value-utils.h | 3
-rw-r--r-- deps/v8/src/inspector/value-mirror.cc | 37
-rw-r--r-- deps/v8/src/inspector/value-mirror.h | 4
-rw-r--r-- deps/v8/src/interpreter/OWNERS | 3
-rw-r--r-- deps/v8/src/interpreter/bytecode-generator.cc | 25
-rw-r--r-- deps/v8/src/interpreter/bytecode-generator.h | 1
-rw-r--r-- deps/v8/src/interpreter/bytecodes.h | 4
-rw-r--r-- deps/v8/src/interpreter/interpreter-assembler.cc | 161
-rw-r--r-- deps/v8/src/interpreter/interpreter-assembler.h | 53
-rw-r--r-- deps/v8/src/interpreter/interpreter-generator.cc | 47
-rw-r--r-- deps/v8/src/interpreter/interpreter.cc | 32
-rw-r--r-- deps/v8/src/interpreter/interpreter.h | 2
-rw-r--r-- deps/v8/src/json/json-parser.h | 1
-rw-r--r-- deps/v8/src/libplatform/default-platform.cc | 1
-rw-r--r-- deps/v8/src/libsampler/sampler.cc | 7
-rw-r--r-- deps/v8/src/libsampler/sampler.h | 6
-rw-r--r-- deps/v8/src/logging/counters.h | 2
-rw-r--r-- deps/v8/src/logging/log.cc | 20
-rw-r--r-- deps/v8/src/logging/log.h | 5
-rw-r--r-- deps/v8/src/logging/runtime-call-stats.cc | 11
-rw-r--r-- deps/v8/src/logging/runtime-call-stats.h | 6
-rw-r--r-- deps/v8/src/objects/allocation-site-inl.h | 30
-rw-r--r-- deps/v8/src/objects/allocation-site.h | 2
-rw-r--r-- deps/v8/src/objects/arguments.h | 17
-rw-r--r-- deps/v8/src/objects/arguments.tq | 4
-rw-r--r-- deps/v8/src/objects/backing-store.cc | 52
-rw-r--r-- deps/v8/src/objects/backing-store.h | 2
-rw-r--r-- deps/v8/src/objects/bigint.cc | 13
-rw-r--r-- deps/v8/src/objects/cell-inl.h | 4
-rw-r--r-- deps/v8/src/objects/cell.h | 3
-rw-r--r-- deps/v8/src/objects/code-inl.h | 82
-rw-r--r-- deps/v8/src/objects/code.cc | 6
-rw-r--r-- deps/v8/src/objects/code.h | 18
-rw-r--r-- deps/v8/src/objects/contexts.h | 6
-rw-r--r-- deps/v8/src/objects/feedback-cell-inl.h | 2
-rw-r--r-- deps/v8/src/objects/fixed-array-inl.h | 2
-rw-r--r-- deps/v8/src/objects/fixed-array.h | 11
-rw-r--r-- deps/v8/src/objects/instance-type.h | 6
-rw-r--r-- deps/v8/src/objects/intl-objects.cc | 185
-rw-r--r-- deps/v8/src/objects/intl-objects.h | 17
-rw-r--r-- deps/v8/src/objects/js-array-buffer-inl.h | 55
-rw-r--r-- deps/v8/src/objects/js-array-buffer.cc | 1
-rw-r--r-- deps/v8/src/objects/js-array-buffer.h | 19
-rw-r--r-- deps/v8/src/objects/js-array-inl.h | 10
-rw-r--r-- deps/v8/src/objects/js-array.h | 49
-rw-r--r-- deps/v8/src/objects/js-array.tq | 27
-rw-r--r-- deps/v8/src/objects/js-date-time-format.cc | 4
-rw-r--r-- deps/v8/src/objects/js-function-inl.h | 14
-rw-r--r-- deps/v8/src/objects/js-function.cc | 85
-rw-r--r-- deps/v8/src/objects/js-function.h | 37
-rw-r--r-- deps/v8/src/objects/js-function.tq | 1
-rw-r--r-- deps/v8/src/objects/js-list-format.cc | 3
-rw-r--r-- deps/v8/src/objects/js-locale.cc | 120
-rw-r--r-- deps/v8/src/objects/js-number-format.cc | 18
-rw-r--r-- deps/v8/src/objects/js-objects-inl.h | 15
-rw-r--r-- deps/v8/src/objects/js-objects.h | 53
-rw-r--r-- deps/v8/src/objects/js-objects.tq | 11
-rw-r--r-- deps/v8/src/objects/js-promise.h | 1
-rw-r--r-- deps/v8/src/objects/js-proxy.h | 8
-rw-r--r-- deps/v8/src/objects/js-proxy.tq | 1
-rw-r--r-- deps/v8/src/objects/js-regexp-inl.h | 16
-rw-r--r-- deps/v8/src/objects/js-regexp.cc | 86
-rw-r--r-- deps/v8/src/objects/js-regexp.h | 91
-rw-r--r-- deps/v8/src/objects/js-regexp.tq | 3
-rw-r--r-- deps/v8/src/objects/js-relative-time-format.cc | 6
-rw-r--r-- deps/v8/src/objects/js-weak-refs-inl.h | 13
-rw-r--r-- deps/v8/src/objects/js-weak-refs.h | 22
-rw-r--r-- deps/v8/src/objects/js-weak-refs.tq | 1
-rw-r--r-- deps/v8/src/objects/keys.h | 13
-rw-r--r-- deps/v8/src/objects/map-inl.h | 27
-rw-r--r-- deps/v8/src/objects/map.h | 7
-rw-r--r-- deps/v8/src/objects/module.h | 1
-rw-r--r-- deps/v8/src/objects/object-macros-undef.h | 2
-rw-r--r-- deps/v8/src/objects/object-macros.h | 14
-rw-r--r-- deps/v8/src/objects/objects-body-descriptors-inl.h | 6
-rw-r--r-- deps/v8/src/objects/objects-definitions.h | 1
-rw-r--r-- deps/v8/src/objects/objects.cc | 10
-rw-r--r-- deps/v8/src/objects/objects.h | 13
-rw-r--r-- deps/v8/src/objects/ordered-hash-table.h | 1
-rw-r--r-- deps/v8/src/objects/property-cell-inl.h | 3
-rw-r--r-- deps/v8/src/objects/property-details.h | 35
-rw-r--r-- deps/v8/src/objects/script.h | 5
-rw-r--r-- deps/v8/src/objects/shared-function-info-inl.h | 198
-rw-r--r-- deps/v8/src/objects/shared-function-info.cc | 20
-rw-r--r-- deps/v8/src/objects/shared-function-info.h | 34
-rw-r--r-- deps/v8/src/objects/shared-function-info.tq | 49
-rw-r--r-- deps/v8/src/objects/tagged-field.h | 2
-rw-r--r-- deps/v8/src/objects/tagged-impl.h | 1
-rw-r--r-- deps/v8/src/objects/value-serializer.cc | 4
-rw-r--r-- deps/v8/src/objects/value-serializer.h | 2
-rw-r--r-- deps/v8/src/objects/visitors.h | 2
-rw-r--r-- deps/v8/src/parsing/parse-info.h | 1
-rw-r--r-- deps/v8/src/parsing/parser-base.h | 119
-rw-r--r-- deps/v8/src/parsing/parser.h | 15
-rw-r--r-- deps/v8/src/parsing/pending-compilation-error-handler.cc | 80
-rw-r--r-- deps/v8/src/parsing/pending-compilation-error-handler.h | 74
-rw-r--r-- deps/v8/src/parsing/preparse-data.cc | 10
-rw-r--r-- deps/v8/src/parsing/preparser.h | 24
-rw-r--r-- deps/v8/src/parsing/scanner-character-streams.cc | 3
-rw-r--r-- deps/v8/src/parsing/scanner-character-streams.h | 2
-rw-r--r-- deps/v8/src/parsing/scanner.cc | 18
-rw-r--r-- deps/v8/src/parsing/scanner.h | 4
-rw-r--r-- deps/v8/src/profiler/allocation-tracker.h | 2
-rw-r--r-- deps/v8/src/profiler/cpu-profiler.cc | 11
-rw-r--r-- deps/v8/src/profiler/cpu-profiler.h | 1
-rw-r--r-- deps/v8/src/profiler/heap-snapshot-generator.cc | 30
-rw-r--r-- deps/v8/src/profiler/heap-snapshot-generator.h | 2
-rw-r--r-- deps/v8/src/profiler/profile-generator.cc | 49
-rw-r--r-- deps/v8/src/profiler/profile-generator.h | 4
-rw-r--r-- deps/v8/src/profiler/strings-storage.cc | 8
-rw-r--r-- deps/v8/src/profiler/strings-storage.h | 4
-rw-r--r-- deps/v8/src/profiler/tick-sample.cc | 2
-rw-r--r-- deps/v8/src/profiler/tick-sample.h | 2
-rw-r--r-- deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc | 1
-rw-r--r-- deps/v8/src/regexp/experimental/experimental-compiler.cc | 27
-rw-r--r-- deps/v8/src/regexp/experimental/experimental-compiler.h | 5
-rw-r--r-- deps/v8/src/regexp/experimental/experimental-interpreter.h | 5
-rw-r--r-- deps/v8/src/regexp/experimental/experimental.cc | 24
-rw-r--r-- deps/v8/src/regexp/experimental/experimental.h | 5
-rw-r--r-- deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc | 1264
-rw-r--r-- deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h | 214
-rw-r--r-- deps/v8/src/regexp/regexp-ast.h | 16
-rw-r--r-- deps/v8/src/regexp/regexp-bytecode-generator-inl.h | 1
-rw-r--r-- deps/v8/src/regexp/regexp-bytecode-generator.cc | 2
-rw-r--r-- deps/v8/src/regexp/regexp-bytecode-generator.h | 1
-rw-r--r-- deps/v8/src/regexp/regexp-bytecode-peephole.cc | 4
-rw-r--r-- deps/v8/src/regexp/regexp-compiler-tonode.cc | 50
-rw-r--r-- deps/v8/src/regexp/regexp-compiler.cc | 50
-rw-r--r-- deps/v8/src/regexp/regexp-compiler.h | 41
-rw-r--r-- deps/v8/src/regexp/regexp-error.h | 5
-rw-r--r-- deps/v8/src/regexp/regexp-flags.h | 71
-rw-r--r-- deps/v8/src/regexp/regexp-interpreter.cc | 5
-rw-r--r-- deps/v8/src/regexp/regexp-interpreter.h | 2
-rw-r--r-- deps/v8/src/regexp/regexp-macro-assembler-arch.h | 2
-rw-r--r-- deps/v8/src/regexp/regexp-macro-assembler-tracer.cc | 4
-rw-r--r-- deps/v8/src/regexp/regexp-macro-assembler.cc | 6
-rw-r--r-- deps/v8/src/regexp/regexp-macro-assembler.h | 15
-rw-r--r-- deps/v8/src/regexp/regexp-nodes.h | 22
-rw-r--r-- deps/v8/src/regexp/regexp-parser.cc | 811
-rw-r--r-- deps/v8/src/regexp/regexp-parser.h | 366
-rw-r--r-- deps/v8/src/regexp/regexp-utils.cc | 27
-rw-r--r-- deps/v8/src/regexp/regexp-utils.h | 9
-rw-r--r-- deps/v8/src/regexp/regexp.cc | 133
-rw-r--r-- deps/v8/src/regexp/regexp.h | 40
-rw-r--r-- deps/v8/src/roots/DIR_METADATA | 4
-rw-r--r-- deps/v8/src/runtime/runtime-atomics.cc | 4
-rw-r--r-- deps/v8/src/runtime/runtime-collections.cc | 8
-rw-r--r-- deps/v8/src/runtime/runtime-compiler.cc | 4
-rw-r--r-- deps/v8/src/runtime/runtime-generator.cc | 5
-rw-r--r-- deps/v8/src/runtime/runtime-internal.cc | 42
-rw-r--r-- deps/v8/src/runtime/runtime-module.cc | 24
-rw-r--r-- deps/v8/src/runtime/runtime-object.cc | 27
-rw-r--r-- deps/v8/src/runtime/runtime-scopes.cc | 6
-rw-r--r-- deps/v8/src/runtime/runtime-test-wasm.cc | 1
-rw-r--r-- deps/v8/src/runtime/runtime-test.cc | 95
-rw-r--r-- deps/v8/src/runtime/runtime-typedarray.cc | 2
-rw-r--r-- deps/v8/src/runtime/runtime.cc | 7
-rw-r--r-- deps/v8/src/runtime/runtime.h | 5
-rw-r--r-- deps/v8/src/snapshot/context-deserializer.cc | 1
-rw-r--r-- deps/v8/src/snapshot/context-serializer.cc | 4
-rw-r--r-- deps/v8/src/snapshot/deserializer.cc | 4
-rw-r--r-- deps/v8/src/snapshot/embedded/embedded-data.cc | 4
-rw-r--r-- deps/v8/src/snapshot/embedded/embedded-empty.cc | 12
-rw-r--r-- deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc | 6
-rw-r--r-- deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc | 11
-rw-r--r-- deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc | 8
-rw-r--r-- deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc | 7
-rw-r--r-- deps/v8/src/snapshot/mksnapshot.cc | 6
-rw-r--r-- deps/v8/src/snapshot/serializer.cc | 22
-rw-r--r-- deps/v8/src/snapshot/snapshot.h | 2
-rw-r--r-- deps/v8/src/tasks/OWNERS | 1
-rw-r--r-- deps/v8/src/third_party/vtune/BUILD.gn | 5
-rw-r--r-- deps/v8/src/third_party/vtune/v8-vtune.h | 2
-rw-r--r-- deps/v8/src/third_party/vtune/vtune-jit.cc | 8
-rw-r--r-- deps/v8/src/third_party/vtune/vtune-jit.h | 5
-rw-r--r-- deps/v8/src/torque/implementation-visitor.cc | 45
-rw-r--r-- deps/v8/src/utils/address-map.h | 1
-rw-r--r-- deps/v8/src/utils/allocation.cc | 56
-rw-r--r-- deps/v8/src/utils/allocation.h | 21
-rw-r--r-- deps/v8/src/utils/v8dll-main.cc | 2
-rw-r--r-- deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h | 28
-rw-r--r-- deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h | 45
-rw-r--r-- deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h | 188
-rw-r--r-- deps/v8/src/wasm/baseline/liftoff-assembler-defs.h | 16
-rw-r--r-- deps/v8/src/wasm/baseline/liftoff-assembler.h | 8
-rw-r--r-- deps/v8/src/wasm/baseline/liftoff-compiler.cc | 73
-rw-r--r-- deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h | 2817
-rw-r--r-- deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h | 12
-rw-r--r-- deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h | 25
-rw-r--r-- deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h | 377
-rw-r--r-- deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h | 384
-rw-r--r-- deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h | 341
-rw-r--r-- deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h | 246
-rw-r--r-- deps/v8/src/wasm/c-api.cc | 6
-rw-r--r-- deps/v8/src/wasm/c-api.h | 3
-rw-r--r-- deps/v8/src/wasm/code-space-access.cc | 12
-rw-r--r-- deps/v8/src/wasm/code-space-access.h | 3
-rw-r--r-- deps/v8/src/wasm/function-body-decoder-impl.h | 25
-rw-r--r-- deps/v8/src/wasm/function-compiler.cc | 46
-rw-r--r-- deps/v8/src/wasm/graph-builder-interface.cc | 89
-rw-r--r-- deps/v8/src/wasm/graph-builder-interface.h | 8
-rw-r--r-- deps/v8/src/wasm/jump-table-assembler.cc | 30
-rw-r--r-- deps/v8/src/wasm/jump-table-assembler.h | 5
-rw-r--r-- deps/v8/src/wasm/module-compiler.cc | 37
-rw-r--r-- deps/v8/src/wasm/module-compiler.h | 2
-rw-r--r-- deps/v8/src/wasm/module-decoder.cc | 15
-rw-r--r-- deps/v8/src/wasm/module-instantiate.cc | 12
-rw-r--r-- deps/v8/src/wasm/wasm-code-manager.cc | 53
-rw-r--r-- deps/v8/src/wasm/wasm-code-manager.h | 78
-rw-r--r-- deps/v8/src/wasm/wasm-engine.h | 1
-rw-r--r-- deps/v8/src/wasm/wasm-external-refs.cc | 61
-rw-r--r-- deps/v8/src/wasm/wasm-external-refs.h | 4
-rw-r--r-- deps/v8/src/wasm/wasm-feature-flags.h | 28
-rw-r--r-- deps/v8/src/wasm/wasm-js.cc | 20
-rw-r--r-- deps/v8/src/wasm/wasm-limits.h | 3
-rw-r--r-- deps/v8/src/wasm/wasm-linkage.h | 9
-rw-r--r-- deps/v8/src/wasm/wasm-module-builder.cc | 175
-rw-r--r-- deps/v8/src/wasm/wasm-module-builder.h | 99
-rw-r--r-- deps/v8/src/wasm/wasm-module-sourcemap.cc | 9
-rw-r--r-- deps/v8/src/wasm/wasm-module-sourcemap.h | 5
-rw-r--r-- deps/v8/src/wasm/wasm-objects-inl.h | 37
-rw-r--r-- deps/v8/src/wasm/wasm-objects.cc | 27
-rw-r--r-- deps/v8/src/wasm/wasm-objects.h | 16
-rw-r--r-- deps/v8/src/web-snapshot/web-snapshot.cc | 15
-rw-r--r-- deps/v8/test/bigint/bigint-shell.cc | 187
-rw-r--r-- deps/v8/test/cctest/BUILD.gn | 13
-rw-r--r-- deps/v8/test/cctest/cctest-utils.h | 3
-rw-r--r-- deps/v8/test/cctest/cctest.cc | 10
-rw-r--r-- deps/v8/test/cctest/cctest.status | 24
-rw-r--r-- deps/v8/test/cctest/compiler/code-assembler-tester.h | 7
-rw-r--r-- deps/v8/test/cctest/compiler/function-tester.cc | 1
-rw-r--r-- deps/v8/test/cctest/compiler/test-atomic-load-store-codegen.cc | 398
-rw-r--r-- deps/v8/test/cctest/compiler/test-calls-with-arraylike-or-spread.cc | 1
-rw-r--r-- deps/v8/test/cctest/compiler/test-code-generator.cc | 6
-rw-r--r-- deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc | 11
-rw-r--r-- deps/v8/test/cctest/compiler/test-linkage.cc | 3
-rw-r--r-- deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc | 1
-rw-r--r-- deps/v8/test/cctest/compiler/test-run-deopt.cc | 1
-rw-r--r-- deps/v8/test/cctest/compiler/test-run-machops.cc | 6
-rw-r--r-- deps/v8/test/cctest/compiler/test-run-retpoline.cc | 210
-rw-r--r-- deps/v8/test/cctest/disasm-regex-helper.cc | 1
-rw-r--r-- deps/v8/test/cctest/heap/test-alloc.cc | 6
-rw-r--r-- deps/v8/test/cctest/heap/test-embedder-tracing.cc | 8
-rw-r--r-- deps/v8/test/cctest/heap/test-heap.cc | 6
-rw-r--r-- deps/v8/test/cctest/heap/test-iterators.cc | 2
-rw-r--r-- deps/v8/test/cctest/heap/test-mark-compact.cc | 4
-rw-r--r-- deps/v8/test/cctest/heap/test-shared-heap.cc | 4
-rw-r--r-- deps/v8/test/cctest/heap/test-spaces.cc | 2
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc | 2
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h | 3
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden | 6
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden | 12
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden | 22
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden | 8
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden | 4
-rw-r--r-- deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden | 20
-rw-r--r-- deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc | 10
-rw-r--r-- deps/v8/test/cctest/interpreter/interpreter-tester.h | 4
-rw-r--r-- deps/v8/test/cctest/interpreter/test-source-positions.cc | 4
-rw-r--r-- deps/v8/test/cctest/libsampler/test-sampler.cc | 6
-rw-r--r-- deps/v8/test/cctest/parsing/test-parse-decision.cc | 4
-rw-r--r-- deps/v8/test/cctest/print-extension.cc | 2
-rw-r--r-- deps/v8/test/cctest/print-extension.h | 6
-rw-r--r-- deps/v8/test/cctest/profiler-extension.cc | 2
-rw-r--r-- deps/v8/test/cctest/profiler-extension.h | 5
-rw-r--r-- deps/v8/test/cctest/test-access-checks.cc | 3
-rw-r--r-- deps/v8/test/cctest/test-accessors.cc | 1
-rw-r--r-- deps/v8/test/cctest/test-api-accessors.cc | 7
-rw-r--r-- deps/v8/test/cctest/test-api-array-buffer.cc | 29
-rw-r--r-- deps/v8/test/cctest/test-api-icu.cc | 3
-rw-r--r-- deps/v8/test/cctest/test-api-interceptors.cc | 4
-rw-r--r-- deps/v8/test/cctest/test-api-stack-traces.cc | 1
-rw-r--r-- deps/v8/test/cctest/test-api-wasm.cc | 8
-rw-r--r-- deps/v8/test/cctest/test-api.cc | 94
-rw-r--r-- deps/v8/test/cctest/test-assembler-arm64.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-assembler-ia32.cc | 1
-rw-r--r-- deps/v8/test/cctest/test-assembler-loong64.cc | 5180
-rw-r--r-- deps/v8/test/cctest/test-assembler-ppc.cc | 45
-rw-r--r-- deps/v8/test/cctest/test-assembler-riscv64.cc | 42
-rw-r--r-- deps/v8/test/cctest/test-assembler-x64.cc | 61
-rw-r--r-- deps/v8/test/cctest/test-code-pages.cc | 3
-rw-r--r-- deps/v8/test/cctest/test-code-stub-assembler.cc | 22
-rw-r--r-- deps/v8/test/cctest/test-compiler.cc | 6
-rw-r--r-- deps/v8/test/cctest/test-cpu-profiler.cc | 39
-rw-r--r-- deps/v8/test/cctest/test-debug-helper.cc | 1
-rw-r--r-- deps/v8/test/cctest/test-debug.cc | 15
-rw-r--r-- deps/v8/test/cctest/test-decls.cc | 4
-rw-r--r-- deps/v8/test/cctest/test-deoptimization.cc | 1
-rw-r--r-- deps/v8/test/cctest/test-disasm-ia32.cc | 15
-rw-r--r-- deps/v8/test/cctest/test-disasm-loong64.cc | 895
-rw-r--r-- deps/v8/test/cctest/test-disasm-riscv64.cc | 56
-rw-r--r-- deps/v8/test/cctest/test-disasm-x64.cc | 53
-rw-r--r-- deps/v8/test/cctest/test-factory.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-global-handles.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-heap-profiler.cc | 1
-rw-r--r-- deps/v8/test/cctest/test-icache.cc | 4
-rw-r--r-- deps/v8/test/cctest/test-inspector.cc | 3
-rw-r--r-- deps/v8/test/cctest/test-js-to-wasm.cc | 5
-rw-r--r-- deps/v8/test/cctest/test-js-weak-refs.cc | 8
-rw-r--r-- deps/v8/test/cctest/test-liveedit.cc | 4
-rw-r--r-- deps/v8/test/cctest/test-lockers.cc | 6
-rw-r--r-- deps/v8/test/cctest/test-log-stack-tracer.cc | 1
-rw-r--r-- deps/v8/test/cctest/test-log.cc | 1
-rw-r--r-- deps/v8/test/cctest/test-macro-assembler-loong64.cc | 2916
-rw-r--r-- deps/v8/test/cctest/test-macro-assembler-riscv64.cc | 24
-rw-r--r-- deps/v8/test/cctest/test-macro-assembler-x64.cc | 8
-rw-r--r-- deps/v8/test/cctest/test-modules.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-parsing.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-platform.cc | 1
-rw-r--r-- deps/v8/test/cctest/test-poison-disasm-arm.cc | 157
-rw-r--r-- deps/v8/test/cctest/test-poison-disasm-arm64.cc | 231
-rw-r--r-- deps/v8/test/cctest/test-profile-generator.cc | 1
-rw-r--r-- deps/v8/test/cctest/test-regexp.cc | 96
-rw-r--r-- deps/v8/test/cctest/test-sampler-api.cc | 6
-rw-r--r-- deps/v8/test/cctest/test-serialize.cc | 9
-rw-r--r-- deps/v8/test/cctest/test-stack-unwinding-win64.cc | 5
-rw-r--r-- deps/v8/test/cctest/test-strings.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-thread-termination.cc | 7
-rw-r--r-- deps/v8/test/cctest/test-trace-event.cc | 6
-rw-r--r-- deps/v8/test/cctest/test-unscopables-hidden-prototype.cc | 1
-rw-r--r-- deps/v8/test/cctest/test-unwinder-code-pages.cc | 4
-rw-r--r-- deps/v8/test/cctest/test-utils.cc | 4
-rw-r--r-- deps/v8/test/cctest/test-virtual-memory-cage.cc | 36
-rw-r--r-- deps/v8/test/cctest/test-web-snapshots.cc | 1
-rw-r--r-- deps/v8/test/cctest/trace-extension.cc | 1
-rw-r--r-- deps/v8/test/cctest/trace-extension.h | 6
-rw-r--r-- deps/v8/test/cctest/wasm/test-gc.cc | 6
-rw-r--r-- deps/v8/test/cctest/wasm/test-jump-table-assembler.cc | 5
-rw-r--r-- deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc | 1
-rw-r--r-- deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc | 7
-rw-r--r-- deps/v8/test/cctest/wasm/test-run-wasm-js.cc | 1
-rw-r--r-- deps/v8/test/cctest/wasm/test-run-wasm-simd.cc | 155
-rw-r--r-- deps/v8/test/cctest/wasm/test-run-wasm-wrappers.cc | 10
-rw-r--r-- deps/v8/test/cctest/wasm/test-run-wasm.cc | 38
-rw-r--r-- deps/v8/test/cctest/wasm/test-wasm-serialization.cc | 2
-rw-r--r-- deps/v8/test/cctest/wasm/test-wasm-stack.cc | 1
-rw-r--r-- deps/v8/test/cctest/wasm/test-wasm-trap-position.cc | 1
-rw-r--r-- deps/v8/test/cctest/wasm/wasm-run-utils.cc | 12
-rw-r--r-- deps/v8/test/common/wasm/wasm-interpreter.cc | 12
-rw-r--r-- deps/v8/test/common/wasm/wasm-macro-gen.h | 1
-rw-r--r-- deps/v8/test/debugger/debug/regress/regress-opt-after-debug-deopt.js | 21
-rw-r--r-- deps/v8/test/debugger/debug/regress/regress-prepare-break-while-recompile.js | 18
-rw-r--r-- deps/v8/test/debugger/regress/regress-7421.js | 6
-rw-r--r-- deps/v8/test/fuzzer/fuzzer-support.cc | 7
-rw-r--r-- deps/v8/test/fuzzer/fuzzer-support.h | 15
-rw-r--r-- deps/v8/test/fuzzer/inspector-fuzzer.cc | 79
-rw-r--r-- deps/v8/test/fuzzer/json.cc | 7
-rw-r--r-- deps/v8/test/fuzzer/parser.cc | 5
-rw-r--r-- deps/v8/test/fuzzer/regexp-builtins.cc | 6
-rw-r--r-- deps/v8/test/fuzzer/regexp.cc | 5
-rw-r--r-- deps/v8/test/fuzzer/wasm-async.cc | 5
-rw-r--r-- deps/v8/test/fuzzer/wasm-compile.cc | 437
-rw-r--r-- deps/v8/test/fuzzer/wasm-fuzzer-common.cc | 33
-rw-r--r-- deps/v8/test/fuzzer/wasm.cc | 6
-rw-r--r-- deps/v8/test/inspector/counters/collection-expected.txt | 2
-rw-r--r-- deps/v8/test/inspector/counters/collection.js | 83
-rw-r--r-- deps/v8/test/inspector/counters/enable-disable-expected.txt | 6
-rw-r--r-- deps/v8/test/inspector/counters/enable-disable.js | 51
-rw-r--r-- deps/v8/test/inspector/cpu-profiler/coverage-block.js | 1
-rw-r--r-- deps/v8/test/inspector/cpu-profiler/coverage.js | 1
-rw-r--r-- deps/v8/test/inspector/debugger/pause-on-oom-expected.txt | 1
-rw-r--r-- deps/v8/test/inspector/debugger/pause-on-oom-extrawide-expected.txt | 1
-rw-r--r-- deps/v8/test/inspector/debugger/pause-on-oom-wide-expected.txt | 1
-rw-r--r-- deps/v8/test/inspector/frontend-channel.h | 8
-rw-r--r-- deps/v8/test/inspector/inspector-test.cc | 91
-rw-r--r-- deps/v8/test/inspector/inspector.status | 24
-rw-r--r-- deps/v8/test/inspector/isolate-data.cc | 174
-rw-r--r-- deps/v8/test/inspector/isolate-data.h | 30
-rw-r--r-- deps/v8/test/inspector/runtime-call-stats/collection-expected.txt | 2
-rw-r--r-- deps/v8/test/inspector/runtime-call-stats/collection.js | 83
-rw-r--r-- deps/v8/test/inspector/runtime-call-stats/enable-disable-expected.txt | 5
-rw-r--r-- deps/v8/test/inspector/runtime-call-stats/enable-disable.js | 51
-rw-r--r-- deps/v8/test/inspector/runtime/get-properties-expected.txt | 37
-rw-r--r-- deps/v8/test/inspector/runtime/get-properties.js | 12
-rw-r--r-- deps/v8/test/inspector/task-runner.cc | 16
-rw-r--r-- deps/v8/test/inspector/task-runner.h | 14
-rw-r--r-- deps/v8/test/inspector/tasks.cc | 6
-rw-r--r-- deps/v8/test/inspector/tasks.h | 19
-rw-r--r-- deps/v8/test/inspector/utils.cc | 2
-rw-r--r-- deps/v8/test/inspector/utils.h | 6
-rw-r--r-- deps/v8/test/intl/enumeration/calendar-sorted.js | 11
-rw-r--r-- deps/v8/test/intl/enumeration/callendar-syntax-valid.js | 14
-rw-r--r-- deps/v8/test/intl/enumeration/collation-sorted.js | 11
-rw-r--r-- deps/v8/test/intl/enumeration/collation-syntax-valid.js | 14
-rw-r--r-- deps/v8/test/intl/enumeration/currency-sorted.js | 11
-rw-r--r-- deps/v8/test/intl/enumeration/currency-syntax-valid.js | 14
-rw-r--r-- deps/v8/test/intl/enumeration/numberingSystem-no-algorithm.js | 20
-rw-r--r-- deps/v8/test/intl/enumeration/numberingSystem-sorted.js | 11
-rw-r--r-- deps/v8/test/intl/enumeration/numberingSystem-syntax-valid.js | 14
-rw-r--r-- deps/v8/test/intl/enumeration/supported-values-of-invalid-key.js | 12
-rw-r--r-- deps/v8/test/intl/enumeration/supported-values-of-name.js | 7
-rw-r--r-- deps/v8/test/intl/enumeration/supported-values-of-property.js | 11
-rw-r--r-- deps/v8/test/intl/enumeration/supported-values-of-valid-key.js | 12
-rw-r--r-- deps/v8/test/intl/enumeration/timeZone-sorted.js | 11
-rw-r--r-- deps/v8/test/intl/enumeration/unit-sorted.js | 11
-rw-r--r-- deps/v8/test/intl/locale/locale-calendars.js | 2
-rw-r--r-- deps/v8/test/intl/regress-7770.js | 2
-rw-r--r-- deps/v8/test/js-perf-test/Array/find-index.js | 10
-rw-r--r-- deps/v8/test/js-perf-test/Array/find.js | 10
-rw-r--r-- deps/v8/test/js-perf-test/Array/for-each.js | 12
-rw-r--r-- deps/v8/test/js-perf-test/Array/map.js | 8
-rw-r--r-- deps/v8/test/js-perf-test/Array/reduce-right.js | 8
-rw-r--r-- deps/v8/test/js-perf-test/Array/reduce.js | 8
-rw-r--r-- deps/v8/test/js-perf-test/Array/run.js | 2
-rw-r--r-- deps/v8/test/js-perf-test/ClassFields.json | 118
-rw-r--r-- deps/v8/test/js-perf-test/ClassFields/classes.js | 59
-rw-r--r-- deps/v8/test/js-perf-test/ClassFields/define-private-field.js | 74
-rw-r--r-- deps/v8/test/js-perf-test/ClassFields/define-public-field.js | 75
-rw-r--r-- deps/v8/test/js-perf-test/ClassFields/evaluate-class.js | 83
-rw-r--r-- deps/v8/test/js-perf-test/ClassFields/run.js | 25
-rw-r--r-- deps/v8/test/message/fail/class-private-brand-reinitialization-anonymous.js | 10
-rw-r--r-- deps/v8/test/message/fail/class-private-brand-reinitialization-anonymous.out | 6
-rw-r--r-- deps/v8/test/message/fail/class-private-brand-reinitialization.js | 10
-rw-r--r-- deps/v8/test/message/fail/class-private-brand-reinitialization.out | 6
-rw-r--r-- deps/v8/test/message/fail/class-private-field-reinitialization.js | 10
-rw-r--r-- deps/v8/test/message/fail/class-private-field-reinitialization.out | 7
-rw-r--r-- deps/v8/test/message/fail/map-grow-failed.js | 9
-rw-r--r-- deps/v8/test/message/fail/map-grow-failed.out | 6
-rw-r--r-- deps/v8/test/message/fail/set-grow-failed.js | 9
-rw-r--r-- deps/v8/test/message/fail/set-grow-failed.out | 6
-rw-r--r-- deps/v8/test/message/message.status | 30
-rw-r--r-- deps/v8/test/mjsunit/asm/regress-674089.js | 13
-rw-r--r-- deps/v8/test/mjsunit/baseline/flush-baseline-code.js | 3
-rw-r--r-- deps/v8/test/mjsunit/baseline/flush-only-baseline-code.js | 3
-rw-r--r-- deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js | 2
-rw-r--r-- deps/v8/test/mjsunit/compiler/concurrent-proto-change.js | 7
-rw-r--r-- deps/v8/test/mjsunit/compiler/fast-api-calls.js | 2
-rw-r--r-- deps/v8/test/mjsunit/compiler/fast-api-helpers.js | 2
-rw-r--r-- deps/v8/test/mjsunit/compiler/fast-api-interface-types.js | 2
-rw-r--r-- deps/v8/test/mjsunit/compiler/fast-api-sequences-x64.js | 2
-rw-r--r-- deps/v8/test/mjsunit/compiler/fast-api-sequences.js | 2
-rw-r--r-- deps/v8/test/mjsunit/compiler/manual-concurrent-recompile.js | 23
-rw-r--r-- deps/v8/test/mjsunit/compiler/regress-1236716.js | 16
-rw-r--r-- deps/v8/test/mjsunit/compiler/regress-1239601.js | 24
-rw-r--r-- deps/v8/test/mjsunit/compiler/regress-1245949.js | 16
-rw-r--r-- deps/v8/test/mjsunit/compiler/regress-1250216.js | 16
-rw-r--r-- deps/v8/test/mjsunit/compiler/regress-9017.js | 2
-rw-r--r-- deps/v8/test/mjsunit/compiler/regress-9945-1.js | 8
-rw-r--r-- deps/v8/test/mjsunit/compiler/regress-crbug-1201011.js | 2
-rw-r--r-- deps/v8/test/mjsunit/compiler/regress-crbug-1201057.js | 2
-rw-r--r-- deps/v8/test/mjsunit/compiler/regress-crbug-1201082.js | 2
-rw-r--r-- deps/v8/test/mjsunit/compiler/regress-crbug-1223107.js | 2
-rw-r--r-- deps/v8/test/mjsunit/compiler/regress-crbug-1241464.js | 15
-rw-r--r-- deps/v8/test/mjsunit/concurrent-initial-prototype-change-1.js | 15
-rw-r--r-- deps/v8/test/mjsunit/concurrent-initial-prototype-change-2.js | 22
-rw-r--r-- deps/v8/test/mjsunit/const-dict-tracking.js | 2
-rw-r--r-- deps/v8/test/mjsunit/es6/typedarray-detached.js | 6
-rw-r--r-- deps/v8/test/mjsunit/es6/typedarray-every.js | 10
-rw-r--r-- deps/v8/test/mjsunit/es6/typedarray-filter.js | 20
-rw-r--r-- deps/v8/test/mjsunit/es6/typedarray-foreach.js | 12
-rw-r--r-- deps/v8/test/mjsunit/es6/typedarray.js | 6
-rw-r--r-- deps/v8/test/mjsunit/external-array.js | 2
-rw-r--r-- deps/v8/test/mjsunit/harmony/bigint/tostring-toolong.js | 17
-rw-r--r-- deps/v8/test/mjsunit/harmony/sharedarraybuffer.js | 6
-rw-r--r-- deps/v8/test/mjsunit/messages.js | 2
-rw-r--r-- deps/v8/test/mjsunit/mjsunit.js | 20
-rw-r--r-- deps/v8/test/mjsunit/mjsunit.status | 78
-rw-r--r-- deps/v8/test/mjsunit/regexp.js | 10
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-1193903.js | 13
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-1209444.js | 9
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-1242306.js | 20
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-1243989.js | 18
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-343609.js | 3
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-347914.js | 2
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-356053.js | 9
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-4023.js | 4
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-crbug-1031479.js | 2
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-crbug-1203122-1.js | 23
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-crbug-1203122-2.js | 23
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-crbug-1216261.js | 5
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-crbug-1236286.js | 20
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-crbug-1238467.js | 17
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-crbug-1239907.js | 36
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-crbug-1240661.js | 11
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-crbug-1245870.js | 14
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-crbug-435825.js | 3
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js | 19
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-sync-optimized-lists.js | 2
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-v8-12060.mjs | 24
-rw-r--r-- deps/v8/test/mjsunit/regress/wasm/regress-1237024.js | 26
-rw-r--r-- deps/v8/test/mjsunit/regress/wasm/regress-1239954.js | 37
-rw-r--r-- deps/v8/test/mjsunit/regress/wasm/regress-1242300.js | 24
-rw-r--r-- deps/v8/test/mjsunit/regress/wasm/regress-1247659.js | 87
-rw-r--r-- deps/v8/test/mjsunit/runtime-call-stats.js | 4
-rw-r--r-- deps/v8/test/mjsunit/temporal/duration-abs.js | 20
-rw-r--r-- deps/v8/test/mjsunit/temporal/duration-add.js | 40
-rw-r--r-- deps/v8/test/mjsunit/temporal/duration-constructor.js | 93
-rw-r--r-- deps/v8/test/mjsunit/temporal/duration-from.js | 227
-rw-r--r-- deps/v8/test/mjsunit/temporal/duration-negated.js | 24
-rw-r--r-- deps/v8/test/mjsunit/temporal/duration-to-json.js | 184
-rw-r--r-- deps/v8/test/mjsunit/temporal/duration-valueOf.js | 8
-rw-r--r-- deps/v8/test/mjsunit/temporal/duration-with.js | 112
-rw-r--r-- deps/v8/test/mjsunit/temporal/instant-add.js | 48
-rw-r--r-- deps/v8/test/mjsunit/temporal/instant-compare.js | 21
-rw-r--r-- deps/v8/test/mjsunit/temporal/instant-constructor.js | 43
-rw-r--r-- deps/v8/test/mjsunit/temporal/instant-equals.js | 17
-rw-r--r-- deps/v8/test/mjsunit/temporal/instant-from-epoch-microseconds.js | 28
-rw-r--r-- deps/v8/test/mjsunit/temporal/instant-from-epoch-milliseconds.js | 28
-rw-r--r-- deps/v8/test/mjsunit/temporal/instant-from-epoch-nanoseconds.js | 27
-rw-r--r-- deps/v8/test/mjsunit/temporal/instant-from-epoch-seconds.js | 26
-rw-r--r-- deps/v8/test/mjsunit/temporal/instant-subtract.js | 54
-rw-r--r-- deps/v8/test/mjsunit/temporal/instant-to-json.js | 46
-rw-r--r-- deps/v8/test/mjsunit/temporal/instant-toJSON.js | 52
-rw-r--r-- deps/v8/test/mjsunit/temporal/instant-valueOf.js | 7
-rw-r--r-- deps/v8/test/mjsunit/temporal/plain-date-add.js | 25
-rw-r--r-- deps/v8/test/mjsunit/temporal/plain-date-compare.js | 30
-rw-r--r-- deps/v8/test/mjsunit/temporal/plain-date-constructor.js | 82
-rw-r--r-- deps/v8/test/mjsunit/temporal/plain-date-equals.js | 15
-rw-r--r-- deps/v8/test/mjsunit/temporal/plain-date-from.js | 51
-rw-r--r-- deps/v8/test/mjsunit/temporal/plain-date-get-calendar.js | 8
-rw-r--r-- deps/v8/test/mjsunit/temporal/plain-date-get-day.js | 9
-rw-r--r-- deps/v8/test/mjsunit/temporal/plain-date-get-dayOfWeek.js | 9
-rw-r--r-- deps/v8/test/mjsunit/temporal/plain-date-get-dayOfYear.js | 9
-rw-r--r--deps/v8/test/mjsunit/temporal/plain-date-get-daysInMonth.js9
-rw-r--r--deps/v8/test/mjsunit/temporal/plain-date-get-daysInWeek.js9
-rw-r--r--deps/v8/test/mjsunit/temporal/plain-date-get-daysInYear.js9
-rw-r--r--deps/v8/test/mjsunit/temporal/plain-date-get-era.js10
-rw-r--r--deps/v8/test/mjsunit/temporal/plain-date-get-eraYear.js9
-rw-r--r--deps/v8/test/mjsunit/temporal/plain-date-get-inLeapYear.js10
-rw-r--r--deps/v8/test/mjsunit/temporal/plain-date-get-iso-fields.js21
-rw-r--r--deps/v8/test/mjsunit/temporal/plain-date-get-month.js9
-rw-r--r--deps/v8/test/mjsunit/temporal/plain-date-get-monthCode.js9
-rw-r--r--deps/v8/test/mjsunit/temporal/plain-date-get-monthsInYear.js9
-rw-r--r--deps/v8/test/mjsunit/temporal/plain-date-get-weekOfYear.js9
-rw-r--r--deps/v8/test/mjsunit/temporal/plain-date-get-year.js9
-rw-r--r--deps/v8/test/mjsunit/temporal/plain-date-to-json.js18
-rw-r--r--deps/v8/test/mjsunit/temporal/plain-date-to-plain-date-time.js33
-rw-r--r--deps/v8/test/mjsunit/temporal/plain-date-to-plain-month-day.js12
-rw-r--r--deps/v8/test/mjsunit/temporal/plain-date-to-plain-year-month.js12
-rw-r--r--deps/v8/test/mjsunit/temporal/plain-date-valueOf.js7
-rw-r--r--deps/v8/test/mjsunit/temporal/plain-date-with-calendar.js32
-rw-r--r--deps/v8/test/mjsunit/temporal/plain-date-with.js59
-rw-r--r--deps/v8/test/mjsunit/temporal/temporal-helpers.js107
-rw-r--r--deps/v8/test/mjsunit/tools/log_two_byte.js1
-rw-r--r--deps/v8/test/mjsunit/verify-no-fail.js (renamed from deps/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/cross_over_mutator_input.js)8
-rw-r--r--deps/v8/test/mjsunit/wasm/array-copy-benchmark.js7
-rw-r--r--deps/v8/test/mjsunit/wasm/exceptions.js38
-rw-r--r--deps/v8/test/mjsunit/wasm/externref-table.js33
-rw-r--r--deps/v8/test/mjsunit/wasm/gc-nominal.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/gdbjit.js20
-rw-r--r--deps/v8/test/mjsunit/wasm/inlining.js77
-rw-r--r--deps/v8/test/mjsunit/wasm/reference-globals.js2
-rw-r--r--deps/v8/test/mjsunit/wasm/test-serialization-with-lazy-compilation.js3
-rw-r--r--deps/v8/test/mkgrokdump/mkgrokdump.cc8
-rw-r--r--deps/v8/test/test262/test262.status383
-rw-r--r--deps/v8/test/unittests/BUILD.gn9
-rw-r--r--deps/v8/test/unittests/api/access-check-unittest.cc8
-rw-r--r--deps/v8/test/unittests/api/deserialize-unittest.cc7
-rw-r--r--deps/v8/test/unittests/api/exception-unittest.cc6
-rw-r--r--deps/v8/test/unittests/api/interceptor-unittest.cc6
-rw-r--r--deps/v8/test/unittests/api/isolate-unittest.cc6
-rw-r--r--deps/v8/test/unittests/api/remote-object-unittest.cc8
-rw-r--r--deps/v8/test/unittests/api/resource-constraints-unittest.cc6
-rw-r--r--deps/v8/test/unittests/api/v8-object-unittest.cc7
-rw-r--r--deps/v8/test/unittests/assembler/turbo-assembler-loong64-unittest.cc64
-rw-r--r--deps/v8/test/unittests/base/region-allocator-unittest.cc21
-rw-r--r--deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc7
-rw-r--r--deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc200
-rw-r--r--deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc41
-rw-r--r--deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc24
-rw-r--r--deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc28
-rw-r--r--deps/v8/test/unittests/compiler/loong64/instruction-selector-loong64-unittest.cc1564
-rw-r--r--deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc27
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.cc12
-rw-r--r--deps/v8/test/unittests/compiler/node-test-utils.h6
-rw-r--r--deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc5
-rw-r--r--deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc8
-rw-r--r--deps/v8/test/unittests/debug/debug-property-iterator-unittest.cc6
-rw-r--r--deps/v8/test/unittests/diagnostics/gdb-jit-unittest.cc58
-rw-r--r--deps/v8/test/unittests/execution/microtask-queue-unittest.cc1
-rw-r--r--deps/v8/test/unittests/heap/cppgc/heap-unittest.cc58
-rw-r--r--deps/v8/test/unittests/heap/cppgc/marker-unittest.cc48
-rw-r--r--deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc8
-rw-r--r--deps/v8/test/unittests/heap/cppgc/page-memory-unittest.cc66
-rw-r--r--deps/v8/test/unittests/heap/cppgc/platform-unittest.cc46
-rw-r--r--deps/v8/test/unittests/heap/cppgc/prefinalizer-unittest.cc50
-rw-r--r--deps/v8/test/unittests/heap/progressbar-unittest.cc80
-rw-r--r--deps/v8/test/unittests/heap/unified-heap-unittest.cc5
-rw-r--r--deps/v8/test/unittests/heap/unified-heap-utils.cc1
-rw-r--r--deps/v8/test/unittests/heap/unified-heap-utils.h2
-rw-r--r--deps/v8/test/unittests/heap/unmapper-unittest.cc20
-rw-r--r--deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc2
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc120
-rw-r--r--deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h32
-rw-r--r--deps/v8/test/unittests/logging/runtime-call-stats-unittest.cc1
-rw-r--r--deps/v8/test/unittests/objects/value-serializer-unittest.cc31
-rw-r--r--deps/v8/test/unittests/profiler/strings-storage-unittest.cc11
-rw-r--r--deps/v8/test/unittests/run-all-unittests.cc7
-rw-r--r--deps/v8/test/unittests/runtime/runtime-debug-unittest.cc5
-rw-r--r--deps/v8/test/unittests/tasks/background-compile-task-unittest.cc2
-rw-r--r--deps/v8/test/unittests/test-helpers.cc1
-rw-r--r--deps/v8/test/unittests/test-helpers.h2
-rw-r--r--deps/v8/test/unittests/test-utils.cc2
-rw-r--r--deps/v8/test/unittests/test-utils.h5
-rw-r--r--deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc54
-rw-r--r--deps/v8/test/unittests/wasm/liftoff-register-unittests.cc2
-rw-r--r--deps/v8/test/unittests/wasm/memory-protection-unittest.cc182
-rw-r--r--deps/v8/test/unittests/wasm/module-decoder-unittest.cc9
-rw-r--r--deps/v8/test/unittests/wasm/trap-handler-posix-unittest.cc2
-rw-r--r--deps/v8/test/unittests/wasm/trap-handler-simulator-unittest.cc2
-rw-r--r--deps/v8/test/unittests/wasm/trap-handler-win-unittest.cc3
-rw-r--r--deps/v8/test/wasm-api-tests/reflect.cc2
-rw-r--r--deps/v8/test/wasm-api-tests/run-all-wasm-api-tests.cc2
-rw-r--r--deps/v8/test/wasm-api-tests/table.cc9
-rw-r--r--deps/v8/test/wasm-js/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-spec-tests/tests.tar.gz.sha12
-rw-r--r--deps/v8/test/wasm-spec-tests/wasm-spec-tests.status2
-rw-r--r--deps/v8/test/webkit/fast/js/deep-recursion-test.js3
-rw-r--r--deps/v8/test/webkit/fast/js/function-apply.js3
-rw-r--r--deps/v8/test/webkit/function-call-register-allocation.js3
-rw-r--r--deps/v8/test/webkit/run-json-stringify.js3
-rw-r--r--deps/v8/third_party/jinja2/Jinja2-2.10.1.tar.gz.md51
-rw-r--r--deps/v8/third_party/jinja2/Jinja2-2.10.1.tar.gz.sha5121
-rw-r--r--deps/v8/third_party/jinja2/Jinja2-2.10.tar.gz.md51
-rw-r--r--deps/v8/third_party/jinja2/Jinja2-2.10.tar.gz.sha5121
-rw-r--r--deps/v8/third_party/jinja2/OWNERS1
-rw-r--r--deps/v8/third_party/jinja2/README.chromium10
-rw-r--r--deps/v8/third_party/jinja2/__init__.py2
-rwxr-xr-xdeps/v8/third_party/jinja2/get_jinja2.sh4
-rw-r--r--deps/v8/third_party/jinja2/lexer.py6
-rw-r--r--deps/v8/third_party/jinja2/patches/0002-jinja2-add-_identifier-to-pydeps-for-py3.patch34
-rw-r--r--deps/v8/third_party/jinja2/sandbox.py17
-rw-r--r--deps/v8/third_party/jinja2/tests.py2
-rw-r--r--deps/v8/third_party/zlib/google/zip_internal.cc4
-rw-r--r--deps/v8/third_party/zlib/google/zip_internal.h2
-rw-r--r--deps/v8/third_party/zlib/google/zip_reader.cc2
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/build_db.js3
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/db.js30
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/exceptions.js26
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/mutators/crossover_mutator.js5
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/source_helpers.js27
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/test/test_db.js33
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/test/test_regressions.js46
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/test_data/cross_over_mutator_class_input.js (renamed from deps/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/cross_over_mutator_class_input.js)0
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/test_data/db/this/file.js (renamed from deps/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/this/file.js)0
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load.js4
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_0.js6
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_1.js2
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_self.js2
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/destructuring/input.js7
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/load.js2
-rw-r--r--deps/v8/tools/clusterfuzz/js_fuzzer/test_db.js5
-rw-r--r--deps/v8/tools/clusterfuzz/v8_commands.py3
-rwxr-xr-xdeps/v8/tools/clusterfuzz/v8_foozzie.py18
-rw-r--r--deps/v8/tools/clusterfuzz/v8_smoke_tests.js2
-rwxr-xr-xdeps/v8/tools/cppgc/gen_cmake.py8
-rwxr-xr-xdeps/v8/tools/cppgc/test_cmake.sh2
-rwxr-xr-xdeps/v8/tools/dev/gm.py23
-rw-r--r--deps/v8/tools/gen-postmortem-metadata.py17
-rwxr-xr-xdeps/v8/tools/generate-header-include-checks.py4
-rwxr-xr-xdeps/v8/tools/mb/mb.py64
-rwxr-xr-xdeps/v8/tools/mb/mb_unittest.py22
-rw-r--r--deps/v8/tools/profile.mjs5
-rw-r--r--deps/v8/tools/release/PRESUBMIT.py8
-rwxr-xr-xdeps/v8/tools/release/auto_tag.py2
-rwxr-xr-xdeps/v8/tools/release/check_clusterfuzz.py2
-rw-r--r--deps/v8/tools/release/common_includes.py26
-rwxr-xr-xdeps/v8/tools/release/create_release.py22
-rwxr-xr-xdeps/v8/tools/release/merge_to_branch.py4
-rwxr-xr-xdeps/v8/tools/release/mergeinfo.py10
-rwxr-xr-xdeps/v8/tools/release/roll_merge.py4
-rwxr-xr-xdeps/v8/tools/release/search_related_commits.py2
-rwxr-xr-xdeps/v8/tools/release/test_mergeinfo.py10
-rwxr-xr-xdeps/v8/tools/release/test_scripts.py44
-rwxr-xr-xdeps/v8/tools/release/test_search_related_commits.py38
-rw-r--r--deps/v8/tools/run_perf.py29
-rw-r--r--deps/v8/tools/testrunner/base_runner.py29
-rw-r--r--deps/v8/tools/testrunner/local/android.py6
-rw-r--r--deps/v8/tools/testrunner/local/junit_output.py49
-rw-r--r--deps/v8/tools/testrunner/local/statusfile.py2
-rw-r--r--deps/v8/tools/testrunner/local/variants.py9
-rwxr-xr-xdeps/v8/tools/testrunner/num_fuzzer.py16
-rw-r--r--deps/v8/tools/testrunner/objects/testcase.py9
-rw-r--r--deps/v8/tools/testrunner/outproc/base.py2
-rwxr-xr-xdeps/v8/tools/testrunner/standard_runner.py7
-rw-r--r--deps/v8/tools/testrunner/testproc/expectation.py9
-rw-r--r--deps/v8/tools/testrunner/testproc/fuzzer.py6
-rw-r--r--deps/v8/tools/testrunner/testproc/progress.py40
-rwxr-xr-xdeps/v8/tools/unittests/run_tests_test.py3
-rw-r--r--deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json1
-rw-r--r--deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json1
-rw-r--r--deps/v8/tools/unittests/testdata/testroot3/v8_build_config.json1
-rw-r--r--deps/v8/tools/v8heapconst.py396
-rw-r--r--deps/v8/tools/whitespace.txt2
1269 files changed, 89664 insertions, 32761 deletions
diff --git a/deps/v8/.flake8 b/deps/v8/.flake8
index c58d00ca05..22eebf3de4 100644
--- a/deps/v8/.flake8
+++ b/deps/v8/.flake8
@@ -4,7 +4,6 @@ exclude =
./third_party/, # third-party code
./build/, # third-party code
./buildtools/, # third-party code
- ./tools/swarming_client/, # third-party code
./test/wasm-js/, # third-party code
./test/wasm-js/data/, # third-party code
./test/test262/data/, # third-party code
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index 315f3ae0be..66116d82ca 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -87,7 +87,6 @@
/tools/luci-go
/tools/oom_dump/oom_dump
/tools/oom_dump/oom_dump.o
-/tools/swarming_client
/tools/turbolizer/build
/tools/turbolizer/.rpt2_cache
/tools/turbolizer/deploy
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index ea786ddea4..34bd4c57c6 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -173,6 +173,7 @@ Milton Chiang <milton.chiang@mediatek.com>
Mu Tao <pamilty@gmail.com>
Myeong-bo Shim <m0609.shim@samsung.com>
Nicolas Antonius Ernst Leopold Maria Kaiser <nikai@nikai.net>
+Nicolò Ribaudo <nicolo.ribaudo@gmail.com>
Niek van der Maas <mail@niekvandermaas.nl>
Niklas Hambüchen <mail@nh2.me>
Noj Vek <nojvek@gmail.com>
@@ -223,6 +224,7 @@ Tao Liqiang <taolq@outlook.com>
Teddy Katz <teddy.katz@gmail.com>
Thomas Young <wenzhang5800@gmail.com>
Tiancheng "Timothy" Gu <timothygu99@gmail.com>
+Timo Teräs <timo.teras@iki.fi>
Tobias Burnus <burnus@net-b.de>
Tobias Nießen <tniessen@tnie.de>
Ujjwal Sharma <usharma1998@gmail.com>
diff --git a/deps/v8/BUILD.bazel b/deps/v8/BUILD.bazel
index c5b4a94f91..e70b2f4b2d 100644
--- a/deps/v8/BUILD.bazel
+++ b/deps/v8/BUILD.bazel
@@ -150,7 +150,6 @@ config_setting(
# v8_can_use_fpu_instructions
# v8_use_mips_abi_hardfloat
# v8_enable_gdbjit
-# v8_untrusted_code_mitigations
# v8_enable_minor_mc
# v8_check_header_includes
# v8_enable_shared_ro_heap
@@ -164,10 +163,11 @@ config_setting(
# v8_verify_torque_generation_invariance
# v8_enable_snapshot_compression
# v8_control_flow_integrity
-# cppgc_enable_object_names
+# v8_enable_virtual_memory_cage
# cppgc_enable_caged_heap
-# cppgc_enable_verify_live_bytes
# cppgc_enable_check_assignments_in_prefinalizers
+# cppgc_enable_object_names
+# cppgc_enable_verify_heap
# cppgc_enable_young_generation
# v8_enable_zone_compression
# v8_enable_heap_sandbox
@@ -306,9 +306,6 @@ v8_config(
"V8_TARGET_OS_MACOSX",
],
}) + select({
- ":is_android_x86": [ "DISABLE_UNTRUSTED_CODE_MITIGATIONS" ],
- "//conditions:default": [],
- }) + select({
":is_v8_enable_pointer_compression": [
"V8_COMPRESS_POINTERS",
"V8_31BIT_SMIS_ON_64BIT_ARCH",
@@ -403,11 +400,53 @@ filegroup(
srcs = [
":cppgc_headers_files",
":v8_version_files",
+ "include/v8-array-buffer.h",
+ "include/v8-callbacks.h",
+ "include/v8-container.h",
+ "include/v8-context.h",
"include/v8-cppgc.h",
+ "include/v8-data.h",
+ "include/v8-date.h",
+ "include/v8-debug.h",
+ "include/v8-embedder-heap.h",
+ "include/v8-exception.h",
+ "include/v8-extension.h",
+ "include/v8-external.h",
"include/v8-fast-api-calls.h",
+ "include/v8-forward.h",
+ "include/v8-function.h",
+ "include/v8-function-callback.h",
+ "include/v8-initialization.h",
"include/v8-internal.h",
+ "include/v8-isolate.h",
+ "include/v8-json.h",
+ "include/v8-local-handle.h",
+ "include/v8-locker.h",
+ "include/v8-maybe.h",
+ "include/v8-memory-span.h",
+ "include/v8-message.h",
+ "include/v8-microtask-queue.h",
+ "include/v8-microtask.h",
+ "include/v8-object.h",
+ "include/v8-persistent-handle.h",
+ "include/v8-primitive-object.h",
+ "include/v8-primitive.h",
"include/v8-profiler.h",
+ "include/v8-promise.h",
+ "include/v8-proxy.h",
+ "include/v8-regexp.h",
+ "include/v8-script.h",
+ "include/v8-snapshot.h",
+ "include/v8-statistics.h",
+ "include/v8-template.h",
+ "include/v8-traced-handle.h",
+ "include/v8-typed-array.h",
+ "include/v8-unwinder.h",
"include/v8-util.h",
+ "include/v8-value-serializer.h",
+ "include/v8-value.h",
+ "include/v8-wasm.h",
+ "include/v8-weak-callback-info.h",
"include/v8.h",
],
)
@@ -975,6 +1014,7 @@ filegroup(
"src/codegen/assembler-inl.h",
"src/codegen/assembler.cc",
"src/codegen/assembler.h",
+ "src/codegen/atomic-memory-order.h",
"src/codegen/bailout-reason.cc",
"src/codegen/bailout-reason.h",
"src/codegen/callable.h",
@@ -1309,6 +1349,7 @@ filegroup(
"src/heap/paged-spaces.h",
"src/heap/parallel-work-item.h",
"src/heap/parked-scope.h",
+ "src/heap/progress-bar.h",
"src/heap/read-only-heap-inl.h",
"src/heap/read-only-heap.cc",
"src/heap/read-only-heap.h",
@@ -1361,6 +1402,8 @@ filegroup(
"src/init/startup-data-util.h",
"src/init/v8.cc",
"src/init/v8.h",
+ "src/init/vm-cage.cc",
+ "src/init/vm-cage.h",
"src/interpreter/block-coverage-builder.h",
"src/interpreter/bytecode-array-builder.cc",
"src/interpreter/bytecode-array-builder.h",
@@ -1755,6 +1798,7 @@ filegroup(
"src/regexp/regexp-dotprinter.h",
"src/regexp/regexp-error.cc",
"src/regexp/regexp-error.h",
+ "src/regexp/regexp-flags.h",
"src/regexp/regexp-interpreter.cc",
"src/regexp/regexp-interpreter.h",
"src/regexp/regexp-macro-assembler-arch.h",
@@ -1810,6 +1854,7 @@ filegroup(
"src/base/sanitizer/lsan-page-allocator.cc",
"src/base/sanitizer/lsan-page-allocator.h",
"src/base/sanitizer/msan.h",
+ "src/base/sanitizer/tsan.h",
"src/snapshot/code-serializer.cc",
"src/snapshot/code-serializer.h",
"src/snapshot/context-deserializer.cc",
@@ -2092,6 +2137,7 @@ filegroup(
"src/asmjs/asm-types.h",
"src/compiler/int64-lowering.h",
"src/compiler/wasm-compiler.h",
+ "src/compiler/wasm-inlining.h",
"src/debug/debug-wasm-objects.cc",
"src/debug/debug-wasm-objects.h",
"src/debug/debug-wasm-objects-inl.h",
@@ -2298,7 +2344,6 @@ filegroup(
"src/compiler/common-operator-reducer.h",
"src/compiler/compilation-dependencies.cc",
"src/compiler/compilation-dependencies.h",
- "src/compiler/compilation-dependency.h",
"src/compiler/compiler-source-position-table.cc",
"src/compiler/compiler-source-position-table.h",
"src/compiler/constant-folding-reducer.cc",
@@ -2475,6 +2520,7 @@ filegroup(
":is_v8_enable_webassembly": [
"src/compiler/int64-lowering.cc",
"src/compiler/wasm-compiler.cc",
+ "src/compiler/wasm-inlining.cc",
],
"//conditions:default": [],
}),
@@ -2570,6 +2616,7 @@ filegroup(
name = "cppgc_base_files",
srcs = [
"src/heap/cppgc/allocation.cc",
+ "src/heap/cppgc/caged-heap.h",
"src/heap/cppgc/compaction-worklists.cc",
"src/heap/cppgc/compaction-worklists.h",
"src/heap/cppgc/compactor.cc",
@@ -2631,6 +2678,7 @@ filegroup(
"src/heap/cppgc/page-memory.h",
"src/heap/cppgc/persistent-node.cc",
"src/heap/cppgc/platform.cc",
+ "src/heap/cppgc/platform.h",
"src/heap/cppgc/pointer-policies.cc",
"src/heap/cppgc/prefinalizer-handler.cc",
"src/heap/cppgc/prefinalizer-handler.h",
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 3e48fb11bf..5c7d931b27 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -41,7 +41,7 @@ declare_args() {
v8_enable_future = false
# Sets -DSYSTEM_INSTRUMENTATION. Enables OS-dependent event tracing
- v8_enable_system_instrumentation = is_win || is_mac
+ v8_enable_system_instrumentation = (is_win || is_mac) && !v8_use_perfetto
# Sets the GUID for the ETW provider
v8_etw_guid = ""
@@ -228,11 +228,6 @@ declare_args() {
(is_linux || is_chromeos || is_mac)) ||
(v8_current_cpu == "ppc64" && (is_linux || is_chromeos))
- # Enable mitigations for executing untrusted code.
- # Disabled by default on ia32 due to conflicting requirements with embedded
- # builtins.
- v8_untrusted_code_mitigations = false
-
# Enable minor mark compact.
v8_enable_minor_mc = true
@@ -291,16 +286,20 @@ declare_args() {
cppgc_enable_object_names = false
# Enable heap reservation of size 4GB. Only possible for 64bit archs.
- cppgc_enable_caged_heap = v8_current_cpu == "x64" || v8_current_cpu == "arm64"
+ cppgc_enable_caged_heap =
+ v8_current_cpu == "x64" || v8_current_cpu == "arm64" ||
+ v8_current_cpu == "loong64"
- # Enable verification of live bytes in the marking verifier.
- # TODO(v8:11785): Enable by default when running with the verifier.
- cppgc_enable_verify_live_bytes = false
+ # Enables additional heap verification phases and checks.
+ cppgc_enable_verify_heap = ""
# Enable assignment checks for Members/Persistents during prefinalizer invocations.
# TODO(v8:11749): Enable by default after fixing any existing issues in Blink.
cppgc_enable_check_assignments_in_prefinalizers = false
+ # Enable allocations during prefinalizer invocations.
+ cppgc_allow_allocations_in_prefinalizers = false
+
# Enable young generation in cppgc.
cppgc_enable_young_generation = false
@@ -312,6 +311,11 @@ declare_args() {
# Sets -DV8_HEAP_SANDBOX.
v8_enable_heap_sandbox = ""
+ # Enable the Virtual Memory Cage, which contains the pointer compression cage
+ # as well as ArrayBuffer BackingStores and WASM memory cages.
+ # Sets -DV8_VIRTUAL_MEMORY_CAGE.
+ v8_enable_virtual_memory_cage = ""
+
# Experimental feature for collecting per-class zone memory stats.
# Requires use_rtti = true
v8_enable_precise_zone_stats = false
@@ -342,9 +346,18 @@ declare_args() {
# Enable global allocation site tracking.
v8_allocation_site_tracking = true
+
+ # If enabled, the receiver is always included in the actual and formal
+ # parameter count of functions with JS linkage.
+ # TODO(v8:11112): Remove once all architectures support the flag and it is
+ # enabled unconditionally.
+ v8_include_receiver_in_argc = false
}
# Derived defaults.
+if (cppgc_enable_verify_heap == "") {
+ cppgc_enable_verify_heap = v8_enable_debugging_features || dcheck_always_on
+}
if (v8_enable_verify_heap == "") {
v8_enable_verify_heap = v8_enable_debugging_features
}
@@ -392,6 +405,9 @@ if (v8_enable_zone_compression == "") {
if (v8_enable_heap_sandbox == "") {
v8_enable_heap_sandbox = false
}
+if (v8_enable_virtual_memory_cage == "") {
+ v8_enable_virtual_memory_cage = v8_enable_heap_sandbox
+}
if (v8_enable_short_builtin_calls == "") {
v8_enable_short_builtin_calls =
v8_current_cpu == "x64" || (!is_android && v8_current_cpu == "arm64")
@@ -461,9 +477,6 @@ if (build_with_chromium && v8_current_cpu == "arm64" &&
assert(!v8_disable_write_barriers || v8_enable_single_generation,
"Disabling write barriers works only with single generation")
-assert(v8_current_cpu != "x86" || !v8_untrusted_code_mitigations,
- "Untrusted code mitigations are unsupported on ia32")
-
assert(v8_current_cpu == "arm64" || !v8_control_flow_integrity,
"Control-flow integrity is only supported on arm64")
@@ -480,15 +493,22 @@ assert(!v8_enable_map_packing || !v8_enable_pointer_compression,
assert(!v8_enable_map_packing || v8_current_cpu == "x64",
"Map packing is only supported on x64")
-assert(!v8_use_multi_snapshots || !v8_control_flow_integrity,
- "Control-flow integrity does not support multisnapshots")
-
assert(!v8_enable_heap_sandbox || v8_enable_pointer_compression,
"V8 Heap Sandbox requires pointer compression")
assert(!v8_enable_heap_sandbox || !v8_enable_external_code_space,
"V8 Heap Sandbox is not compatible with external code space YET")
+assert(!v8_enable_heap_sandbox || v8_enable_virtual_memory_cage,
+ "The Heap Sandbox requires the virtual memory cage")
+
+assert(
+ !v8_enable_virtual_memory_cage || v8_enable_pointer_compression_shared_cage,
+ "V8 VirtualMemoryCage requires the shared pointer compression cage")
+
+assert(!v8_enable_virtual_memory_cage || !is_lsan,
+ "V8 VirtualMemoryCage is currently incompatible with Leak Sanitizer")
+
assert(
!v8_enable_pointer_compression_shared_cage || v8_enable_pointer_compression,
"Can't share a pointer compression cage if pointers aren't compressed")
@@ -502,7 +522,7 @@ assert(!v8_enable_unconditional_write_barriers || !v8_disable_write_barriers,
"Write barriers can't be both enabled and disabled")
assert(!cppgc_enable_caged_heap || v8_current_cpu == "x64" ||
- v8_current_cpu == "arm64",
+ v8_current_cpu == "arm64" || v8_current_cpu == "loong64",
"CppGC caged heap requires 64bit platforms")
assert(!cppgc_enable_young_generation || cppgc_enable_caged_heap,
@@ -650,6 +670,7 @@ external_v8_defines = [
"V8_31BIT_SMIS_ON_64BIT_ARCH",
"V8_COMPRESS_ZONES",
"V8_HEAP_SANDBOX",
+ "V8_VIRTUAL_MEMORY_CAGE",
"V8_DEPRECATION_WARNINGS",
"V8_IMMINENT_DEPRECATION_WARNINGS",
"V8_NO_ARGUMENTS_ADAPTOR",
@@ -680,6 +701,9 @@ if (v8_enable_zone_compression) {
if (v8_enable_heap_sandbox) {
enabled_external_v8_defines += [ "V8_HEAP_SANDBOX" ]
}
+if (v8_enable_virtual_memory_cage) {
+ enabled_external_v8_defines += [ "V8_VIRTUAL_MEMORY_CAGE" ]
+}
if (v8_deprecation_warnings) {
enabled_external_v8_defines += [ "V8_DEPRECATION_WARNINGS" ]
}
@@ -761,14 +785,18 @@ config("features") {
":cppgc_header_features",
]
- if (cppgc_enable_verify_live_bytes) {
- defines += [ "CPPGC_VERIFY_LIVE_BYTES" ]
+ if (cppgc_enable_verify_heap) {
+ defines += [ "CPPGC_VERIFY_HEAP" ]
}
if (cppgc_enable_check_assignments_in_prefinalizers) {
defines += [ "CPPGC_CHECK_ASSIGNMENTS_IN_PREFINALIZERS" ]
}
+ if (cppgc_allow_allocations_in_prefinalizers) {
+ defines += [ "CPPGC_ALLOW_ALLOCATIONS_IN_PREFINALIZERS" ]
+ }
+
if (v8_embedder_string != "") {
defines += [ "V8_EMBEDDER_STRING=\"$v8_embedder_string\"" ]
}
@@ -872,9 +900,6 @@ config("features") {
if (v8_enable_lazy_source_positions) {
defines += [ "V8_ENABLE_LAZY_SOURCE_POSITIONS" ]
}
- if (v8_use_multi_snapshots) {
- defines += [ "V8_MULTI_SNAPSHOTS" ]
- }
if (v8_use_siphash) {
defines += [ "V8_USE_SIPHASH" ]
}
@@ -935,6 +960,9 @@ config("features") {
if (v8_advanced_bigint_algorithms) {
defines += [ "V8_ADVANCED_BIGINT_ALGORITHMS" ]
}
+ if (v8_include_receiver_in_argc) {
+ defines += [ "V8_INCLUDE_RECEIVER_IN_ARGC" ]
+ }
}
config("toolchain") {
@@ -1057,6 +1085,15 @@ config("toolchain") {
defines += [ "_MIPS_ARCH_MIPS64R2" ]
}
}
+
+ # loong64 simulators.
+ if (target_is_simulator && v8_current_cpu == "loong64") {
+ defines += [ "_LOONG64_TARGET_SIMULATOR" ]
+ }
+ if (v8_current_cpu == "loong64") {
+ defines += [ "V8_TARGET_ARCH_LOONG64" ]
+ }
+
if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
defines += [ "V8_TARGET_ARCH_S390" ]
cflags += [ "-ffp-contract=off" ]
@@ -1170,10 +1207,6 @@ config("toolchain") {
defines += [ "V8_RUNTIME_CALL_STATS" ]
}
- if (!v8_untrusted_code_mitigations) {
- defines += [ "DISABLE_UNTRUSTED_CODE_MITIGATIONS" ]
- }
-
if (v8_no_inline) {
if (is_win) {
cflags += [ "/Ob0" ]
@@ -1309,8 +1342,6 @@ template("asm_to_inline_asm") {
if (is_android && enable_java_templates) {
android_assets("v8_external_startup_data_assets") {
if (v8_use_external_startup_data) {
- # We don't support side-by-side snapshots on Android within Chromium.
- assert(!v8_use_multi_snapshots)
deps = [ "//v8" ]
renaming_sources = [ "$root_out_dir/snapshot_blob.bin" ]
if (current_cpu == "arm" || current_cpu == "x86" ||
@@ -1987,17 +2018,6 @@ if (emit_builtins_as_inline_asm) {
args = []
}
}
-if (v8_use_multi_snapshots) {
- run_mksnapshot("trusted") {
- args = [ "--no-untrusted-code-mitigations" ]
- embedded_variant = "Trusted"
- }
- if (emit_builtins_as_inline_asm) {
- asm_to_inline_asm("trusted") {
- args = []
- }
- }
-}
action("v8_dump_build_config") {
script = "tools/testrunner/utils/dump_build_config.py"
@@ -2034,6 +2054,7 @@ action("v8_dump_build_config") {
"v8_enable_pointer_compression=$v8_enable_pointer_compression",
"v8_enable_pointer_compression_shared_cage=" +
"$v8_enable_pointer_compression_shared_cage",
+ "v8_enable_virtual_memory_cage=$v8_enable_virtual_memory_cage",
"v8_enable_third_party_heap=$v8_enable_third_party_heap",
"v8_enable_webassembly=$v8_enable_webassembly",
"v8_control_flow_integrity=$v8_control_flow_integrity",
@@ -2086,16 +2107,6 @@ v8_source_set("v8_snapshot") {
deps += [ ":v8_base" ]
sources += [ "src/snapshot/snapshot-external.cc" ]
-
- if (v8_use_multi_snapshots) {
- public_deps += [ ":run_mksnapshot_trusted" ]
- if (emit_builtins_as_inline_asm) {
- deps += [ ":asm_to_inline_asm_trusted" ]
- sources += [ "$target_gen_dir/embedded_trusted.cc" ]
- } else {
- sources += [ "$target_gen_dir/embedded_trusted.S" ]
- }
- }
} else {
# Also top-level visibility targets can depend on this.
visibility += [ "//:gn_visibility" ]
@@ -2230,6 +2241,11 @@ v8_source_set("v8_initializers") {
### gcmole(arch:mips64el) ###
"src/builtins/mips64/builtins-mips64.cc",
]
+ } else if (v8_current_cpu == "loong64") {
+ sources += [
+ ### gcmole(arch:loong64) ###
+ "src/builtins/loong64/builtins-loong64.cc",
+ ]
} else if (v8_current_cpu == "ppc") {
sources += [
### gcmole(arch:ppc) ###
@@ -2313,11 +2329,53 @@ v8_header_set("v8_headers") {
public_configs = [ ":headers_config" ]
sources = [
+ "include/v8-array-buffer.h",
+ "include/v8-callbacks.h",
+ "include/v8-container.h",
+ "include/v8-context.h",
"include/v8-cppgc.h",
+ "include/v8-data.h",
+ "include/v8-date.h",
+ "include/v8-debug.h",
+ "include/v8-embedder-heap.h",
+ "include/v8-exception.h",
+ "include/v8-extension.h",
+ "include/v8-external.h",
"include/v8-fast-api-calls.h",
+ "include/v8-forward.h",
+ "include/v8-function-callback.h",
+ "include/v8-function.h",
+ "include/v8-initialization.h",
"include/v8-internal.h",
+ "include/v8-isolate.h",
+ "include/v8-json.h",
+ "include/v8-local-handle.h",
+ "include/v8-locker.h",
+ "include/v8-maybe.h",
+ "include/v8-memory-span.h",
+ "include/v8-message.h",
+ "include/v8-microtask-queue.h",
+ "include/v8-microtask.h",
+ "include/v8-object.h",
+ "include/v8-persistent-handle.h",
+ "include/v8-primitive-object.h",
+ "include/v8-primitive.h",
"include/v8-profiler.h",
+ "include/v8-promise.h",
+ "include/v8-proxy.h",
+ "include/v8-regexp.h",
+ "include/v8-script.h",
+ "include/v8-snapshot.h",
+ "include/v8-statistics.h",
+ "include/v8-template.h",
+ "include/v8-traced-handle.h",
+ "include/v8-typed-array.h",
+ "include/v8-unwinder.h",
"include/v8-util.h",
+ "include/v8-value-serializer.h",
+ "include/v8-value.h",
+ "include/v8-wasm.h",
+ "include/v8-weak-callback-info.h",
"include/v8.h",
]
@@ -2450,6 +2508,7 @@ v8_header_set("v8_internal_headers") {
"src/codegen/assembler-arch.h",
"src/codegen/assembler-inl.h",
"src/codegen/assembler.h",
+ "src/codegen/atomic-memory-order.h",
"src/codegen/bailout-reason.h",
"src/codegen/callable.h",
"src/codegen/code-comments.h",
@@ -2532,7 +2591,6 @@ v8_header_set("v8_internal_headers") {
"src/compiler/common-operator-reducer.h",
"src/compiler/common-operator.h",
"src/compiler/compilation-dependencies.h",
- "src/compiler/compilation-dependency.h",
"src/compiler/compiler-source-position-table.h",
"src/compiler/constant-folding-reducer.h",
"src/compiler/control-equivalence.h",
@@ -2775,6 +2833,7 @@ v8_header_set("v8_internal_headers") {
"src/heap/paged-spaces.h",
"src/heap/parallel-work-item.h",
"src/heap/parked-scope.h",
+ "src/heap/progress-bar.h",
"src/heap/read-only-heap-inl.h",
"src/heap/read-only-heap.h",
"src/heap/read-only-spaces.h",
@@ -2806,6 +2865,7 @@ v8_header_set("v8_internal_headers") {
"src/init/setup-isolate.h",
"src/init/startup-data-util.h",
"src/init/v8.h",
+ "src/init/vm-cage.h",
"src/interpreter/block-coverage-builder.h",
"src/interpreter/bytecode-array-builder.h",
"src/interpreter/bytecode-array-iterator.h",
@@ -3088,6 +3148,7 @@ v8_header_set("v8_internal_headers") {
"src/regexp/regexp-compiler.h",
"src/regexp/regexp-dotprinter.h",
"src/regexp/regexp-error.h",
+ "src/regexp/regexp-flags.h",
"src/regexp/regexp-interpreter.h",
"src/regexp/regexp-macro-assembler-arch.h",
"src/regexp/regexp-macro-assembler-tracer.h",
@@ -3188,6 +3249,7 @@ v8_header_set("v8_internal_headers") {
"src/asmjs/asm-types.h",
"src/compiler/int64-lowering.h",
"src/compiler/wasm-compiler.h",
+ "src/compiler/wasm-inlining.h",
"src/debug/debug-wasm-objects-inl.h",
"src/debug/debug-wasm-objects.h",
"src/trap-handler/trap-handler-internal.h",
@@ -3442,6 +3504,21 @@ v8_header_set("v8_internal_headers") {
"src/regexp/mips64/regexp-macro-assembler-mips64.h",
"src/wasm/baseline/mips64/liftoff-assembler-mips64.h",
]
+ } else if (v8_current_cpu == "loong64") {
+ sources += [ ### gcmole(arch:loong64) ###
+ "src/baseline/loong64/baseline-assembler-loong64-inl.h",
+ "src/baseline/loong64/baseline-compiler-loong64-inl.h",
+ "src/codegen/loong64/assembler-loong64-inl.h",
+ "src/codegen/loong64/assembler-loong64.h",
+ "src/codegen/loong64/constants-loong64.h",
+ "src/codegen/loong64/macro-assembler-loong64.h",
+ "src/codegen/loong64/register-loong64.h",
+ "src/compiler/backend/loong64/instruction-codes-loong64.h",
+ "src/execution/loong64/frame-constants-loong64.h",
+ "src/execution/loong64/simulator-loong64.h",
+ "src/regexp/loong64/regexp-macro-assembler-loong64.h",
+ "src/wasm/baseline/loong64/liftoff-assembler-loong64.h",
+ ]
} else if (v8_current_cpu == "ppc") {
sources += [ ### gcmole(arch:ppc) ###
"src/codegen/ppc/assembler-ppc-inl.h",
@@ -3639,6 +3716,7 @@ if (v8_enable_webassembly) {
v8_compiler_sources += [
"src/compiler/int64-lowering.cc",
"src/compiler/wasm-compiler.cc",
+ "src/compiler/wasm-inlining.cc",
]
}
@@ -3923,6 +4001,7 @@ v8_source_set("v8_base_without_compiler") {
"src/init/isolate-allocator.cc",
"src/init/startup-data-util.cc",
"src/init/v8.cc",
+ "src/init/vm-cage.cc",
"src/interpreter/bytecode-array-builder.cc",
"src/interpreter/bytecode-array-iterator.cc",
"src/interpreter/bytecode-array-random-iterator.cc",
@@ -4359,6 +4438,23 @@ v8_source_set("v8_base_without_compiler") {
"src/execution/mips64/simulator-mips64.cc",
"src/regexp/mips64/regexp-macro-assembler-mips64.cc",
]
+ } else if (v8_current_cpu == "loong64") {
+ sources += [ ### gcmole(arch:loong64) ###
+ "src/codegen/loong64/assembler-loong64.cc",
+ "src/codegen/loong64/constants-loong64.cc",
+ "src/codegen/loong64/cpu-loong64.cc",
+ "src/codegen/loong64/interface-descriptors-loong64-inl.h",
+ "src/codegen/loong64/macro-assembler-loong64.cc",
+ "src/compiler/backend/loong64/code-generator-loong64.cc",
+ "src/compiler/backend/loong64/instruction-scheduler-loong64.cc",
+ "src/compiler/backend/loong64/instruction-selector-loong64.cc",
+ "src/deoptimizer/loong64/deoptimizer-loong64.cc",
+ "src/diagnostics/loong64/disasm-loong64.cc",
+ "src/diagnostics/loong64/unwinder-loong64.cc",
+ "src/execution/loong64/frame-constants-loong64.cc",
+ "src/execution/loong64/simulator-loong64.cc",
+ "src/regexp/loong64/regexp-macro-assembler-loong64.cc",
+ ]
} else if (v8_current_cpu == "ppc") {
sources += [ ### gcmole(arch:ppc) ###
"src/codegen/ppc/assembler-ppc.cc",
@@ -4757,6 +4853,7 @@ v8_component("v8_libbase") {
"src/base/sanitizer/lsan-page-allocator.h",
"src/base/sanitizer/lsan.h",
"src/base/sanitizer/msan.h",
+ "src/base/sanitizer/tsan.h",
"src/base/small-vector.h",
"src/base/strings.cc",
"src/base/strings.h",
@@ -5060,6 +5157,8 @@ v8_source_set("v8_cppgc_shared") {
sources += [ "src/heap/base/asm/mips/push_registers_asm.cc" ]
} else if (current_cpu == "mips64el") {
sources += [ "src/heap/base/asm/mips64/push_registers_asm.cc" ]
+ } else if (current_cpu == "loong64") {
+ sources += [ "src/heap/base/asm/loong64/push_registers_asm.cc" ]
} else if (current_cpu == "riscv64") {
sources += [ "src/heap/base/asm/riscv64/push_registers_asm.cc" ]
}
@@ -5093,6 +5192,7 @@ v8_header_set("cppgc_headers") {
sources = [
"include/cppgc/allocation.h",
"include/cppgc/common.h",
+ "include/cppgc/cross-thread-persistent.h",
"include/cppgc/custom-space.h",
"include/cppgc/default-platform.h",
"include/cppgc/ephemeron-pair.h",
@@ -5211,6 +5311,7 @@ v8_source_set("cppgc_base") {
"src/heap/cppgc/page-memory.h",
"src/heap/cppgc/persistent-node.cc",
"src/heap/cppgc/platform.cc",
+ "src/heap/cppgc/platform.h",
"src/heap/cppgc/pointer-policies.cc",
"src/heap/cppgc/prefinalizer-handler.cc",
"src/heap/cppgc/prefinalizer-handler.h",
@@ -5285,10 +5386,12 @@ if (v8_check_header_includes) {
":torque_ls_base",
":v8_base_without_compiler",
":v8_bigint",
+ ":v8_headers",
":v8_initializers",
":v8_internal_headers",
":v8_libbase",
":v8_maybe_icu",
+ ":v8_version",
":wee8",
"src/inspector:inspector",
"src/inspector:inspector_string_conversions",
@@ -5854,8 +5957,8 @@ if (want_v8_shell) {
}
}
-v8_executable("cppgc_sample") {
- sources = [ "samples/cppgc/cppgc-sample.cc" ]
+v8_executable("cppgc_hello_world") {
+ sources = [ "samples/cppgc/hello-world.cc" ]
if (v8_current_cpu == "riscv64") {
libs = [ "atomic" ]
diff --git a/deps/v8/COMMON_OWNERS b/deps/v8/COMMON_OWNERS
index 69222f9843..dc831c0e97 100644
--- a/deps/v8/COMMON_OWNERS
+++ b/deps/v8/COMMON_OWNERS
@@ -24,14 +24,11 @@ marja@chromium.org
mlippautz@chromium.org
mslekova@chromium.org
mvstanton@chromium.org
-mythria@chromium.org
neis@chromium.org
nicohartmann@chromium.org
omerkatz@chromium.org
pthier@chromium.org
-rmcilroy@chromium.org
sigurds@chromium.org
-solanes@chromium.org
syg@chromium.org
szuend@chromium.org
thibaudm@chromium.org
diff --git a/deps/v8/DEPS b/deps/v8/DEPS
index 439f45ca58..8059e3b8c3 100644
--- a/deps/v8/DEPS
+++ b/deps/v8/DEPS
@@ -46,13 +46,13 @@ vars = {
'checkout_reclient': False,
# reclient CIPD package version
- 'reclient_version': 're_client_version:0.33.0.3e223d5',
+ 'reclient_version': 're_client_version:0.40.0.40ff5a5',
# GN CIPD package version.
- 'gn_version': 'git_revision:eea3906f0e2a8d3622080127d2005ff214d51383',
+ 'gn_version': 'git_revision:69ec4fca1fa69ddadae13f9e6b7507efa0675263',
# luci-go CIPD package version.
- 'luci_go': 'git_revision:1120f810b7ab7eb71bd618c4c57fe82a60d4f2fe',
+ 'luci_go': 'git_revision:7b62727dc713b47d7a7ce9bca27500cb8e82ebd7',
# Three lines of non-changing comments so that
# the commit queue can handle CLs rolling android_sdk_build-tools_version
@@ -90,11 +90,11 @@ vars = {
deps = {
'base/trace_event/common':
- Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '3da1e2fcf66acd5c7194497b4285ac163f32e239',
+ Var('chromium_url') + '/chromium/src/base/trace_event/common.git' + '@' + '715537d6007ca71837f48bcb04fc3d482aed2507',
'build':
- Var('chromium_url') + '/chromium/src/build.git' + '@' + 'bbf7f0ed65548c4df862d2a2748e3a9b908a3217',
+ Var('chromium_url') + '/chromium/src/build.git' + '@' + '17d097b0ffdc297f04afb54e9e3abff3f1203f06',
'buildtools':
- Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '37dc929ecb351687006a61744b116cda601753d7',
+ Var('chromium_url') + '/chromium/src/buildtools.git' + '@' + '7ea3a871db68ae2cbbeaf5433a3192a799ef3c11',
'buildtools/clang_format/script':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/clang/tools/clang-format.git' + '@' + '99803d74e35962f63a775f29477882afd4d57d94',
'buildtools/linux64': {
@@ -120,9 +120,9 @@ deps = {
'buildtools/third_party/libc++/trunk':
Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxx.git' + '@' + '79a2e924d96e2fc1e4b937c42efd08898fa472d7',
'buildtools/third_party/libc++abi/trunk':
- Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '24e92c2beed59b76ddabe7ceb5ee4b40f09e0712',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/libcxxabi.git' + '@' + '17de75220a90f23a16f9f87fbc5c00dce475b726',
'buildtools/third_party/libunwind/trunk':
- Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + 'b825591df326b2725e6b88bdf74fdc88fefdf460',
+ Var('chromium_url') + '/external/github.com/llvm/llvm-project/libunwind.git' + '@' + '44ea7aba6a34a9250e7793418d83f209a480caf4',
'buildtools/win': {
'packages': [
{
@@ -148,14 +148,14 @@ deps = {
'test/mozilla/data':
Var('chromium_url') + '/v8/deps/third_party/mozilla-tests.git' + '@' + 'f6c578a10ea707b1a8ab0b88943fe5115ce2b9be',
'test/test262/data':
- Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + 'ab353c6e732b9e175d3ad6779e3acf3ea82d3761',
+ Var('chromium_url') + '/external/github.com/tc39/test262.git' + '@' + '50f3fca7a0eac6b6e8e5e9aee7af3c2a05831261',
'test/test262/harness':
Var('chromium_url') + '/external/github.com/test262-utils/test262-harness-py.git' + '@' + '278bcfaed0dcaa13936831fb1769d15e7c1e3b2b',
'third_party/aemu-linux-x64': {
'packages': [
{
'package': 'fuchsia/third_party/aemu/linux-amd64',
- 'version': 'qWiGSH8A_xdaUVO-GsDJsJ5HCkIRwZqb-HDyxsLiuWwC'
+ 'version': 'QewYN5289B8deg5Mn6clWEv58UqpocHGKeob2F0T87kC'
},
],
'condition': 'host_os == "linux" and checkout_fuchsia',
@@ -176,7 +176,7 @@ deps = {
'condition': 'checkout_android',
},
'third_party/android_platform': {
- 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + 'e98c753917587d320f4e7a24f1c7474535adac3f',
+ 'url': Var('chromium_url') + '/chromium/src/third_party/android_platform.git' + '@' + '6e5dc9acd241c308385f970c384d9e083b2b6e56',
'condition': 'checkout_android',
},
'third_party/android_sdk/public': {
@@ -218,7 +218,7 @@ deps = {
'dep_type': 'cipd',
},
'third_party/catapult': {
- 'url': Var('chromium_url') + '/catapult.git' + '@' + 'abc7ba7d871fe3c25b0a1bec7fc84fb309034cb7',
+ 'url': Var('chromium_url') + '/catapult.git' + '@' + '2331f088546de8f58dcc02daf8212254aaeb2d4c',
'condition': 'checkout_android',
},
'third_party/colorama/src': {
@@ -226,20 +226,20 @@ deps = {
'condition': 'checkout_android',
},
'third_party/depot_tools':
- Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '49a703f3d915b140c9f373107e1ba17f30e2487d',
+ Var('chromium_url') + '/chromium/tools/depot_tools.git' + '@' + '728566654bb1d2c78cdbe6b642c0d68c6f658ca7',
'third_party/fuchsia-sdk': {
'url': Var('chromium_url') + '/chromium/src/third_party/fuchsia-sdk.git' + '@' + '18896843130c33372c455c153ad07d2217bd2085',
'condition': 'checkout_fuchsia',
},
'third_party/google_benchmark/src': {
- 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + '4124223bf5303d1d65fe2c40f33e28372bbb986c',
+ 'url': Var('chromium_url') + '/external/github.com/google/benchmark.git' + '@' + 'c23a0012523bc3e12c9323f398dcc433c4f19f05',
},
'third_party/googletest/src':
- Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '47f819c3ca54fb602f432904443e00a0a1fe2f42',
+ Var('chromium_url') + '/external/github.com/google/googletest.git' + '@' + '955c7f837efad184ec63e771c42542d37545eaef',
'third_party/icu':
- Var('chromium_url') + '/chromium/deps/icu.git' + '@' + '75e34bcccea0be165c31fdb278b3712c516c5876',
+ Var('chromium_url') + '/chromium/deps/icu.git' + '@' + 'ece15d049f2d360721716089372e3749fb89e0f4',
'third_party/instrumented_libraries':
- Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '9a8087bbbf43a355950fc1667575d1a753f8aaa4',
+ Var('chromium_url') + '/chromium/src/third_party/instrumented_libraries.git' + '@' + '47226fa33ef5c9b48668c74128f25ef82f10e7af',
'third_party/ittapi': {
# Force checkout ittapi libraries to pass v8 header includes check on
# bots that has check_v8_header_includes enabled.
@@ -247,7 +247,7 @@ deps = {
'condition': "checkout_ittapi or check_v8_header_includes",
},
'third_party/jinja2':
- Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + '7c54c1f227727e0c4c1d3dc19dd71cd601a2db95',
+ Var('chromium_url') + '/chromium/src/third_party/jinja2.git' + '@' + '6db8da1615a13fdfab925688bc4bf2eb394a73af',
'third_party/jsoncpp/source':
Var('chromium_url') + '/external/github.com/open-source-parsers/jsoncpp.git'+ '@' + '9059f5cad030ba11d37818847443a53918c327b1',
'third_party/logdog/logdog':
@@ -283,9 +283,9 @@ deps = {
'condition': 'checkout_android',
},
'third_party/zlib':
- Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '563140dd9c24f84bf40919196e9e7666d351cc0d',
+ Var('chromium_url') + '/chromium/src/third_party/zlib.git'+ '@' + '77c132322fe81a1f5518b326e18c99ebd3281627',
'tools/clang':
- Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '6a8e571efd68de48d226950d1e10cb8982e71496',
+ Var('chromium_url') + '/chromium/src/tools/clang.git' + '@' + '664e4259b150e07f1a1e440459f59fbc68edb82f',
'tools/clang/dsymutil': {
'packages': [
{
@@ -314,8 +314,6 @@ deps = {
'condition': 'host_cpu != "s390" and host_os != "aix"',
'dep_type': 'cipd',
},
- 'tools/swarming_client':
- Var('chromium_url') + '/infra/luci/client-py.git' + '@' + 'a32a1607f6093d338f756c7e7c7b4333b0c50c9c',
}
include_rules = [
diff --git a/deps/v8/ENG_REVIEW_OWNERS b/deps/v8/ENG_REVIEW_OWNERS
index 173f6d6aee..e5040c45ad 100644
--- a/deps/v8/ENG_REVIEW_OWNERS
+++ b/deps/v8/ENG_REVIEW_OWNERS
@@ -5,4 +5,3 @@
adamk@chromium.org
danno@chromium.org
hpayer@chromium.org
-rmcilroy@chromium.org
diff --git a/deps/v8/LOONG_OWNERS b/deps/v8/LOONG_OWNERS
new file mode 100644
index 0000000000..cda25c2700
--- /dev/null
+++ b/deps/v8/LOONG_OWNERS
@@ -0,0 +1,3 @@
+liuyu@loongson.cn
+yuyin-hf@loongson.cn
+zhaojiazhong-hf@loongson.cn
diff --git a/deps/v8/MIPS_OWNERS b/deps/v8/MIPS_OWNERS
index 6c65e34e9c..fc3d3e4396 100644
--- a/deps/v8/MIPS_OWNERS
+++ b/deps/v8/MIPS_OWNERS
@@ -1,2 +1,3 @@
xwafish@gmail.com
zhaojiazhong-hf@loongson.cn
+liuyu@loongson.cn
diff --git a/deps/v8/OWNERS b/deps/v8/OWNERS
index f9b23f237f..7174da6f15 100644
--- a/deps/v8/OWNERS
+++ b/deps/v8/OWNERS
@@ -27,6 +27,11 @@ per-file codereview.settings=file:INFRA_OWNERS
per-file AUTHORS=file:COMMON_OWNERS
per-file WATCHLISTS=file:COMMON_OWNERS
+# Needed by the auto_tag builder
+per-file WATCHLISTS=v8-ci-autoroll-builder@chops-service-accounts.iam.gserviceaccount.com
+per-file DEPS=v8-ci-autoroll-builder@chops-service-accounts.iam.gserviceaccount.com
+
+per-file ...-loong64*=file:LOONG_OWNERS
per-file ...-mips*=file:MIPS_OWNERS
per-file ...-mips64*=file:MIPS_OWNERS
per-file ...-ppc*=file:PPC_OWNERS
diff --git a/deps/v8/WATCHLISTS b/deps/v8/WATCHLISTS
index b8b7eac99a..ad065a9842 100644
--- a/deps/v8/WATCHLISTS
+++ b/deps/v8/WATCHLISTS
@@ -104,6 +104,12 @@
'|WORKSPACE' \
'|bazel/',
},
+ 'cppgc': {
+ 'filepath': 'src/heap/cppgc/' \
+ '|src/heap/cppgc-js/' \
+ '|include/cppgc/' \
+ '|test/unittests/heap/',
+ },
},
'WATCHLISTS': {
@@ -119,9 +125,6 @@
'devtools': [
'devtools-reviews+v8@chromium.org',
],
- 'interpreter': [
- 'rmcilroy@chromium.org',
- ],
'baseline': [
'leszeks+watch@chromium.org',
'verwaest+watch@chromium.org',
@@ -169,5 +172,8 @@
'api': [
'cbruni+watch@chromium.org',
],
+ 'cppgc': [
+ 'oilpan-reviews+v8@chromium.org',
+ ],
},
}
diff --git a/deps/v8/base/trace_event/common/trace_event_common.h b/deps/v8/base/trace_event/common/trace_event_common.h
index 76391985c1..62f3c2ec07 100644
--- a/deps/v8/base/trace_event/common/trace_event_common.h
+++ b/deps/v8/base/trace_event/common/trace_event_common.h
@@ -203,6 +203,7 @@
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
+#include "build/build_config.h"
// Export Perfetto symbols in the same way as //base symbols.
#define PERFETTO_COMPONENT_EXPORT BASE_EXPORT
@@ -261,6 +262,11 @@ template <>
perfetto::ThreadTrack BASE_EXPORT
ConvertThreadId(const ::base::PlatformThreadId& thread);
+#if defined(OS_WIN)
+template <>
+perfetto::ThreadTrack BASE_EXPORT ConvertThreadId(const int& thread);
+#endif // defined(OS_WIN)
+
} // namespace legacy
template <>
diff --git a/deps/v8/gni/snapshot_toolchain.gni b/deps/v8/gni/snapshot_toolchain.gni
index e855b88e43..feabd079e0 100644
--- a/deps/v8/gni/snapshot_toolchain.gni
+++ b/deps/v8/gni/snapshot_toolchain.gni
@@ -84,7 +84,7 @@ if (v8_snapshot_toolchain == "") {
if (v8_current_cpu == "x64" || v8_current_cpu == "x86") {
_cpus = v8_current_cpu
} else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el" ||
- v8_current_cpu == "riscv64") {
+ v8_current_cpu == "riscv64" || v8_current_cpu == "loong64") {
if (is_win && v8_current_cpu == "arm64") {
# set _cpus to blank for Windows ARM64 so host_toolchain could be
# selected as snapshot toolchain later.
diff --git a/deps/v8/gni/v8.gni b/deps/v8/gni/v8.gni
index a334651797..fe445307f9 100644
--- a/deps/v8/gni/v8.gni
+++ b/deps/v8/gni/v8.gni
@@ -35,9 +35,6 @@ declare_args() {
# as an argument to profiler's method `takeHeapSnapshot`.
v8_enable_raw_heap_snapshots = false
- # Enable several snapshots side-by-side (e.g. default and for trusted code).
- v8_use_multi_snapshots = false
-
# Use external files for startup data blobs:
# the JS builtins sources and the start snapshot.
v8_use_external_startup_data = ""
@@ -99,13 +96,6 @@ if (v8_use_external_startup_data == "") {
v8_use_external_startup_data = !is_ios
}
-if (v8_use_multi_snapshots) {
- # Silently disable multi snapshots if they're incompatible with the current
- # build configuration. This allows us to set v8_use_multi_snapshots=true on
- # all bots, and e.g. no-snapshot bots will automatically do the right thing.
- v8_use_multi_snapshots = v8_use_external_startup_data && !build_with_chromium
-}
-
if (v8_enable_backtrace == "") {
v8_enable_backtrace = is_debug && !v8_optimized_debug
}
diff --git a/deps/v8/include/OWNERS b/deps/v8/include/OWNERS
index d85849d52a..0222513df2 100644
--- a/deps/v8/include/OWNERS
+++ b/deps/v8/include/OWNERS
@@ -11,6 +11,9 @@ per-file v8-inspector.h=file:../src/inspector/OWNERS
per-file v8-inspector-protocol.h=file:../src/inspector/OWNERS
per-file js_protocol.pdl=file:../src/inspector/OWNERS
+# Needed by the auto_tag builder
+per-file v8-version.h=v8-ci-autoroll-builder@chops-service-accounts.iam.gserviceaccount.com
+
# For branch updates:
per-file v8-version.h=file:../INFRA_OWNERS
per-file v8-version.h=hablich@chromium.org
diff --git a/deps/v8/include/cppgc/README.md b/deps/v8/include/cppgc/README.md
index 3a2db6dfa9..e454399853 100644
--- a/deps/v8/include/cppgc/README.md
+++ b/deps/v8/include/cppgc/README.md
@@ -1,5 +1,16 @@
-# C++ Garbage Collection
+# Oilpan: C++ Garbage Collection
-This directory provides an open-source garbage collection library for C++.
+Oilpan is an open-source garbage collection library for C++ that can be used stand-alone or in collaboration with V8's JavaScript garbage collector.
-The library is under construction, meaning that *all APIs in this directory are incomplete and considered unstable and should not be used*.
\ No newline at end of file
+**Key properties**
+- Trace-based garbage collection;
+- Precise on-heap memory layout;
+- Conservative on-stack memory layout;
+- Allows for collection with and without considering stack;
+- Incremental and concurrent marking;
+- Incremental and concurrent sweeping;
+- Non-incremental and non-concurrent compaction for selected spaces;
+
+See the [Hello World](https://chromium.googlesource.com/v8/v8/+/main/samples/cppgc/hello-world.cc) example for how to get started using Oilpan to manage C++ code.
+
+Oilpan follows V8's project organization; see e.g. how we accept [contributions](https://v8.dev/docs/contribute) and [provide a stable API](https://v8.dev/docs/api).
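
Editor's note: for orientation, here is a minimal sketch of the programming model the new README describes, using only public cppgc APIs that appear elsewhere in this diff (`GarbageCollected`, `Member`, `MakeGarbageCollected`). Heap and platform setup is elided, as in the linked hello-world sample.

```cpp
// Minimal Oilpan usage sketch. Assumes a cppgc heap has already been
// created; its AllocationHandle is passed in.
#include <cppgc/allocation.h>
#include <cppgc/garbage-collected.h>
#include <cppgc/member.h>
#include <cppgc/visitor.h>

class Child final : public cppgc::GarbageCollected<Child> {
 public:
  void Trace(cppgc::Visitor*) const {}
};

class Parent final : public cppgc::GarbageCollected<Parent> {
 public:
  explicit Parent(Child* child) : child_(child) {}
  // Trace() is what makes the on-heap layout precise: the marker visits
  // exactly the references reported here.
  void Trace(cppgc::Visitor* visitor) const { visitor->Trace(child_); }

 private:
  cppgc::Member<Child> child_;  // traced on-heap reference
};

Parent* Create(cppgc::AllocationHandle& handle) {
  // Objects are allocated via MakeGarbageCollected; there is no delete --
  // unreachable objects are reclaimed by the collector.
  return cppgc::MakeGarbageCollected<Parent>(
      handle, cppgc::MakeGarbageCollected<Child>(handle));
}
```
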
diff --git a/deps/v8/include/cppgc/allocation.h b/deps/v8/include/cppgc/allocation.h
index d75f1a9729..a3112dd61f 100644
--- a/deps/v8/include/cppgc/allocation.h
+++ b/deps/v8/include/cppgc/allocation.h
@@ -36,8 +36,13 @@ class V8_EXPORT MakeGarbageCollectedTraitInternal {
const_cast<uint16_t*>(reinterpret_cast<const uint16_t*>(
reinterpret_cast<const uint8_t*>(payload) -
api_constants::kFullyConstructedBitFieldOffsetFromPayload)));
- atomic_mutable_bitfield->fetch_or(api_constants::kFullyConstructedBitMask,
- std::memory_order_release);
+ // It's safe to use a split load+store here (instead of a read-modify-write
+ // operation), since it's guaranteed that this 16-bit bitfield is only
+ // modified by a single thread. This is cheaper in terms of code bloat (on
+ // ARM) and performance.
+ uint16_t value = atomic_mutable_bitfield->load(std::memory_order_relaxed);
+ value |= api_constants::kFullyConstructedBitMask;
+ atomic_mutable_bitfield->store(value, std::memory_order_release);
}
template <typename U, typename CustomSpace>
@@ -202,7 +207,7 @@ struct PostConstructionCallbackTrait {
* \returns an instance of type T.
*/
template <typename T, typename... Args>
-T* MakeGarbageCollected(AllocationHandle& handle, Args&&... args) {
+V8_INLINE T* MakeGarbageCollected(AllocationHandle& handle, Args&&... args) {
T* object =
MakeGarbageCollectedTrait<T>::Call(handle, std::forward<Args>(args)...);
PostConstructionCallbackTrait<T>::Call(object);
@@ -220,8 +225,9 @@ T* MakeGarbageCollected(AllocationHandle& handle, Args&&... args) {
* \returns an instance of type T.
*/
template <typename T, typename... Args>
-T* MakeGarbageCollected(AllocationHandle& handle,
- AdditionalBytes additional_bytes, Args&&... args) {
+V8_INLINE T* MakeGarbageCollected(AllocationHandle& handle,
+ AdditionalBytes additional_bytes,
+ Args&&... args) {
T* object = MakeGarbageCollectedTrait<T>::Call(handle, additional_bytes,
std::forward<Args>(args)...);
PostConstructionCallbackTrait<T>::Call(object);
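
Editor's note: the load+store split above is only correct because of the single-writer guarantee called out in the comment. A standalone sketch of the pattern, with illustrative names rather than V8 internals:

```cpp
// Single-writer bitfield pattern: a relaxed load followed by a release
// store is equivalent to the fetch_or it replaces when only one thread
// ever writes the field, and avoids a read-modify-write instruction.
#include <atomic>
#include <cstdint>

constexpr uint16_t kFullyConstructedBit = 1u << 0;

void SetConstructed(std::atomic<uint16_t>& bitfield) {
  // Equivalent RMW form:
  //   bitfield.fetch_or(kFullyConstructedBit, std::memory_order_release);
  uint16_t value = bitfield.load(std::memory_order_relaxed);
  value |= kFullyConstructedBit;
  bitfield.store(value, std::memory_order_release);
}

bool IsConstructed(const std::atomic<uint16_t>& bitfield) {
  // Readers on other threads pair with the release store via acquire.
  return (bitfield.load(std::memory_order_acquire) & kFullyConstructedBit) != 0;
}
```
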
diff --git a/deps/v8/include/cppgc/cross-thread-persistent.h b/deps/v8/include/cppgc/cross-thread-persistent.h
index 0a9afdcd2b..c8751e1d64 100644
--- a/deps/v8/include/cppgc/cross-thread-persistent.h
+++ b/deps/v8/include/cppgc/cross-thread-persistent.h
@@ -34,7 +34,35 @@ class CrossThreadPersistentBase : public PersistentBase {
V8_CLANG_NO_SANITIZE("address")
void ClearFromGC() const {
raw_ = nullptr;
- node_ = nullptr;
+ SetNodeSafe(nullptr);
+ }
+
+ // GetNodeSafe() can be used for a thread-safe IsValid() check in a
+ // double-checked locking pattern. See ~BasicCrossThreadPersistent.
+ PersistentNode* GetNodeSafe() const {
+ return reinterpret_cast<std::atomic<PersistentNode*>*>(&node_)->load(
+ std::memory_order_acquire);
+ }
+
+ // The GC writes using SetNodeSafe() while holding the lock.
+ V8_CLANG_NO_SANITIZE("address")
+ void SetNodeSafe(PersistentNode* value) const {
+#if defined(__has_feature)
+#if __has_feature(address_sanitizer)
+#define V8_IS_ASAN 1
+#endif
+#endif
+
+#ifdef V8_IS_ASAN
+ __atomic_store(&node_, &value, __ATOMIC_RELEASE);
+#else // !V8_IS_ASAN
+ // Non-ASAN builds can use atomics. This also covers MSVC, which does not
+ // have the __atomic_store intrinsic.
+ reinterpret_cast<std::atomic<PersistentNode*>*>(&node_)->store(
+ value, std::memory_order_release);
+#endif // !V8_IS_ASAN
+
+#undef V8_IS_ASAN
}
};
@@ -48,7 +76,31 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase,
using typename WeaknessPolicy::IsStrongPersistent;
using PointeeType = T;
- ~BasicCrossThreadPersistent() { Clear(); }
+ ~BasicCrossThreadPersistent() {
+ // This implements the fast path for destroying empty/sentinel handles.
+ //
+ // Simplified version of `AssignUnsafe()` to allow calling without a
+ // complete type `T`. Uses double-checked locking with a simple thread-safe
+ // check for a valid handle based on a node.
+ if (GetNodeSafe()) {
+ PersistentRegionLock guard;
+ const void* old_value = GetValue();
+ // The fast path check (GetNodeSafe()) does not acquire the lock. Recheck
+ // validity while holding the lock to ensure the reference has not been
+ // cleared.
+ if (IsValid(old_value)) {
+ CrossThreadPersistentRegion& region =
+ this->GetPersistentRegion(old_value);
+ region.FreeNode(GetNode());
+ SetNode(nullptr);
+ } else {
+ CPPGC_DCHECK(!GetNode());
+ }
+ }
+ // No need to call SetValue() as the handle is not used anymore. This can
+ // leave behind stale sentinel values but will always destroy the underlying
+ // node.
+ }
BasicCrossThreadPersistent(
const SourceLocation& loc = SourceLocation::Current())
@@ -135,7 +187,7 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase,
BasicCrossThreadPersistent& operator=(
const BasicCrossThreadPersistent& other) {
PersistentRegionLock guard;
- AssignUnsafe(other.Get());
+ AssignSafe(guard, other.Get());
return *this;
}
@@ -147,7 +199,7 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase,
OtherLocationPolicy,
OtherCheckingPolicy>& other) {
PersistentRegionLock guard;
- AssignUnsafe(other.Get());
+ AssignSafe(guard, other.Get());
return *this;
}
@@ -165,8 +217,13 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase,
return *this;
}
+ /**
+ * Assigns a raw pointer.
+ *
+ * Note: **Not thread-safe.**
+ */
BasicCrossThreadPersistent& operator=(T* other) {
- Assign(other);
+ AssignUnsafe(other);
return *this;
}
@@ -181,13 +238,24 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase,
return operator=(member.Get());
}
+ /**
+ * Assigns a nullptr.
+ *
+ * \returns the handle.
+ */
BasicCrossThreadPersistent& operator=(std::nullptr_t) {
Clear();
return *this;
}
+ /**
+ * Assigns the sentinel pointer.
+ *
+ * \returns the handle.
+ */
BasicCrossThreadPersistent& operator=(SentinelPointer s) {
- Assign(s);
+ PersistentRegionLock guard;
+ AssignSafe(guard, s);
return *this;
}
@@ -209,24 +277,8 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase,
* Clears the stored object.
*/
void Clear() {
- // Simplified version of `Assign()` to allow calling without a complete type
- // `T`.
- const void* old_value = GetValue();
- if (IsValid(old_value)) {
- PersistentRegionLock guard;
- old_value = GetValue();
- // The fast path check (IsValid()) does not acquire the lock. Reload
- // the value to ensure the reference has not been cleared.
- if (IsValid(old_value)) {
- CrossThreadPersistentRegion& region =
- this->GetPersistentRegion(old_value);
- region.FreeNode(GetNode());
- SetNode(nullptr);
- } else {
- CPPGC_DCHECK(!GetNode());
- }
- }
- SetValue(nullptr);
+ PersistentRegionLock guard;
+ AssignSafe(guard, nullptr);
}
/**
@@ -302,7 +354,7 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase,
v->TraceRoot(*handle, handle->Location());
}
- void Assign(T* ptr) {
+ void AssignUnsafe(T* ptr) {
const void* old_value = GetValue();
if (IsValid(old_value)) {
PersistentRegionLock guard;
@@ -330,7 +382,7 @@ class BasicCrossThreadPersistent final : public CrossThreadPersistentBase,
this->CheckPointer(ptr);
}
- void AssignUnsafe(T* ptr) {
+ void AssignSafe(PersistentRegionLock&, T* ptr) {
PersistentRegionLock::AssertLocked();
const void* old_value = GetValue();
if (IsValid(old_value)) {
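The destructor and Clear() above both rely on double-checked locking. A generic sketch of that idiom, with std::mutex and std::atomic standing in for the V8-internal PersistentRegionLock and node_ slot (all names here are stand-ins):

  #include <atomic>
  #include <mutex>

  std::mutex g_region_lock;            // stands in for PersistentRegionLock
  std::atomic<void*> g_node{nullptr};  // stands in for node_

  void ReleaseNode() {
    // Fast path: a lock-free check that there is anything to free at all.
    if (g_node.load(std::memory_order_acquire) == nullptr) return;
    std::lock_guard<std::mutex> guard(g_region_lock);
    // Recheck under the lock; the GC may have cleared the node concurrently.
    if (void* node = g_node.load(std::memory_order_relaxed)) {
      // ... return `node` to its region here ...
      g_node.store(nullptr, std::memory_order_release);
    }
  }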
diff --git a/deps/v8/include/cppgc/internal/caged-heap-local-data.h b/deps/v8/include/cppgc/internal/caged-heap-local-data.h
index 1fa60b6953..5b30d67029 100644
--- a/deps/v8/include/cppgc/internal/caged-heap-local-data.h
+++ b/deps/v8/include/cppgc/internal/caged-heap-local-data.h
@@ -53,10 +53,10 @@ static_assert(sizeof(AgeTable) == 1 * api_constants::kMB,
#endif // CPPGC_YOUNG_GENERATION
struct CagedHeapLocalData final {
- explicit CagedHeapLocalData(HeapBase* heap_base) : heap_base(heap_base) {}
+ CagedHeapLocalData(HeapBase&, PageAllocator&);
bool is_incremental_marking_in_progress = false;
- HeapBase* heap_base = nullptr;
+ HeapBase& heap_base;
#if defined(CPPGC_YOUNG_GENERATION)
AgeTable age_table;
#endif
diff --git a/deps/v8/include/cppgc/internal/finalizer-trait.h b/deps/v8/include/cppgc/internal/finalizer-trait.h
index a95126591c..7bd6f83bf6 100644
--- a/deps/v8/include/cppgc/internal/finalizer-trait.h
+++ b/deps/v8/include/cppgc/internal/finalizer-trait.h
@@ -76,6 +76,8 @@ struct FinalizerTrait {
}
public:
+ static constexpr bool HasFinalizer() { return kNonTrivialFinalizer; }
+
// The callback used to finalize an object of type T.
static constexpr FinalizationCallback kCallback =
kNonTrivialFinalizer ? Finalize : nullptr;
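The new HasFinalizer() predicate is constexpr, so it can be checked at compile time; a small sketch with illustrative types:

  #include "cppgc/internal/finalizer-trait.h"

  struct Trivial {};                   // trivially destructible
  struct WithDtor { ~WithDtor() {} };  // user-provided destructor

  static_assert(
      !cppgc::internal::FinalizerTrait<Trivial>::HasFinalizer(),
      "trivially destructible types need no finalizer");
  static_assert(
      cppgc::internal::FinalizerTrait<WithDtor>::HasFinalizer(),
      "a non-trivial destructor implies a finalization callback");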
diff --git a/deps/v8/include/cppgc/internal/gc-info.h b/deps/v8/include/cppgc/internal/gc-info.h
index 0830b19490..82a0d05343 100644
--- a/deps/v8/include/cppgc/internal/gc-info.h
+++ b/deps/v8/include/cppgc/internal/gc-info.h
@@ -19,11 +19,94 @@ namespace internal {
using GCInfoIndex = uint16_t;
-// Acquires a new GC info object and returns the index. In addition, also
-// updates `registered_index` atomically.
-V8_EXPORT GCInfoIndex
-EnsureGCInfoIndex(std::atomic<GCInfoIndex>& registered_index,
- FinalizationCallback, TraceCallback, NameCallback, bool);
+struct V8_EXPORT EnsureGCInfoIndexTrait final {
+ // Acquires a new GC info object and returns the index. In addition, also
+ // updates `registered_index` atomically.
+ template <typename T>
+ V8_INLINE static GCInfoIndex EnsureIndex(
+ std::atomic<GCInfoIndex>& registered_index) {
+ return EnsureGCInfoIndexTraitDispatch<T>{}(registered_index);
+ }
+
+ private:
+ template <typename T, bool = std::is_polymorphic<T>::value,
+ bool = FinalizerTrait<T>::HasFinalizer(),
+ bool = NameTrait<T>::HasNonHiddenName()>
+ struct EnsureGCInfoIndexTraitDispatch;
+
+ static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
+ TraceCallback,
+ FinalizationCallback,
+ NameCallback);
+ static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
+ TraceCallback,
+ FinalizationCallback);
+ static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
+ TraceCallback, NameCallback);
+ static GCInfoIndex EnsureGCInfoIndexPolymorphic(std::atomic<GCInfoIndex>&,
+ TraceCallback);
+ static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
+ TraceCallback,
+ FinalizationCallback,
+ NameCallback);
+ static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
+ TraceCallback,
+ FinalizationCallback);
+ static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
+ TraceCallback,
+ NameCallback);
+ static GCInfoIndex EnsureGCInfoIndexNonPolymorphic(std::atomic<GCInfoIndex>&,
+ TraceCallback);
+};
+
+#define DISPATCH(is_polymorphic, has_finalizer, has_non_hidden_name, function) \
+ template <typename T> \
+ struct EnsureGCInfoIndexTrait::EnsureGCInfoIndexTraitDispatch< \
+ T, is_polymorphic, has_finalizer, has_non_hidden_name> { \
+ V8_INLINE GCInfoIndex \
+ operator()(std::atomic<GCInfoIndex>& registered_index) { \
+ return function; \
+ } \
+ };
+
+// --------------------------------------------------------------------- //
+// DISPATCH(is_polymorphic, has_finalizer, has_non_hidden_name, function)
+// --------------------------------------------------------------------- //
+DISPATCH(true, true, true, //
+ EnsureGCInfoIndexPolymorphic(registered_index, //
+ TraceTrait<T>::Trace, //
+ FinalizerTrait<T>::kCallback, //
+ NameTrait<T>::GetName)) //
+DISPATCH(true, true, false, //
+ EnsureGCInfoIndexPolymorphic(registered_index, //
+ TraceTrait<T>::Trace, //
+ FinalizerTrait<T>::kCallback)) //
+DISPATCH(true, false, true, //
+ EnsureGCInfoIndexPolymorphic(registered_index, //
+ TraceTrait<T>::Trace, //
+ NameTrait<T>::GetName)) //
+DISPATCH(true, false, false, //
+ EnsureGCInfoIndexPolymorphic(registered_index, //
+ TraceTrait<T>::Trace)) //
+DISPATCH(false, true, true, //
+ EnsureGCInfoIndexNonPolymorphic(registered_index, //
+ TraceTrait<T>::Trace, //
+ FinalizerTrait<T>::kCallback, //
+ NameTrait<T>::GetName)) //
+DISPATCH(false, true, false, //
+ EnsureGCInfoIndexNonPolymorphic(registered_index, //
+ TraceTrait<T>::Trace, //
+ FinalizerTrait<T>::kCallback)) //
+DISPATCH(false, false, true, //
+ EnsureGCInfoIndexNonPolymorphic(registered_index, //
+ TraceTrait<T>::Trace, //
+ NameTrait<T>::GetName)) //
+DISPATCH(false, false, false, //
+ EnsureGCInfoIndexNonPolymorphic(registered_index, //
+ TraceTrait<T>::Trace)) //
+
+#undef DISPATCH
// Fold types based on finalizer behavior. Note that finalizer characteristics
// align with trace behavior, i.e., destructors are virtual when trace methods
@@ -57,16 +140,13 @@ struct GCInfoFolding {
// finalization, and naming.
template <typename T>
struct GCInfoTrait final {
- static GCInfoIndex Index() {
+ V8_INLINE static GCInfoIndex Index() {
static_assert(sizeof(T), "T must be fully defined");
static std::atomic<GCInfoIndex>
registered_index; // Uses zero initialization.
const GCInfoIndex index = registered_index.load(std::memory_order_acquire);
return index ? index
- : EnsureGCInfoIndex(
- registered_index, FinalizerTrait<T>::kCallback,
- TraceTrait<T>::Trace, NameTrait<T>::GetName,
- std::is_polymorphic<T>::value);
+ : EnsureGCInfoIndexTrait::EnsureIndex<T>(registered_index);
}
};
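The DISPATCH table above selects one of eight EnsureGCInfoIndex overloads from three compile-time booleans. A simplified standalone model of that partial-specialization dispatch, with illustrative names:

  #include <type_traits>

  // One primary template, specialized on a trait-derived boolean.
  template <typename T, bool IsPolymorphic = std::is_polymorphic<T>::value>
  struct Dispatch;

  template <typename T>
  struct Dispatch<T, true> {
    static const char* Select() { return "polymorphic path"; }
  };

  template <typename T>
  struct Dispatch<T, false> {
    static const char* Select() { return "non-polymorphic path"; }
  };

  struct Plain {};
  struct Virtual { virtual ~Virtual() = default; };

  // Dispatch<Plain>::Select()   -> "non-polymorphic path"
  // Dispatch<Virtual>::Select() -> "polymorphic path"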
diff --git a/deps/v8/include/cppgc/internal/name-trait.h b/deps/v8/include/cppgc/internal/name-trait.h
index 2e2da1eab4..32a3347859 100644
--- a/deps/v8/include/cppgc/internal/name-trait.h
+++ b/deps/v8/include/cppgc/internal/name-trait.h
@@ -6,6 +6,7 @@
#define INCLUDE_CPPGC_INTERNAL_NAME_TRAIT_H_
#include <cstddef>
+#include <type_traits>
#include "cppgc/name-provider.h"
#include "v8config.h" // NOLINT(build/include_directory)
@@ -67,6 +68,16 @@ class V8_EXPORT NameTraitBase {
template <typename T>
class NameTrait final : public NameTraitBase {
public:
+ static constexpr bool HasNonHiddenName() {
+#if CPPGC_SUPPORTS_COMPILE_TIME_TYPENAME
+ return true;
+#elif CPPGC_SUPPORTS_OBJECT_NAMES
+ return true;
+#else // !CPPGC_SUPPORTS_OBJECT_NAMES
+ return std::is_base_of<NameProvider, T>::value;
+#endif // !CPPGC_SUPPORTS_OBJECT_NAMES
+ }
+
static HeapObjectName GetName(const void* obj) {
return GetNameFor(static_cast<const T*>(obj));
}
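In default builds (no compile-time typenames, no object names) only types deriving from NameProvider report a non-hidden name. A hedged sketch; NamedObject is illustrative:

  #include "cppgc/name-provider.h"

  class NamedObject : public cppgc::NameProvider {
   public:
    // Gives heap dumps a human-readable name for instances of this type.
    const char* GetHumanReadableName() const final { return "NamedObject"; }
  };

  // Under the default configuration:
  //   NameTrait<NamedObject>::HasNonHiddenName() -> true
  //   NameTrait<int>::HasNonHiddenName()         -> false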
diff --git a/deps/v8/include/cppgc/prefinalizer.h b/deps/v8/include/cppgc/prefinalizer.h
index 29b18bef90..6153b37ff5 100644
--- a/deps/v8/include/cppgc/prefinalizer.h
+++ b/deps/v8/include/cppgc/prefinalizer.h
@@ -38,7 +38,7 @@ class PrefinalizerRegistration final {
"Only garbage collected objects can have prefinalizers"); \
Class* self = static_cast<Class*>(object); \
if (liveness_broker.IsHeapObjectAlive(self)) return false; \
- self->Class::PreFinalizer(); \
+ self->PreFinalizer(); \
return true; \
} \
\
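Dropping the Class:: qualification means the prefinalizer call now goes through normal (possibly virtual) dispatch. A hedged usage sketch of the macro; the type and method names are illustrative:

  #include "cppgc/garbage-collected.h"
  #include "cppgc/prefinalizer.h"
  #include "cppgc/visitor.h"

  class WithPreFinalizer
      : public cppgc::GarbageCollected<WithPreFinalizer> {
    CPPGC_USING_PRE_FINALIZER(WithPreFinalizer, Dispose);

   public:
    void Trace(cppgc::Visitor*) const {}

   protected:
    // Runs after marking and before sweeping; with this change Dispose() may
    // be virtual and overridden in subclasses.
    virtual void Dispose() { /* release non-GC resources */ }
  };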
diff --git a/deps/v8/include/js_protocol.pdl b/deps/v8/include/js_protocol.pdl
index ebf9eb7fe8..b34c8551ad 100644
--- a/deps/v8/include/js_protocol.pdl
+++ b/deps/v8/include/js_protocol.pdl
@@ -845,24 +845,6 @@ domain Profiler
# Type profile entries for parameters and return values of the functions in the script.
array of TypeProfileEntry entries
- # Collected counter information.
- experimental type CounterInfo extends object
- properties
- # Counter name.
- string name
- # Counter value.
- integer value
-
- # Runtime call counter information.
- experimental type RuntimeCallCounterInfo extends object
- properties
- # Counter name.
- string name
- # Counter value.
- number value
- # Counter time in seconds.
- number time
-
command disable
command enable
@@ -927,30 +909,6 @@ domain Profiler
# Type profile for all scripts since startTypeProfile() was turned on.
array of ScriptTypeProfile result
- # Enable counters collection.
- experimental command enableCounters
-
- # Disable counters collection.
- experimental command disableCounters
-
- # Retrieve counters.
- experimental command getCounters
- returns
- # Collected counters information.
- array of CounterInfo result
-
- # Enable run time call stats collection.
- experimental command enableRuntimeCallStats
-
- # Disable run time call stats collection.
- experimental command disableRuntimeCallStats
-
- # Retrieve run time call stats.
- experimental command getRuntimeCallStats
- returns
- # Collected runtime call counter information.
- array of RuntimeCallCounterInfo result
-
event consoleProfileFinished
parameters
string id
@@ -1469,6 +1427,8 @@ domain Runtime
experimental optional boolean accessorPropertiesOnly
# Whether preview should be generated for the results.
experimental optional boolean generatePreview
+ # If true, returns non-indexed properties only.
+ experimental optional boolean nonIndexedPropertiesOnly
returns
# Object properties.
array of PropertyDescriptor result
diff --git a/deps/v8/include/v8-array-buffer.h b/deps/v8/include/v8-array-buffer.h
new file mode 100644
index 0000000000..0ce2b65368
--- /dev/null
+++ b/deps/v8/include/v8-array-buffer.h
@@ -0,0 +1,433 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_ARRAY_BUFFER_H_
+#define INCLUDE_V8_ARRAY_BUFFER_H_
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-object.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class SharedArrayBuffer;
+
+#ifndef V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT
+// The number of required internal fields can be defined by embedder.
+#define V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT 2
+#endif
+
+enum class ArrayBufferCreationMode { kInternalized, kExternalized };
+
+/**
+ * A wrapper around the backing store (i.e. the raw memory) of an array buffer.
+ * See a document linked in http://crbug.com/v8/9908 for more information.
+ *
+ * The allocation and destruction of backing stores is generally managed by
+ * V8. Clients should always use standard C++ memory ownership types (i.e.
+ * std::unique_ptr and std::shared_ptr) to manage lifetimes of backing stores
+ * properly, since V8 internal objects may alias backing stores.
+ *
+ * This object does not keep the underlying |ArrayBuffer::Allocator| alive by
+ * default. Use Isolate::CreateParams::array_buffer_allocator_shared when
+ * creating the Isolate to make it hold a reference to the allocator itself.
+ */
+class V8_EXPORT BackingStore : public v8::internal::BackingStoreBase {
+ public:
+ ~BackingStore();
+
+ /**
+ * Return a pointer to the beginning of the memory block for this backing
+ * store. The pointer is only valid as long as this backing store object
+ * lives.
+ */
+ void* Data() const;
+
+ /**
+ * The length (in bytes) of this backing store.
+ */
+ size_t ByteLength() const;
+
+ /**
+ * Indicates whether the backing store was created for an ArrayBuffer or
+ * a SharedArrayBuffer.
+ */
+ bool IsShared() const;
+
+ /**
+ * Prevent implicit instantiation of operator delete with size_t argument.
+ * The size_t argument would be incorrect because ptr points to the
+ * internal BackingStore object.
+ */
+ void operator delete(void* ptr) { ::operator delete(ptr); }
+
+ /**
+ * Wrapper around ArrayBuffer::Allocator::Reallocate that preserves IsShared.
+ * Assumes that the backing_store was allocated by the ArrayBuffer allocator
+ * of the given isolate.
+ */
+ static std::unique_ptr<BackingStore> Reallocate(
+ v8::Isolate* isolate, std::unique_ptr<BackingStore> backing_store,
+ size_t byte_length);
+
+ /**
+ * This callback is used only if the memory block for a BackingStore cannot be
+ * allocated with an ArrayBuffer::Allocator. In such cases the destructor of
+ * the BackingStore invokes the callback to free the memory block.
+ */
+ using DeleterCallback = void (*)(void* data, size_t length,
+ void* deleter_data);
+
+ /**
+ * If the memory block of a BackingStore is static or is managed manually,
+ * then this empty deleter along with nullptr deleter_data can be passed to
+ * ArrayBuffer::NewBackingStore to indicate that.
+ *
+ * The manually managed case should be used with caution and only when it
+ * is guaranteed that the memory block freeing happens after detaching its
+ * ArrayBuffer.
+ */
+ static void EmptyDeleter(void* data, size_t length, void* deleter_data);
+
+ private:
+ /**
+ * See [Shared]ArrayBuffer::GetBackingStore and
+ * [Shared]ArrayBuffer::NewBackingStore.
+ */
+ BackingStore();
+};
+
+#if !defined(V8_IMMINENT_DEPRECATION_WARNINGS)
+// Use v8::BackingStore::DeleterCallback instead.
+using BackingStoreDeleterCallback = void (*)(void* data, size_t length,
+ void* deleter_data);
+
+#endif
+
+/**
+ * An instance of the built-in ArrayBuffer constructor (ES6 draft 15.13.5).
+ */
+class V8_EXPORT ArrayBuffer : public Object {
+ public:
+ /**
+ * A thread-safe allocator that V8 uses to allocate |ArrayBuffer|'s memory.
+ * The allocator is a global V8 setting. It has to be set via
+ * Isolate::CreateParams.
+ *
+ * Memory allocated through this allocator by V8 is accounted for as external
+ * memory by V8. Note that V8 keeps track of the memory for all internalized
+ * |ArrayBuffer|s. Responsibility for tracking external memory (using
+ * Isolate::AdjustAmountOfExternalAllocatedMemory) is handed over to the
+ * embedder upon externalization and taken over upon internalization (creating
+ * an internalized buffer from an existing buffer).
+ *
+ * Note that it is unsafe to call back into V8 from any of the allocator
+ * functions.
+ */
+ class V8_EXPORT Allocator {
+ public:
+ virtual ~Allocator() = default;
+
+ /**
+ * Allocate |length| bytes. Return nullptr if allocation is not successful.
+ * Memory should be initialized to zeroes.
+ */
+ virtual void* Allocate(size_t length) = 0;
+
+ /**
+ * Allocate |length| bytes. Return nullptr if allocation is not successful.
+ * Memory does not have to be initialized.
+ */
+ virtual void* AllocateUninitialized(size_t length) = 0;
+
+ /**
+ * Free the memory block of size |length|, pointed to by |data|.
+ * That memory is guaranteed to be previously allocated by |Allocate|.
+ */
+ virtual void Free(void* data, size_t length) = 0;
+
+ /**
+ * Reallocate the memory block of size |old_length| to a memory block of
+ * size |new_length| by expanding, contracting, or copying the existing
+ * memory block. If |new_length| > |old_length|, then the new part of
+ * the memory must be initialized to zeros. Return nullptr if reallocation
+ * is not successful.
+ *
+ * The caller guarantees that the memory block was previously allocated
+ * using Allocate or AllocateUninitialized.
+ *
+ * The default implementation allocates a new block and copies data.
+ */
+ virtual void* Reallocate(void* data, size_t old_length, size_t new_length);
+
+ /**
+ * ArrayBuffer allocation mode. kNormal is a malloc/free style allocation,
+ * while kReservation is for larger allocations with the ability to set
+ * access permissions.
+ */
+ enum class AllocationMode { kNormal, kReservation };
+
+ /**
+ * Convenience allocator.
+ *
+ * When the virtual memory cage is enabled, this allocator will allocate its
+ * backing memory inside the cage. Otherwise, it will rely on malloc/free.
+ *
+ * Caller takes ownership, i.e. the returned object needs to be freed using
+ * |delete allocator| once it is no longer in use.
+ */
+ static Allocator* NewDefaultAllocator();
+ };
+
+ /**
+ * Data length in bytes.
+ */
+ size_t ByteLength() const;
+
+ /**
+ * Create a new ArrayBuffer. Allocate |byte_length| bytes.
+ * Allocated memory will be owned by a created ArrayBuffer and
+ * will be deallocated when it is garbage-collected,
+ * unless the object is externalized.
+ */
+ static Local<ArrayBuffer> New(Isolate* isolate, size_t byte_length);
+
+ /**
+ * Create a new ArrayBuffer with an existing backing store.
+ * The created array keeps a reference to the backing store until the array
+ * is garbage collected. Note that the IsExternal bit does not affect this
+ * reference from the array to the backing store.
+ *
+ * In the future the IsExternal bit will be removed. Until then, the bit is
+ * set as follows. If the backing store does not own the underlying buffer,
+ * then the array is created in an externalized state. Otherwise, the array
+ * is created in an internalized state. In the latter case the array can be
+ * transitioned to the externalized state using Externalize(backing_store).
+ */
+ static Local<ArrayBuffer> New(Isolate* isolate,
+ std::shared_ptr<BackingStore> backing_store);
+
+ /**
+ * Returns a new standalone BackingStore that is allocated using the array
+ * buffer allocator of the isolate. The result can be later passed to
+ * ArrayBuffer::New.
+ *
+ * If the allocator returns nullptr, then the function may cause GCs in the
+ * given isolate and re-try the allocation. If GCs do not help, then the
+ * function will crash with an out-of-memory error.
+ */
+ static std::unique_ptr<BackingStore> NewBackingStore(Isolate* isolate,
+ size_t byte_length);
+ /**
+ * Returns a new standalone BackingStore that takes over the ownership of
+ * the given buffer. The destructor of the BackingStore invokes the given
+ * deleter callback.
+ *
+ * The result can be later passed to ArrayBuffer::New. The raw pointer
+ * to the buffer must not be passed again to any V8 API function.
+ */
+ static std::unique_ptr<BackingStore> NewBackingStore(
+ void* data, size_t byte_length, v8::BackingStore::DeleterCallback deleter,
+ void* deleter_data);
+
+ /**
+ * Returns true if this ArrayBuffer may be detached.
+ */
+ bool IsDetachable() const;
+
+ /**
+ * Detaches this ArrayBuffer and all its views (typed arrays).
+ * Detaching sets the byte length of the buffer and all typed arrays to zero,
+ * preventing JavaScript from ever accessing the underlying backing store.
+ * The ArrayBuffer should have been externalized and must be detachable.
+ */
+ void Detach();
+
+ /**
+ * Get a shared pointer to the backing store of this array buffer. This
+ * pointer coordinates the lifetime management of the internal storage
+ * with any live ArrayBuffers on the heap, even across isolates. The embedder
+ * should not attempt to manage lifetime of the storage through other means.
+ */
+ std::shared_ptr<BackingStore> GetBackingStore();
+
+ V8_INLINE static ArrayBuffer* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<ArrayBuffer*>(value);
+ }
+
+ static const int kInternalFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT;
+ static const int kEmbedderFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT;
+
+ private:
+ ArrayBuffer();
+ static void CheckCast(Value* obj);
+};
+
+#ifndef V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT
+// The number of required internal fields can be defined by embedder.
+#define V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT 2
+#endif
+
+/**
+ * A base class for an instance of one of "views" over ArrayBuffer,
+ * including TypedArrays and DataView (ES6 draft 15.13).
+ */
+class V8_EXPORT ArrayBufferView : public Object {
+ public:
+ /**
+ * Returns underlying ArrayBuffer.
+ */
+ Local<ArrayBuffer> Buffer();
+ /**
+ * Byte offset in |Buffer|.
+ */
+ size_t ByteOffset();
+ /**
+ * Size of a view in bytes.
+ */
+ size_t ByteLength();
+
+ /**
+ * Copy the contents of the ArrayBufferView's buffer to embedder-defined
+ * memory, without the additional overhead that calling
+ * ArrayBufferView::Buffer might incur.
+ *
+ * Will write at most min(|byte_length|, ByteLength) bytes starting at
+ * ByteOffset of the underlying buffer to the memory starting at |dest|.
+ * Returns the number of bytes actually written.
+ */
+ size_t CopyContents(void* dest, size_t byte_length);
+
+ /**
+ * Returns true if ArrayBufferView's backing ArrayBuffer has already been
+ * allocated.
+ */
+ bool HasBuffer() const;
+
+ V8_INLINE static ArrayBufferView* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<ArrayBufferView*>(value);
+ }
+
+ static const int kInternalFieldCount =
+ V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT;
+ static const int kEmbedderFieldCount =
+ V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT;
+
+ private:
+ ArrayBufferView();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of DataView constructor (ES6 draft 15.13.7).
+ */
+class V8_EXPORT DataView : public ArrayBufferView {
+ public:
+ static Local<DataView> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<DataView> New(Local<SharedArrayBuffer> shared_array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static DataView* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<DataView*>(value);
+ }
+
+ private:
+ DataView();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of the built-in SharedArrayBuffer constructor.
+ */
+class V8_EXPORT SharedArrayBuffer : public Object {
+ public:
+ /**
+ * Data length in bytes.
+ */
+ size_t ByteLength() const;
+
+ /**
+ * Create a new SharedArrayBuffer. Allocate |byte_length| bytes.
+ * Allocated memory will be owned by a created SharedArrayBuffer and
+ * will be deallocated when it is garbage-collected,
+ * unless the object is externalized.
+ */
+ static Local<SharedArrayBuffer> New(Isolate* isolate, size_t byte_length);
+
+ /**
+ * Create a new SharedArrayBuffer with an existing backing store.
+ * The created array keeps a reference to the backing store until the array
+ * is garbage collected. Note that the IsExternal bit does not affect this
+ * reference from the array to the backing store.
+ *
+ * In the future the IsExternal bit will be removed. Until then, the bit is
+ * set as follows. If the backing store does not own the underlying buffer,
+ * then the array is created in an externalized state. Otherwise, the array
+ * is created in an internalized state. In the latter case the array can be
+ * transitioned to the externalized state using Externalize(backing_store).
+ */
+ static Local<SharedArrayBuffer> New(
+ Isolate* isolate, std::shared_ptr<BackingStore> backing_store);
+
+ /**
+ * Returns a new standalone BackingStore that is allocated using the array
+ * buffer allocator of the isolate. The result can be later passed to
+ * SharedArrayBuffer::New.
+ *
+ * If the allocator returns nullptr, then the function may cause GCs in the
+ * given isolate and re-try the allocation. If GCs do not help, then the
+ * function will crash with an out-of-memory error.
+ */
+ static std::unique_ptr<BackingStore> NewBackingStore(Isolate* isolate,
+ size_t byte_length);
+ /**
+ * Returns a new standalone BackingStore that takes over the ownership of
+ * the given buffer. The destructor of the BackingStore invokes the given
+ * deleter callback.
+ *
+ * The result can be later passed to SharedArrayBuffer::New. The raw pointer
+ * to the buffer must not be passed again to any V8 functions.
+ */
+ static std::unique_ptr<BackingStore> NewBackingStore(
+ void* data, size_t byte_length, v8::BackingStore::DeleterCallback deleter,
+ void* deleter_data);
+
+ /**
+ * Get a shared pointer to the backing store of this array buffer. This
+ * pointer coordinates the lifetime management of the internal storage
+ * with any live ArrayBuffers on the heap, even across isolates. The embedder
+ * should not attempt to manage lifetime of the storage through other means.
+ */
+ std::shared_ptr<BackingStore> GetBackingStore();
+
+ V8_INLINE static SharedArrayBuffer* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<SharedArrayBuffer*>(value);
+ }
+
+ static const int kInternalFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT;
+
+ private:
+ SharedArrayBuffer();
+ static void CheckCast(Value* obj);
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_ARRAY_BUFFER_H_
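A hedged sketch of the typical embedder flow through these APIs, assuming an initialized isolate with a live HandleScope (the function names are illustrative):

  #include <cstdint>
  #include <memory>
  #include <utility>

  #include "v8-array-buffer.h"
  #include "v8-isolate.h"

  v8::Local<v8::ArrayBuffer> MakeBuffer(v8::Isolate* isolate) {
    // Allocate a standalone store via the isolate's ArrayBuffer allocator...
    std::unique_ptr<v8::BackingStore> store =
        v8::ArrayBuffer::NewBackingStore(isolate, 1024);
    // ...and hand ownership to the ArrayBuffer (the unique_ptr converts to
    // the shared_ptr parameter).
    return v8::ArrayBuffer::New(isolate, std::move(store));
  }

  std::unique_ptr<v8::BackingStore> WrapStaticBytes() {
    // EmptyDeleter tells V8 the memory is managed elsewhere and must not be
    // freed by the BackingStore destructor.
    static uint8_t bytes[64];
    return v8::ArrayBuffer::NewBackingStore(
        bytes, sizeof(bytes), v8::BackingStore::EmptyDeleter, nullptr);
  }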
diff --git a/deps/v8/include/v8-callbacks.h b/deps/v8/include/v8-callbacks.h
new file mode 100644
index 0000000000..ff894161f4
--- /dev/null
+++ b/deps/v8/include/v8-callbacks.h
@@ -0,0 +1,400 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_ISOLATE_CALLBACKS_H_
+#define INCLUDE_V8_ISOLATE_CALLBACKS_H_
+
+#include <stddef.h>
+
+#include <string>
+
+#include "cppgc/common.h"
+#include "v8-data.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+#if defined(V8_OS_WIN)
+struct _EXCEPTION_POINTERS;
+#endif
+
+namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+class Isolate;
+class Message;
+class Module;
+class Object;
+class Promise;
+class ScriptOrModule;
+class String;
+class UnboundScript;
+class Value;
+
+/**
+ * A JIT code event is issued each time code is added, moved or removed.
+ *
+ * \note removal events are not currently issued.
+ */
+struct JitCodeEvent {
+ enum EventType {
+ CODE_ADDED,
+ CODE_MOVED,
+ CODE_REMOVED,
+ CODE_ADD_LINE_POS_INFO,
+ CODE_START_LINE_INFO_RECORDING,
+ CODE_END_LINE_INFO_RECORDING
+ };
+ // Definition of the code position type. The "POSITION" type means the place
+ // in the source code that is of interest when making stack traces to
+ // pin-point the source location of a stack frame as closely as possible.
+ // The "STATEMENT_POSITION" type means the place at the beginning of each
+ // statement, and is used to indicate possible break locations.
+ enum PositionType { POSITION, STATEMENT_POSITION };
+
+ // There are three different kinds of CodeType, one for JIT code generated
+ // by the optimizing compiler, one for byte code generated for the
+ // interpreter, and one for code generated from Wasm. For JIT_CODE and
+ // WASM_CODE, |code_start| points to the beginning of jitted assembly code,
+ // while for BYTE_CODE events, |code_start| points to the first bytecode of
+ // the interpreted function.
+ enum CodeType { BYTE_CODE, JIT_CODE, WASM_CODE };
+
+ // Type of event.
+ EventType type;
+ CodeType code_type;
+ // Start of the instructions.
+ void* code_start;
+ // Size of the instructions.
+ size_t code_len;
+ // Script info for CODE_ADDED event.
+ Local<UnboundScript> script;
+ // User-defined data for *_LINE_INFO_* event. It's used to hold the source
+ // code line information which is returned from the
+ // CODE_START_LINE_INFO_RECORDING event. And it's passed to subsequent
+ // CODE_ADD_LINE_POS_INFO and CODE_END_LINE_INFO_RECORDING events.
+ void* user_data;
+
+ struct name_t {
+ // Name of the object associated with the code, note that the string is not
+ // zero-terminated.
+ const char* str;
+ // Number of chars in str.
+ size_t len;
+ };
+
+ struct line_info_t {
+ // PC offset
+ size_t offset;
+ // Code position
+ size_t pos;
+ // The position type.
+ PositionType position_type;
+ };
+
+ struct wasm_source_info_t {
+ // Source file name.
+ const char* filename;
+ // Length of filename.
+ size_t filename_size;
+ // Line number table, which maps offsets of JITted code to line numbers of
+ // source file.
+ const line_info_t* line_number_table;
+ // Number of entries in the line number table.
+ size_t line_number_table_size;
+ };
+
+ wasm_source_info_t* wasm_source_info;
+
+ union {
+ // Only valid for CODE_ADDED.
+ struct name_t name;
+
+ // Only valid for CODE_ADD_LINE_POS_INFO
+ struct line_info_t line_info;
+
+ // New location of instructions. Only valid for CODE_MOVED.
+ void* new_code_start;
+ };
+
+ Isolate* isolate;
+};
+
+/**
+ * Option flags passed to the SetJitCodeEventHandler function.
+ */
+enum JitCodeEventOptions {
+ kJitCodeEventDefault = 0,
+ // Generate callbacks for already existent code.
+ kJitCodeEventEnumExisting = 1
+};
+
+/**
+ * Callback function passed to SetJitCodeEventHandler.
+ *
+ * \param event code add, move or removal event.
+ */
+using JitCodeEventHandler = void (*)(const JitCodeEvent* event);
+
+// --- Garbage Collection Callbacks ---
+
+/**
+ * Applications can register callback functions which will be called before and
+ * after certain garbage collection operations. Allocations are not allowed in
+ * the callback functions; you therefore cannot manipulate objects (set or
+ * delete properties, for example), since it is possible that such operations
+ * will result in the allocation of objects.
+ */
+enum GCType {
+ kGCTypeScavenge = 1 << 0,
+ kGCTypeMarkSweepCompact = 1 << 1,
+ kGCTypeIncrementalMarking = 1 << 2,
+ kGCTypeProcessWeakCallbacks = 1 << 3,
+ kGCTypeAll = kGCTypeScavenge | kGCTypeMarkSweepCompact |
+ kGCTypeIncrementalMarking | kGCTypeProcessWeakCallbacks
+};
+
+/**
+ * GCCallbackFlags is used to notify additional information about the GC
+ * callback.
+ * - kGCCallbackFlagConstructRetainedObjectInfos: The GC callback is for
+ * constructing retained object infos.
+ * - kGCCallbackFlagForced: The GC callback is for a forced GC for testing.
+ * - kGCCallbackFlagSynchronousPhantomCallbackProcessing: The GC callback
+ * is called synchronously without getting posted to an idle task.
+ * - kGCCallbackFlagCollectAllAvailableGarbage: The GC callback is called
+ * in a phase where V8 is trying to collect all available garbage
+ * (e.g., handling a low memory notification).
+ * - kGCCallbackScheduleIdleGarbageCollection: The GC callback is called to
+ * trigger an idle garbage collection.
+ */
+enum GCCallbackFlags {
+ kNoGCCallbackFlags = 0,
+ kGCCallbackFlagConstructRetainedObjectInfos = 1 << 1,
+ kGCCallbackFlagForced = 1 << 2,
+ kGCCallbackFlagSynchronousPhantomCallbackProcessing = 1 << 3,
+ kGCCallbackFlagCollectAllAvailableGarbage = 1 << 4,
+ kGCCallbackFlagCollectAllExternalMemory = 1 << 5,
+ kGCCallbackScheduleIdleGarbageCollection = 1 << 6,
+};
+
+using GCCallback = void (*)(GCType type, GCCallbackFlags flags);
+
+using InterruptCallback = void (*)(Isolate* isolate, void* data);
+
+/**
+ * This callback is invoked when the heap size is close to the heap limit and
+ * V8 is likely to abort with out-of-memory error.
+ * The callback can extend the heap limit by returning a value that is greater
+ * than the current_heap_limit. The initial heap limit is the limit that was
+ * set after heap setup.
+ */
+using NearHeapLimitCallback = size_t (*)(void* data, size_t current_heap_limit,
+ size_t initial_heap_limit);
+
+/**
+ * Callback function passed to SetUnhandledExceptionCallback.
+ */
+#if defined(V8_OS_WIN)
+using UnhandledExceptionCallback =
+ int (*)(_EXCEPTION_POINTERS* exception_pointers);
+#endif
+
+// --- Counters Callbacks ---
+
+using CounterLookupCallback = int* (*)(const char* name);
+
+using CreateHistogramCallback = void* (*)(const char* name, int min, int max,
+ size_t buckets);
+
+using AddHistogramSampleCallback = void (*)(void* histogram, int sample);
+
+/**
+ * HostImportModuleDynamicallyCallback is called when we require the
+ * embedder to load a module. This is used as part of the dynamic
+ * import syntax.
+ *
+ * The referrer contains metadata about the script/module that calls
+ * import.
+ *
+ * The specifier is the name of the module that should be imported.
+ *
+ * The embedder must compile, instantiate, evaluate the Module, and
+ * obtain its namespace object.
+ *
+ * The Promise returned from this function is forwarded to userland
+ * JavaScript. The embedder must resolve this promise with the module
+ * namespace object. In case of an exception, the embedder must reject
+ * this promise with the exception. If the promise creation itself
+ * fails (e.g. due to stack overflow), the embedder must propagate
+ * that exception by returning an empty MaybeLocal.
+ */
+using HostImportModuleDynamicallyCallback V8_DEPRECATED(
+ "Use HostImportModuleDynamicallyWithImportAssertionsCallback instead") =
+ MaybeLocal<Promise> (*)(Local<Context> context,
+ Local<ScriptOrModule> referrer,
+ Local<String> specifier);
+
+// --- Exceptions ---
+
+using FatalErrorCallback = void (*)(const char* location, const char* message);
+
+using OOMErrorCallback = void (*)(const char* location, bool is_heap_oom);
+
+using MessageCallback = void (*)(Local<Message> message, Local<Value> data);
+
+// --- Tracing ---
+
+enum LogEventStatus : int { kStart = 0, kEnd = 1, kStamp = 2 };
+using LogEventCallback = void (*)(const char* name,
+ int /* LogEventStatus */ status);
+
+// --- Crashkeys Callback ---
+enum class CrashKeyId {
+ kIsolateAddress,
+ kReadonlySpaceFirstPageAddress,
+ kMapSpaceFirstPageAddress,
+ kCodeSpaceFirstPageAddress,
+ kDumpType,
+};
+
+using AddCrashKeyCallback = void (*)(CrashKeyId id, const std::string& value);
+
+// --- Enter/Leave Script Callback ---
+using BeforeCallEnteredCallback = void (*)(Isolate*);
+using CallCompletedCallback = void (*)(Isolate*);
+
+// --- AllowCodeGenerationFromStrings callbacks ---
+
+/**
+ * Callback to check if code generation from strings is allowed. See
+ * Context::AllowCodeGenerationFromStrings.
+ */
+using AllowCodeGenerationFromStringsCallback = bool (*)(Local<Context> context,
+ Local<String> source);
+
+struct ModifyCodeGenerationFromStringsResult {
+ // If true, proceed with the codegen algorithm. Otherwise, block it.
+ bool codegen_allowed = false;
+ // Overwrite the original source with this string, if present.
+ // Use the original source if empty.
+ // This field is considered only if codegen_allowed is true.
+ MaybeLocal<String> modified_source;
+};
+
+/**
+ * Access type specification.
+ */
+enum AccessType {
+ ACCESS_GET,
+ ACCESS_SET,
+ ACCESS_HAS,
+ ACCESS_DELETE,
+ ACCESS_KEYS
+};
+
+// --- Failed Access Check Callback ---
+
+using FailedAccessCheckCallback = void (*)(Local<Object> target,
+ AccessType type, Local<Value> data);
+
+/**
+ * Callback to check if codegen is allowed from a source object, and convert
+ * the source to string if necessary. See: ModifyCodeGenerationFromStrings.
+ */
+using ModifyCodeGenerationFromStringsCallback =
+ ModifyCodeGenerationFromStringsResult (*)(Local<Context> context,
+ Local<Value> source);
+using ModifyCodeGenerationFromStringsCallback2 =
+ ModifyCodeGenerationFromStringsResult (*)(Local<Context> context,
+ Local<Value> source,
+ bool is_code_like);
+
+// --- WebAssembly compilation callbacks ---
+using ExtensionCallback = bool (*)(const FunctionCallbackInfo<Value>&);
+
+using AllowWasmCodeGenerationCallback = bool (*)(Local<Context> context,
+ Local<String> source);
+
+// --- Callback for APIs defined on v8-supported objects, but implemented
+// by the embedder. Example: WebAssembly.{compile|instantiate}Streaming ---
+using ApiImplementationCallback = void (*)(const FunctionCallbackInfo<Value>&);
+
+// --- Callback for WebAssembly.compileStreaming ---
+using WasmStreamingCallback = void (*)(const FunctionCallbackInfo<Value>&);
+
+// --- Callback for loading source map file for Wasm profiling support
+using WasmLoadSourceMapCallback = Local<String> (*)(Isolate* isolate,
+ const char* name);
+
+// --- Callback for checking if WebAssembly Simd is enabled ---
+using WasmSimdEnabledCallback = bool (*)(Local<Context> context);
+
+// --- Callback for checking if WebAssembly exceptions are enabled ---
+using WasmExceptionsEnabledCallback = bool (*)(Local<Context> context);
+
+// --- Callback for checking if the SharedArrayBuffer constructor is enabled ---
+using SharedArrayBufferConstructorEnabledCallback =
+ bool (*)(Local<Context> context);
+
+/**
+ * HostImportModuleDynamicallyWithImportAssertionsCallback is called when we
+ * require the embedder to load a module. This is used as part of the dynamic
+ * import syntax.
+ *
+ * The referrer contains metadata about the script/module that calls
+ * import.
+ *
+ * The specifier is the name of the module that should be imported.
+ *
+ * The import_assertions are import assertions for this request in the form:
+ * [key1, value1, key2, value2, ...] where the keys and values are of type
+ * v8::String. Note, unlike the FixedArray passed to ResolveModuleCallback and
+ * returned from ModuleRequest::GetImportAssertions(), this array does not
+ * contain the source Locations of the assertions.
+ *
+ * The embedder must compile, instantiate, evaluate the Module, and
+ * obtain its namespace object.
+ *
+ * The Promise returned from this function is forwarded to userland
+ * JavaScript. The embedder must resolve this promise with the module
+ * namespace object. In case of an exception, the embedder must reject
+ * this promise with the exception. If the promise creation itself
+ * fails (e.g. due to stack overflow), the embedder must propagate
+ * that exception by returning an empty MaybeLocal.
+ */
+using HostImportModuleDynamicallyWithImportAssertionsCallback =
+ MaybeLocal<Promise> (*)(Local<Context> context,
+ Local<ScriptOrModule> referrer,
+ Local<String> specifier,
+ Local<FixedArray> import_assertions);
+
+/**
+ * HostInitializeImportMetaObjectCallback is called the first time import.meta
+ * is accessed for a module. Subsequent access will reuse the same value.
+ *
+ * The method combines two implementation-defined abstract operations into one:
+ * HostGetImportMetaProperties and HostFinalizeImportMeta.
+ *
+ * The embedder should use v8::Object::CreateDataProperty to add properties on
+ * the meta object.
+ */
+using HostInitializeImportMetaObjectCallback = void (*)(Local<Context> context,
+ Local<Module> module,
+ Local<Object> meta);
+
+/**
+ * PrepareStackTraceCallback is called when the stack property of an error is
+ * first accessed. The return value will be used as the stack value. If this
+ * callback is registered, the |Error.prepareStackTrace| API will be disabled.
+ * |sites| is an array of call sites, specified in
+ * https://v8.dev/docs/stack-trace-api
+ */
+using PrepareStackTraceCallback = MaybeLocal<Value> (*)(Local<Context> context,
+ Local<Value> error,
+ Local<Array> sites);
+
+} // namespace v8
+
+#endif // INCLUDE_V8_ISOLATE_CALLBACKS_H_
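A hedged sketch of wiring up one of these callback types; it uses Isolate::AddNearHeapLimitCallback, which takes a NearHeapLimitCallback plus a data pointer (the growth policy here is purely illustrative):

  #include <cstddef>

  #include "v8-isolate.h"

  // Extends the heap limit by 50% whenever V8 approaches it.
  size_t OnNearHeapLimit(void* /*data*/, size_t current_heap_limit,
                         size_t /*initial_heap_limit*/) {
    return current_heap_limit + current_heap_limit / 2;
  }

  void InstallHeapLimitHandler(v8::Isolate* isolate) {
    isolate->AddNearHeapLimitCallback(OnNearHeapLimit, nullptr);
  }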
diff --git a/deps/v8/include/v8-container.h b/deps/v8/include/v8-container.h
new file mode 100644
index 0000000000..ce06860364
--- /dev/null
+++ b/deps/v8/include/v8-container.h
@@ -0,0 +1,129 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_CONTAINER_H_
+#define INCLUDE_V8_CONTAINER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-object.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Context;
+class Isolate;
+
+/**
+ * An instance of the built-in array constructor (ECMA-262, 15.4.2).
+ */
+class V8_EXPORT Array : public Object {
+ public:
+ uint32_t Length() const;
+
+ /**
+ * Creates a JavaScript array with the given length. If the length
+ * is negative the returned array will have length 0.
+ */
+ static Local<Array> New(Isolate* isolate, int length = 0);
+
+ /**
+ * Creates a JavaScript array out of a Local<Value> array in C++
+ * with a known length.
+ */
+ static Local<Array> New(Isolate* isolate, Local<Value>* elements,
+ size_t length);
+ V8_INLINE static Array* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Array*>(value);
+ }
+
+ private:
+ Array();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of the built-in Map constructor (ECMA-262, 6th Edition, 23.1.1).
+ */
+class V8_EXPORT Map : public Object {
+ public:
+ size_t Size() const;
+ void Clear();
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> Get(Local<Context> context,
+ Local<Value> key);
+ V8_WARN_UNUSED_RESULT MaybeLocal<Map> Set(Local<Context> context,
+ Local<Value> key,
+ Local<Value> value);
+ V8_WARN_UNUSED_RESULT Maybe<bool> Has(Local<Context> context,
+ Local<Value> key);
+ V8_WARN_UNUSED_RESULT Maybe<bool> Delete(Local<Context> context,
+ Local<Value> key);
+
+ /**
+ * Returns an array of length Size() * 2, where index N is the Nth key and
+ * index N + 1 is the Nth value.
+ */
+ Local<Array> AsArray() const;
+
+ /**
+ * Creates a new empty Map.
+ */
+ static Local<Map> New(Isolate* isolate);
+
+ V8_INLINE static Map* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Map*>(value);
+ }
+
+ private:
+ Map();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of the built-in Set constructor (ECMA-262, 6th Edition, 23.2.1).
+ */
+class V8_EXPORT Set : public Object {
+ public:
+ size_t Size() const;
+ void Clear();
+ V8_WARN_UNUSED_RESULT MaybeLocal<Set> Add(Local<Context> context,
+ Local<Value> key);
+ V8_WARN_UNUSED_RESULT Maybe<bool> Has(Local<Context> context,
+ Local<Value> key);
+ V8_WARN_UNUSED_RESULT Maybe<bool> Delete(Local<Context> context,
+ Local<Value> key);
+
+ /**
+ * Returns an array of the keys in this Set.
+ */
+ Local<Array> AsArray() const;
+
+ /**
+ * Creates a new empty Set.
+ */
+ static Local<Set> New(Isolate* isolate);
+
+ V8_INLINE static Set* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Set*>(value);
+ }
+
+ private:
+ Set();
+ static void CheckCast(Value* obj);
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_CONTAINER_H_
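A hedged usage sketch of the Map API above; it assumes an entered context and a live HandleScope, and that Number lives in v8-primitive.h:

  #include "v8-container.h"
  #include "v8-local-handle.h"
  #include "v8-primitive.h"

  void FillMap(v8::Isolate* isolate, v8::Local<v8::Context> context) {
    v8::HandleScope scope(isolate);
    v8::Local<v8::Map> map = v8::Map::New(isolate);
    v8::Local<v8::Value> key = v8::Number::New(isolate, 1);
    v8::Local<v8::Value> value = v8::Number::New(isolate, 2);
    // Set() returns a MaybeLocal, so failure must be checked explicitly.
    if (!map->Set(context, key, value).IsEmpty()) {
      // AsArray() flattens entries to [key0, value0, key1, value1, ...].
      v8::Local<v8::Array> flat = map->AsArray();
      (void)flat;
    }
  }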
diff --git a/deps/v8/include/v8-context.h b/deps/v8/include/v8-context.h
new file mode 100644
index 0000000000..bd28c6c9c9
--- /dev/null
+++ b/deps/v8/include/v8-context.h
@@ -0,0 +1,418 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_CONTEXT_H_
+#define INCLUDE_V8_CONTEXT_H_
+
+#include <stdint.h>
+
+#include "v8-data.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-snapshot.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Function;
+class MicrotaskQueue;
+class Object;
+class ObjectTemplate;
+class Value;
+class String;
+
+/**
+ * A container for extension names.
+ */
+class V8_EXPORT ExtensionConfiguration {
+ public:
+ ExtensionConfiguration() : name_count_(0), names_(nullptr) {}
+ ExtensionConfiguration(int name_count, const char* names[])
+ : name_count_(name_count), names_(names) {}
+
+ const char** begin() const { return &names_[0]; }
+ const char** end() const { return &names_[name_count_]; }
+
+ private:
+ const int name_count_;
+ const char** names_;
+};
+
+/**
+ * A sandboxed execution context with its own set of built-in objects
+ * and functions.
+ */
+class V8_EXPORT Context : public Data {
+ public:
+ /**
+ * Returns the global proxy object.
+ *
+ * The global proxy object is a thin wrapper whose prototype points to the
+ * actual context's global object, with properties like Object, etc. This is
+ * done for security reasons (for more details see
+ * https://wiki.mozilla.org/Gecko:SplitWindow).
+ *
+ * Please note that changes to the global proxy object's prototype would most
+ * likely break the VM: v8 expects only the global object as the prototype of
+ * the global proxy object.
+ */
+ Local<Object> Global();
+
+ /**
+ * Detaches the global object from its context before
+ * the global object can be reused to create a new context.
+ */
+ void DetachGlobal();
+
+ /**
+ * Creates a new context and returns a handle to the newly allocated
+ * context.
+ *
+ * \param isolate The isolate in which to create the context.
+ *
+ * \param extensions An optional extension configuration containing
+ * the extensions to be installed in the newly created context.
+ *
+ * \param global_template An optional object template from which the
+ * global object for the newly created context will be created.
+ *
+ * \param global_object An optional global object to be reused for
+ * the newly created context. This global object must have been
+ * created by a previous call to Context::New with the same global
+ * template. The state of the global object will be completely reset
+ * and only object identity will remain.
+ */
+ static Local<Context> New(
+ Isolate* isolate, ExtensionConfiguration* extensions = nullptr,
+ MaybeLocal<ObjectTemplate> global_template = MaybeLocal<ObjectTemplate>(),
+ MaybeLocal<Value> global_object = MaybeLocal<Value>(),
+ DeserializeInternalFieldsCallback internal_fields_deserializer =
+ DeserializeInternalFieldsCallback(),
+ MicrotaskQueue* microtask_queue = nullptr);
+
+ /**
+ * Create a new context from a (non-default) context snapshot. There
+ * is no way to provide a global object template since we do not create
+ * a new global object from template, but we can reuse a global object.
+ *
+ * \param isolate See v8::Context::New.
+ *
+ * \param context_snapshot_index The index of the context snapshot to
+ * deserialize from. Use v8::Context::New for the default snapshot.
+ *
+ * \param embedder_fields_deserializer Optional callback to deserialize
+ * internal fields. It should match the SerializeInternalFieldCallback used
+ * to serialize.
+ *
+ * \param extensions See v8::Context::New.
+ *
+ * \param global_object See v8::Context::New.
+ */
+ static MaybeLocal<Context> FromSnapshot(
+ Isolate* isolate, size_t context_snapshot_index,
+ DeserializeInternalFieldsCallback embedder_fields_deserializer =
+ DeserializeInternalFieldsCallback(),
+ ExtensionConfiguration* extensions = nullptr,
+ MaybeLocal<Value> global_object = MaybeLocal<Value>(),
+ MicrotaskQueue* microtask_queue = nullptr);
+
+ /**
+ * Returns a global object that isn't backed by an actual context.
+ *
+ * The global template needs to have access checks with handlers installed.
+ * If an existing global object is passed in, the global object is detached
+ * from its context.
+ *
+ * Note that this is different from a detached context where all accesses to
+ * the global proxy will fail. Instead, the access check handlers are invoked.
+ *
+ * It is also not possible to detach an object returned by this method.
+ * Instead, the access check handlers need to return nothing to achieve the
+ * same effect.
+ *
+ * It is possible, however, to create a new context from the global object
+ * returned by this method.
+ */
+ static MaybeLocal<Object> NewRemoteContext(
+ Isolate* isolate, Local<ObjectTemplate> global_template,
+ MaybeLocal<Value> global_object = MaybeLocal<Value>());
+
+ /**
+ * Sets the security token for the context. To access an object in
+ * another context, the security tokens must match.
+ */
+ void SetSecurityToken(Local<Value> token);
+
+ /** Restores the security token to the default value. */
+ void UseDefaultSecurityToken();
+
+ /** Returns the security token of this context.*/
+ Local<Value> GetSecurityToken();
+
+ /**
+ * Enter this context. After entering a context, all code compiled
+ * and run is compiled and run in this context. If another context
+ * is already entered, this old context is saved so it can be
+ * restored when the new context is exited.
+ */
+ void Enter();
+
+ /**
+ * Exit this context. Exiting the current context restores the
+ * context that was in place when entering the current context.
+ */
+ void Exit();
+
+ /** Returns the isolate associated with this context. */
+ Isolate* GetIsolate();
+
+ /** Returns the microtask queue associated with a current context. */
+ MicrotaskQueue* GetMicrotaskQueue();
+
+ /**
+ * The field at kDebugIdIndex used to be reserved for the inspector.
+ * It now serves no purpose.
+ */
+ enum EmbedderDataFields { kDebugIdIndex = 0 };
+
+ /**
+ * Return the number of fields allocated for embedder data.
+ */
+ uint32_t GetNumberOfEmbedderDataFields();
+
+ /**
+ * Gets the embedder data with the given index, which must have been set by a
+ * previous call to SetEmbedderData with the same index.
+ */
+ V8_INLINE Local<Value> GetEmbedderData(int index);
+
+ /**
+ * Gets the binding object used by V8 extras. Extra natives get a reference
+ * to this object and can use it to "export" functionality by adding
+ * properties. Extra natives can also "import" functionality by accessing
+ * properties added by the embedder using the V8 API.
+ */
+ Local<Object> GetExtrasBindingObject();
+
+ /**
+ * Sets the embedder data with the given index, growing the data as
+ * needed. Note that index 0 currently has a special meaning for Chrome's
+ * debugger.
+ */
+ void SetEmbedderData(int index, Local<Value> value);
+
+ /**
+ * Gets a 2-byte-aligned native pointer from the embedder data with the given
+ * index, which must have been set by a previous call to
+ * SetAlignedPointerInEmbedderData with the same index. Note that index 0
+ * currently has a special meaning for Chrome's debugger.
+ */
+ V8_INLINE void* GetAlignedPointerFromEmbedderData(int index);
+
+ /**
+ * Sets a 2-byte-aligned native pointer in the embedder data with the given
+ * index, growing the data as needed. Note that index 0 currently has a
+ * special meaning for Chrome's debugger.
+ */
+ void SetAlignedPointerInEmbedderData(int index, void* value);
+
+ /**
+ * Control whether code generation from strings is allowed. Calling
+ * this method with false will disable 'eval' and the 'Function'
+ * constructor for code running in this context. If 'eval' or the
+ * 'Function' constructor are used an exception will be thrown.
+ *
+ * If code generation from strings is not allowed the
+ * V8::AllowCodeGenerationFromStrings callback will be invoked if
+ * set before blocking the call to 'eval' or the 'Function'
+ * constructor. If that callback returns true, the call will be
+ * allowed, otherwise an exception will be thrown. If no callback is
+ * set an exception will be thrown.
+ */
+ void AllowCodeGenerationFromStrings(bool allow);
+
+ /**
+ * Returns true if code generation from strings is allowed for the context.
+ * For more details see AllowCodeGenerationFromStrings(bool) documentation.
+ */
+ bool IsCodeGenerationFromStringsAllowed() const;
+
+ /**
+ * Sets the error description for the exception that is thrown when
+ * code generation from strings is not allowed and 'eval' or the 'Function'
+ * constructor are called.
+ */
+ void SetErrorMessageForCodeGenerationFromStrings(Local<String> message);
+
+ /**
+ * Return data that was previously attached to the context snapshot via
+ * SnapshotCreator, and removes the reference to it.
+ * Repeated call with the same index returns an empty MaybeLocal.
+ */
+ template <class T>
+ V8_INLINE MaybeLocal<T> GetDataFromSnapshotOnce(size_t index);
+
+ /**
+ * If a callback is set, any attempt to execute JavaScript in this context
+ * aborts, calls the specified callback, and throws an exception.
+ * To unset the callback, pass nullptr.
+ */
+ using AbortScriptExecutionCallback = void (*)(Isolate* isolate,
+ Local<Context> context);
+ void SetAbortScriptExecution(AbortScriptExecutionCallback callback);
+
+ /**
+ * Returns the value that was set or restored by
+ * SetContinuationPreservedEmbedderData(), if any.
+ */
+ Local<Value> GetContinuationPreservedEmbedderData() const;
+
+ /**
+ * Sets a value that will be stored on continuations and reset while the
+ * continuation runs.
+ */
+ void SetContinuationPreservedEmbedderData(Local<Value> context);
+
+ /**
+ * Set or clear hooks to be invoked for promise lifecycle operations.
+ * To clear a hook, set it to an empty v8::Function. Each function will
+ * receive the observed promise as the first argument. If a chaining
+ * operation is used on a promise, the init will additionally receive
+ * the parent promise as the second argument.
+ */
+ void SetPromiseHooks(Local<Function> init_hook, Local<Function> before_hook,
+ Local<Function> after_hook,
+ Local<Function> resolve_hook);
+
+ /**
+ * Stack-allocated class which sets the execution context for all
+ * operations executed within a local scope.
+ */
+ class V8_NODISCARD Scope {
+ public:
+ explicit V8_INLINE Scope(Local<Context> context) : context_(context) {
+ context_->Enter();
+ }
+ V8_INLINE ~Scope() { context_->Exit(); }
+
+ private:
+ Local<Context> context_;
+ };
+
+ /**
+ * Stack-allocated class to support the backup incumbent settings object
+ * stack.
+ * https://html.spec.whatwg.org/multipage/webappapis.html#backup-incumbent-settings-object-stack
+ */
+ class V8_EXPORT V8_NODISCARD BackupIncumbentScope final {
+ public:
+ /**
+ * |backup_incumbent_context| is pushed onto the backup incumbent settings
+ * object stack.
+ */
+ explicit BackupIncumbentScope(Local<Context> backup_incumbent_context);
+ ~BackupIncumbentScope();
+
+ /**
+ * Returns an address that is comparable with JS stack addresses. Note that
+ * the JS stack may be allocated separately from the native stack. See also
+ * |TryCatch::JSStackComparableAddressPrivate| for details.
+ */
+ V8_DEPRECATE_SOON(
+ "This is private V8 information that should not be exposed in the API.")
+ uintptr_t JSStackComparableAddress() const {
+ return JSStackComparableAddressPrivate();
+ }
+
+ private:
+ friend class internal::Isolate;
+
+ uintptr_t JSStackComparableAddressPrivate() const {
+ return js_stack_comparable_address_;
+ }
+
+ Local<Context> backup_incumbent_context_;
+ uintptr_t js_stack_comparable_address_ = 0;
+ const BackupIncumbentScope* prev_ = nullptr;
+ };
+
+ V8_INLINE static Context* Cast(Data* data);
+
+ private:
+ friend class Value;
+ friend class Script;
+ friend class Object;
+ friend class Function;
+
+ static void CheckCast(Data* obj);
+
+ internal::Address* GetDataFromSnapshotOnce(size_t index);
+ Local<Value> SlowGetEmbedderData(int index);
+ void* SlowGetAlignedPointerFromEmbedderData(int index);
+};
+
+// --- Implementation ---
+
+Local<Value> Context::GetEmbedderData(int index) {
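+  // Fast path: with V8_ENABLE_CHECKS disabled, read the embedder data slot
+  // directly from the context object; otherwise use the checked slow path.
+  // (Editorial comment added for clarity.)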
+#ifndef V8_ENABLE_CHECKS
+ using A = internal::Address;
+ using I = internal::Internals;
+ A ctx = *reinterpret_cast<const A*>(this);
+ A embedder_data =
+ I::ReadTaggedPointerField(ctx, I::kNativeContextEmbedderDataOffset);
+ int value_offset =
+ I::kEmbedderDataArrayHeaderSize + (I::kEmbedderDataSlotSize * index);
+ A value = I::ReadRawField<A>(embedder_data, value_offset);
+#ifdef V8_COMPRESS_POINTERS
+ // We read the full pointer value and then decompress it in order to avoid
+ // dealing with potential endianness issues.
+ value =
+ I::DecompressTaggedAnyField(embedder_data, static_cast<uint32_t>(value));
+#endif
+ internal::Isolate* isolate = internal::IsolateFromNeverReadOnlySpaceObject(
+ *reinterpret_cast<A*>(this));
+ A* result = HandleScope::CreateHandle(isolate, value);
+ return Local<Value>(reinterpret_cast<Value*>(result));
+#else
+ return SlowGetEmbedderData(index);
+#endif
+}
+
+void* Context::GetAlignedPointerFromEmbedderData(int index) {
+#ifndef V8_ENABLE_CHECKS
+ using A = internal::Address;
+ using I = internal::Internals;
+ A ctx = *reinterpret_cast<const A*>(this);
+ A embedder_data =
+ I::ReadTaggedPointerField(ctx, I::kNativeContextEmbedderDataOffset);
+ int value_offset =
+ I::kEmbedderDataArrayHeaderSize + (I::kEmbedderDataSlotSize * index);
+#ifdef V8_HEAP_SANDBOX
+ value_offset += I::kEmbedderDataSlotRawPayloadOffset;
+#endif
+ internal::Isolate* isolate = I::GetIsolateForHeapSandbox(ctx);
+ return reinterpret_cast<void*>(
+ I::ReadExternalPointerField(isolate, embedder_data, value_offset,
+ internal::kEmbedderDataSlotPayloadTag));
+#else
+ return SlowGetAlignedPointerFromEmbedderData(index);
+#endif
+}
+
+template <class T>
+MaybeLocal<T> Context::GetDataFromSnapshotOnce(size_t index) {
+ T* data = reinterpret_cast<T*>(GetDataFromSnapshotOnce(index));
+ if (data) internal::PerformCastCheck(data);
+ return Local<T>(data);
+}
+
+Context* Context::Cast(v8::Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return static_cast<Context*>(data);
+}
+
+} // namespace v8
+
+#endif // INCLUDE_V8_CONTEXT_H_
diff --git a/deps/v8/include/v8-cppgc.h b/deps/v8/include/v8-cppgc.h
index 745fb04347..813e0842fa 100644
--- a/deps/v8/include/v8-cppgc.h
+++ b/deps/v8/include/v8-cppgc.h
@@ -14,8 +14,9 @@
#include "cppgc/heap-statistics.h"
#include "cppgc/internal/write-barrier.h"
#include "cppgc/visitor.h"
-#include "v8-internal.h" // NOLINT(build/include_directory)
-#include "v8.h" // NOLINT(build/include_directory)
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8-platform.h" // NOLINT(build/include_directory)
+#include "v8-traced-handle.h" // NOLINT(build/include_directory)
namespace cppgc {
class AllocationHandle;
@@ -24,6 +25,8 @@ class HeapHandle;
namespace v8 {
+class Object;
+
namespace internal {
class CppHeap;
} // namespace internal
diff --git a/deps/v8/include/v8-data.h b/deps/v8/include/v8-data.h
new file mode 100644
index 0000000000..dbd36c9a03
--- /dev/null
+++ b/deps/v8/include/v8-data.h
@@ -0,0 +1,65 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_DATA_H_
+#define INCLUDE_V8_DATA_H_
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Context;
+
+/**
+ * The superclass of objects that can reside on V8's heap.
+ */
+class V8_EXPORT Data {
+ public:
+ /**
+ * Returns true if this data is a |v8::Value|.
+ */
+ bool IsValue() const;
+
+ /**
+ * Returns true if this data is a |v8::Module|.
+ */
+ bool IsModule() const;
+
+ /**
+ * Returns true if this data is a |v8::Private|.
+ */
+ bool IsPrivate() const;
+
+ /**
+ * Returns true if this data is a |v8::ObjectTemplate|.
+ */
+ bool IsObjectTemplate() const;
+
+ /**
+ * Returns true if this data is a |v8::FunctionTemplate|.
+ */
+ bool IsFunctionTemplate() const;
+
+ /**
+ * Returns true if this data is a |v8::Context|.
+ */
+ bool IsContext() const;
+
+ private:
+ Data();
+};
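+
+// Editorial sketch of narrowing a Data (assumes `data` is a Local<Data>
+// obtained elsewhere, e.g. from a context snapshot; Context::Cast(Data*)
+// is declared in v8-context.h):
+//
+//   if (data->IsContext()) {
+//     v8::Local<v8::Context> ctx = data.As<v8::Context>();
+//   }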
+
+/**
+ * A fixed-sized array with elements of type Data.
+ */
+class V8_EXPORT FixedArray : public Data {
+ public:
+ int Length() const;
+ Local<Data> Get(Local<Context> context, int i) const;
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_DATA_H_
diff --git a/deps/v8/include/v8-date.h b/deps/v8/include/v8-date.h
new file mode 100644
index 0000000000..e7a01f29b2
--- /dev/null
+++ b/deps/v8/include/v8-date.h
@@ -0,0 +1,43 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_DATE_H_
+#define INCLUDE_V8_DATE_H_
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-object.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Context;
+
+/**
+ * An instance of the built-in Date constructor (ECMA-262, 15.9).
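+ *
+ * Editorial sketch (assumes `context` is a valid Local<Context>):
+ * \code
+ *   v8::Local<v8::Value> date;
+ *   if (v8::Date::New(context, 0).ToLocal(&date)) {
+ *     double ms = v8::Date::Cast(*date)->ValueOf();  // 0.0, the epoch
+ *   }
+ * \endcode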
+ */
+class V8_EXPORT Date : public Object {
+ public:
+ static V8_WARN_UNUSED_RESULT MaybeLocal<Value> New(Local<Context> context,
+ double time);
+
+ /**
+ * A specialization of Value::NumberValue that is more efficient
+ * because we know the structure of this object.
+ */
+ double ValueOf() const;
+
+ V8_INLINE static Date* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Date*>(value);
+ }
+
+ private:
+ static void CheckCast(Value* obj);
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_DATE_H_
diff --git a/deps/v8/include/v8-debug.h b/deps/v8/include/v8-debug.h
new file mode 100644
index 0000000000..a13ae3f6d6
--- /dev/null
+++ b/deps/v8/include/v8-debug.h
@@ -0,0 +1,151 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_DEBUG_H_
+#define INCLUDE_V8_DEBUG_H_
+
+#include <stdint.h>
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Isolate;
+class String;
+
+/**
+ * A single JavaScript stack frame.
+ */
+class V8_EXPORT StackFrame {
+ public:
+ /**
+ * Returns the 1-based line number for the associated function call.
+ * This method will return Message::kNoLineNumberInfo if it is unable to
+ * retrieve the line number, or if kLineNumber was not passed as an option
+ * when capturing the StackTrace.
+ */
+ int GetLineNumber() const;
+
+ /**
+ * Returns the 1-based column offset on the line for the associated function
+ * call.
+ * This method will return Message::kNoColumnInfo if it is unable to retrieve
+ * the column number, or if kColumnOffset was not passed as an option when
+ * capturing the StackTrace.
+ */
+ int GetColumn() const;
+
+ /**
+ * Returns the id of the script for the function for this StackFrame.
+ * This method will return Message::kNoScriptIdInfo if it is unable to
+ * retrieve the script id, or if kScriptId was not passed as an option when
+ * capturing the StackTrace.
+ */
+ int GetScriptId() const;
+
+ /**
+ * Returns the name of the resource that contains the script for the
+ * function for this StackFrame.
+ */
+ Local<String> GetScriptName() const;
+
+ /**
+ * Returns the name of the resource that contains the script for the
+ * function for this StackFrame or sourceURL value if the script name
+ * is undefined and its source ends with //# sourceURL=... string or
+ * deprecated //@ sourceURL=... string.
+ */
+ Local<String> GetScriptNameOrSourceURL() const;
+
+ /**
+ * Returns the source of the script for the function for this StackFrame.
+ */
+ Local<String> GetScriptSource() const;
+
+ /**
+ * Returns the source mapping URL (if one is present) of the script for
+ * the function for this StackFrame.
+ */
+ Local<String> GetScriptSourceMappingURL() const;
+
+ /**
+ * Returns the name of the function associated with this stack frame.
+ */
+ Local<String> GetFunctionName() const;
+
+ /**
+ * Returns whether or not the associated function is compiled via a call to
+ * eval().
+ */
+ bool IsEval() const;
+
+ /**
+ * Returns whether or not the associated function is called as a
+ * constructor via "new".
+ */
+ bool IsConstructor() const;
+
+ /**
+ * Returns whether or not the associated function is defined in wasm.
+ */
+ bool IsWasm() const;
+
+ /**
+ * Returns whether or not the associated function is defined by the user.
+ */
+ bool IsUserJavaScript() const;
+};
+
+/**
+ * Representation of a JavaScript stack trace. The information collected is a
+ * snapshot of the execution stack and the information remains valid after
+ * execution continues.
+ */
+class V8_EXPORT StackTrace {
+ public:
+ /**
+ * Flags that determine what information is captured for each
+ * StackFrame when grabbing the current stack trace.
+ * Note: these options are deprecated and we always collect all available
+ * information (kDetailed).
+ */
+ enum StackTraceOptions {
+ kLineNumber = 1,
+ kColumnOffset = 1 << 1 | kLineNumber,
+ kScriptName = 1 << 2,
+ kFunctionName = 1 << 3,
+ kIsEval = 1 << 4,
+ kIsConstructor = 1 << 5,
+ kScriptNameOrSourceURL = 1 << 6,
+ kScriptId = 1 << 7,
+ kExposeFramesAcrossSecurityOrigins = 1 << 8,
+ kOverview = kLineNumber | kColumnOffset | kScriptName | kFunctionName,
+ kDetailed = kOverview | kIsEval | kIsConstructor | kScriptNameOrSourceURL
+ };
+
+ /**
+ * Returns a StackFrame at a particular index.
+ */
+ Local<StackFrame> GetFrame(Isolate* isolate, uint32_t index) const;
+
+ /**
+ * Returns the number of StackFrames.
+ */
+ int GetFrameCount() const;
+
+ /**
+ * Grab a snapshot of the current JavaScript execution stack.
+ *
+ * \param frame_limit The maximum number of stack frames we want to capture.
+ * \param options Enumerates the set of things we will capture for each
+ * StackFrame.
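+ *
+ * Editorial sketch (assumes script is currently executing on the stack):
+ * \code
+ *   v8::Local<v8::StackTrace> trace =
+ *       v8::StackTrace::CurrentStackTrace(isolate, 10);
+ *   if (trace->GetFrameCount() > 0) {
+ *     int line = trace->GetFrame(isolate, 0)->GetLineNumber();
+ *   }
+ * \endcode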
+ */
+ static Local<StackTrace> CurrentStackTrace(
+ Isolate* isolate, int frame_limit, StackTraceOptions options = kDetailed);
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_DEBUG_H_
diff --git a/deps/v8/include/v8-embedder-heap.h b/deps/v8/include/v8-embedder-heap.h
new file mode 100644
index 0000000000..501a4fc523
--- /dev/null
+++ b/deps/v8/include/v8-embedder-heap.h
@@ -0,0 +1,238 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_EMBEDDER_HEAP_H_
+#define INCLUDE_V8_EMBEDDER_HEAP_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <utility>
+#include <vector>
+
+#include "cppgc/common.h"
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-traced-handle.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Data;
+class Isolate;
+class Value;
+
+namespace internal {
+class LocalEmbedderHeapTracer;
+} // namespace internal
+
+/**
+ * Handler for embedder roots on non-unified heap garbage collections.
+ */
+class V8_EXPORT EmbedderRootsHandler {
+ public:
+ virtual ~EmbedderRootsHandler() = default;
+
+ /**
+ * Returns true if the TracedGlobal handle should be considered as a root for
+ * the currently running non-tracing garbage collection and false otherwise.
+ * The default implementation will keep all TracedGlobal references as roots.
+ *
+ * If this returns false, then V8 may decide that the object referred to by
+ * such a handle is reclaimed. In that case:
+ * - No action is required if handles are used with destructors, i.e., by just
+ * using |TracedGlobal|.
+ * - When run without destructors, i.e., by using |TracedReference|, V8 calls
+ * |ResetRoot|.
+ *
+ * Note that the |handle| is different from the handle that the embedder
+ * holds for retaining the object. The embedder may use |WrapperClassId()|
+ * to distinguish handles that should be treated as roots from those that
+ * should not.
+ */
+ virtual bool IsRoot(const v8::TracedReference<v8::Value>& handle) = 0;
+ virtual bool IsRoot(const v8::TracedGlobal<v8::Value>& handle) = 0;
+
+ /**
+ * Used in combination with |IsRoot|. Called by V8 when an
+ * object that is backed by a handle is reclaimed by a non-tracing garbage
+ * collection. It is up to the embedder to reset the original handle.
+ *
+ * Note that the |handle| is different from the handle that the embedder holds
+ * for retaining the object. It is up to the embedder to find the original
+ * handle via the object or class id.
+ */
+ virtual void ResetRoot(const v8::TracedReference<v8::Value>& handle) = 0;
+};
+
+/**
+ * Interface for tracing through the embedder heap. During a V8 garbage
+ * collection, V8 collects hidden fields of all potential wrappers, and at the
+ * end of its marking phase iterates the collection and asks the embedder to
+ * trace through its heap and use the reporter to report each JavaScript object
+ * reachable from any of the given wrappers.
+ */
+class V8_EXPORT EmbedderHeapTracer {
+ public:
+ using EmbedderStackState = cppgc::EmbedderStackState;
+
+ enum TraceFlags : uint64_t {
+ kNoFlags = 0,
+ kReduceMemory = 1 << 0,
+ kForced = 1 << 2,
+ };
+
+ /**
+ * Interface for iterating through TracedGlobal handles.
+ */
+ class V8_EXPORT TracedGlobalHandleVisitor {
+ public:
+ virtual ~TracedGlobalHandleVisitor() = default;
+ virtual void VisitTracedGlobalHandle(const TracedGlobal<Value>& handle) {}
+ virtual void VisitTracedReference(const TracedReference<Value>& handle) {}
+ };
+
+ /**
+ * Summary of a garbage collection cycle. See |TraceEpilogue| on how the
+ * summary is reported.
+ */
+ struct TraceSummary {
+ /**
+ * Time spent managing the retained memory in milliseconds. This can e.g.
+ * include the time tracing through objects in the embedder.
+ */
+ double time = 0.0;
+
+ /**
+ * Memory retained by the embedder through the |EmbedderHeapTracer|
+ * mechanism in bytes.
+ */
+ size_t allocated_size = 0;
+ };
+
+ virtual ~EmbedderHeapTracer() = default;
+
+ /**
+ * Iterates all TracedGlobal handles created for the v8::Isolate the tracer is
+ * attached to.
+ */
+ void IterateTracedGlobalHandles(TracedGlobalHandleVisitor* visitor);
+
+ /**
+ * Called by the embedder to set the start of the stack, which is e.g. used by
+ * V8 to determine whether handles are used from stack or heap.
+ */
+ void SetStackStart(void* stack_start);
+
+ /**
+ * Called by the embedder to notify V8 of an empty execution stack.
+ */
+ V8_DEPRECATE_SOON(
+ "This call only optimized internal caches which V8 is able to figure out "
+ "on its own now.")
+ void NotifyEmptyEmbedderStack();
+
+ /**
+ * Called by v8 to register internal fields of found wrappers.
+ *
+ * The embedder is expected to store them somewhere and trace reachable
+ * wrappers from them when called through |AdvanceTracing|.
+ */
+ virtual void RegisterV8References(
+ const std::vector<std::pair<void*, void*>>& embedder_fields) = 0;
+
+ void RegisterEmbedderReference(const BasicTracedReference<v8::Data>& ref);
+
+ /**
+ * Called at the beginning of a GC cycle.
+ */
+ virtual void TracePrologue(TraceFlags flags) {}
+
+ /**
+ * Called to advance tracing in the embedder.
+ *
+ * The embedder is expected to trace its heap starting from wrappers reported
+ * by the RegisterV8References method, and report back all reachable wrappers.
+ * Furthermore, the embedder is expected to stop tracing by the given
+ * deadline. A deadline of infinity means that tracing should be finished.
+ *
+ * Returns |true| if tracing is done, and false otherwise.
+ */
+ virtual bool AdvanceTracing(double deadline_in_ms) = 0;
+
+ /*
+ * Returns true if there is no more tracing work to be done (see AdvanceTracing)
+ * and false otherwise.
+ */
+ virtual bool IsTracingDone() = 0;
+
+ /**
+ * Called at the end of a GC cycle.
+ *
+ * Note that allocation is *not* allowed within |TraceEpilogue|. Can be
+ * overridden to fill a |TraceSummary| that is used by V8 to schedule future
+ * garbage collections.
+ */
+ virtual void TraceEpilogue(TraceSummary* trace_summary) {}
+
+ /**
+ * Called upon entering the final marking pause. No more incremental marking
+ * steps will follow this call.
+ */
+ virtual void EnterFinalPause(EmbedderStackState stack_state) = 0;
+
+ /*
+ * Called by the embedder to request immediate finalization of the currently
+ * running tracing phase that has been started with TracePrologue and not
+ * yet finished with TraceEpilogue.
+ *
+ * Will be a no-op when tracing is not currently in progress.
+ *
+ * This is an experimental feature.
+ */
+ void FinalizeTracing();
+
+ /**
+ * See documentation on EmbedderRootsHandler.
+ */
+ virtual bool IsRootForNonTracingGC(
+ const v8::TracedReference<v8::Value>& handle);
+ virtual bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle);
+
+ /**
+ * See documentation on EmbedderRootsHandler.
+ */
+ virtual void ResetHandleInNonTracingGC(
+ const v8::TracedReference<v8::Value>& handle);
+
+ /*
+ * Called by the embedder to immediately perform a full garbage collection.
+ *
+ * Should only be used in testing code.
+ */
+ void GarbageCollectionForTesting(EmbedderStackState stack_state);
+
+ /*
+ * Called by the embedder to signal newly allocated or freed memory. Not
+ * bound to tracing phases. Embedders should consider how often changes are
+ * reported, as V8 may consult global heuristics on whether to trigger
+ * garbage collection on each change.
+ */
+ void IncreaseAllocatedSize(size_t bytes);
+ void DecreaseAllocatedSize(size_t bytes);
+
+ /*
+ * Returns the v8::Isolate this tracer is attached to, or |nullptr| if it
+ * is not attached to any v8::Isolate.
+ */
+ v8::Isolate* isolate() const { return isolate_; }
+
+ protected:
+ v8::Isolate* isolate_ = nullptr;
+
+ friend class internal::LocalEmbedderHeapTracer;
+};
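+
+// Editorial sketch of a minimal EmbedderHeapTracer overriding only the pure
+// virtual methods (illustrative; a real tracer must actually walk the
+// embedder heap and report reachable wrappers):
+//
+//   class NoopTracer final : public v8::EmbedderHeapTracer {
+//    public:
+//     void RegisterV8References(
+//         const std::vector<std::pair<void*, void*>>& fields) override {}
+//     bool AdvanceTracing(double deadline_in_ms) override { return true; }
+//     bool IsTracingDone() override { return true; }
+//     void EnterFinalPause(EmbedderStackState stack_state) override {}
+//   };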
+
+} // namespace v8
+
+#endif // INCLUDE_V8_EMBEDDER_HEAP_H_
diff --git a/deps/v8/include/v8-exception.h b/deps/v8/include/v8-exception.h
new file mode 100644
index 0000000000..add882da4c
--- /dev/null
+++ b/deps/v8/include/v8-exception.h
@@ -0,0 +1,224 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_EXCEPTION_H_
+#define INCLUDE_V8_EXCEPTION_H_
+
+#include <stddef.h>
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Context;
+class Isolate;
+class Message;
+class StackTrace;
+class String;
+class Value;
+
+namespace internal {
+class Isolate;
+class ThreadLocalTop;
+} // namespace internal
+
+/**
+ * Create new error objects by calling the corresponding error object
+ * constructor with the message.
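+ *
+ * Editorial sketch of throwing a TypeError from native code (assumes
+ * `isolate` is the current Isolate):
+ * \code
+ *   isolate->ThrowException(v8::Exception::TypeError(
+ *       v8::String::NewFromUtf8Literal(isolate, "expected a number")));
+ * \endcode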
+ */
+class V8_EXPORT Exception {
+ public:
+ static Local<Value> RangeError(Local<String> message);
+ static Local<Value> ReferenceError(Local<String> message);
+ static Local<Value> SyntaxError(Local<String> message);
+ static Local<Value> TypeError(Local<String> message);
+ static Local<Value> WasmCompileError(Local<String> message);
+ static Local<Value> WasmLinkError(Local<String> message);
+ static Local<Value> WasmRuntimeError(Local<String> message);
+ static Local<Value> Error(Local<String> message);
+
+ /**
+ * Creates an error message for the given exception.
+ * Will try to reconstruct the original stack trace from the exception value,
+ * or capture the current stack trace if not available.
+ */
+ static Local<Message> CreateMessage(Isolate* isolate, Local<Value> exception);
+
+ /**
+ * Returns the original stack trace that was captured at the creation time
+ * of a given exception, or an empty handle if not available.
+ */
+ static Local<StackTrace> GetStackTrace(Local<Value> exception);
+};
+
+/**
+ * An external exception handler.
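+ *
+ * Editorial usage sketch (assumes `isolate`, `context`, and a compiled
+ * `script` were set up by the embedder):
+ * \code
+ *   v8::TryCatch try_catch(isolate);
+ *   v8::MaybeLocal<v8::Value> result = script->Run(context);
+ *   if (try_catch.HasCaught()) {
+ *     v8::Local<v8::Message> message = try_catch.Message();
+ *     // Report the error via `message`, then Reset() before reusing.
+ *   }
+ * \endcode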
+ */
+class V8_EXPORT TryCatch {
+ public:
+ /**
+ * Creates a new try/catch block and registers it with v8. Note that
+ * all TryCatch blocks should be stack allocated because the memory
+ * location itself is compared against JavaScript try/catch blocks.
+ */
+ explicit TryCatch(Isolate* isolate);
+
+ /**
+ * Unregisters and deletes this try/catch block.
+ */
+ ~TryCatch();
+
+ /**
+ * Returns true if an exception has been caught by this try/catch block.
+ */
+ bool HasCaught() const;
+
+ /**
+ * For certain types of exceptions, it makes no sense to continue execution.
+ *
+ * If CanContinue returns false, the correct action is to perform any C++
+ * cleanup needed and then return. If CanContinue returns false and
+ * HasTerminated returns true, it is possible to call
+ * CancelTerminateExecution in order to continue calling into the engine.
+ */
+ bool CanContinue() const;
+
+ /**
+ * Returns true if an exception has been caught due to script execution
+ * being terminated.
+ *
+ * There is no JavaScript representation of an execution termination
+ * exception. Such exceptions are thrown when the TerminateExecution
+ * methods are called to terminate a long-running script.
+ *
+ * If such an exception has been thrown, HasTerminated will return true,
+ * indicating that it is possible to call CancelTerminateExecution in order
+ * to continue calling into the engine.
+ */
+ bool HasTerminated() const;
+
+ /**
+ * Throws the exception caught by this TryCatch in a way that avoids
+ * it being caught again by this same TryCatch. As with ThrowException
+ * it is illegal to execute any JavaScript operations after calling
+ * ReThrow; the caller must return immediately to where the exception
+ * is caught.
+ */
+ Local<Value> ReThrow();
+
+ /**
+ * Returns the exception caught by this try/catch block. If no exception has
+ * been caught an empty handle is returned.
+ */
+ Local<Value> Exception() const;
+
+ /**
+ * Returns the .stack property of an object. If no .stack
+ * property is present an empty handle is returned.
+ */
+ V8_WARN_UNUSED_RESULT static MaybeLocal<Value> StackTrace(
+ Local<Context> context, Local<Value> exception);
+
+ /**
+ * Returns the .stack property of the thrown object. If no .stack property is
+ * present or if this try/catch block has not caught an exception, an empty
+ * handle is returned.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> StackTrace(
+ Local<Context> context) const;
+
+ /**
+ * Returns the message associated with this exception. If there is
+ * no message associated an empty handle is returned.
+ */
+ Local<v8::Message> Message() const;
+
+ /**
+ * Clears any exceptions that may have been caught by this try/catch block.
+ * After this method has been called, HasCaught() will return false. Also
+ * cancels the scheduled exception if it was caught and ReThrow() has not
+ * been called.
+ *
+ * It is not necessary to clear a try/catch block before using it again; if
+ * another exception is thrown the previously caught exception will just be
+ * overwritten. However, it is often a good idea since it makes it easier
+ * to determine which operation threw a given exception.
+ */
+ void Reset();
+
+ /**
+ * Set verbosity of the external exception handler.
+ *
+ * By default, exceptions that are caught by an external exception
+ * handler are not reported. Call SetVerbose with true on an
+ * external exception handler to have exceptions caught by the
+ * handler reported as if they were not caught.
+ */
+ void SetVerbose(bool value);
+
+ /**
+ * Returns true if verbosity is enabled.
+ */
+ bool IsVerbose() const;
+
+ /**
+ * Set whether or not this TryCatch should capture a Message object
+ * which holds source information about where the exception
+ * occurred. True by default.
+ */
+ void SetCaptureMessage(bool value);
+
+ V8_DEPRECATE_SOON(
+ "This is private information that should not be exposed by the API")
+ static void* JSStackComparableAddress(TryCatch* handler) {
+ if (handler == nullptr) return nullptr;
+ return reinterpret_cast<void*>(handler->JSStackComparableAddressPrivate());
+ }
+
+ TryCatch(const TryCatch&) = delete;
+ void operator=(const TryCatch&) = delete;
+
+ private:
+ // Declaring operator new and delete as deleted is not spec compliant.
+ // Therefore declare them private instead to disable dynamic alloc
+ void* operator new(size_t size);
+ void* operator new[](size_t size);
+ void operator delete(void*, size_t);
+ void operator delete[](void*, size_t);
+
+ /**
+ * There are cases when the raw address of C++ TryCatch object cannot be
+ * used for comparisons with addresses into the JS stack. The cases are:
+ * 1) ARM, ARM64 and MIPS simulators which have separate JS stack.
+ * 2) Address sanitizer allocates local C++ object in the heap when
+ * UseAfterReturn mode is enabled.
+ * This method returns an address that can be used for comparisons with
+ * addresses into the JS stack. When neither simulator nor ASAN's
+ * UseAfterReturn is enabled, then the address returned will be the address
+ * of the C++ try catch handler itself.
+ */
+ internal::Address JSStackComparableAddressPrivate() {
+ return js_stack_comparable_address_;
+ }
+
+ void ResetInternal();
+
+ internal::Isolate* isolate_;
+ TryCatch* next_;
+ void* exception_;
+ void* message_obj_;
+ internal::Address js_stack_comparable_address_;
+ bool is_verbose_ : 1;
+ bool can_continue_ : 1;
+ bool capture_message_ : 1;
+ bool rethrow_ : 1;
+ bool has_terminated_ : 1;
+
+ friend class internal::Isolate;
+ friend class internal::ThreadLocalTop;
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_EXCEPTION_H_
diff --git a/deps/v8/include/v8-extension.h b/deps/v8/include/v8-extension.h
new file mode 100644
index 0000000000..0705e2afbb
--- /dev/null
+++ b/deps/v8/include/v8-extension.h
@@ -0,0 +1,62 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_EXTENSION_H_
+#define INCLUDE_V8_EXTENSION_H_
+
+#include <memory>
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-primitive.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class FunctionTemplate;
+
+// --- Extensions ---
+
+/**
+ * A native extension: bundles optional JavaScript source and native function
+ * templates that can be enabled for newly created contexts.
+ */
+class V8_EXPORT Extension {
+ public:
+ // Note that the strings passed into this constructor must live as long
+ // as the Extension itself.
+ Extension(const char* name, const char* source = nullptr, int dep_count = 0,
+ const char** deps = nullptr, int source_length = -1);
+ virtual ~Extension() { delete source_; }
+ virtual Local<FunctionTemplate> GetNativeFunctionTemplate(
+ Isolate* isolate, Local<String> name) {
+ return Local<FunctionTemplate>();
+ }
+
+ const char* name() const { return name_; }
+ size_t source_length() const { return source_length_; }
+ const String::ExternalOneByteStringResource* source() const {
+ return source_;
+ }
+ int dependency_count() const { return dep_count_; }
+ const char** dependencies() const { return deps_; }
+ void set_auto_enable(bool value) { auto_enable_ = value; }
+ bool auto_enable() { return auto_enable_; }
+
+ // Disallow copying and assigning.
+ Extension(const Extension&) = delete;
+ void operator=(const Extension&) = delete;
+
+ private:
+ const char* name_;
+ size_t source_length_;  // expected to be initialized before source_
+ String::ExternalOneByteStringResource* source_;
+ int dep_count_;
+ const char** deps_;
+ bool auto_enable_;
+};
+
+void V8_EXPORT RegisterExtension(std::unique_ptr<Extension>);
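+
+// Editorial sketch (assumes MyExtension derives from v8::Extension and its
+// name/source strings outlive it, as the constructor requires):
+//
+//   v8::RegisterExtension(std::make_unique<MyExtension>());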
+
+} // namespace v8
+
+#endif // INCLUDE_V8_EXTENSION_H_
diff --git a/deps/v8/include/v8-external.h b/deps/v8/include/v8-external.h
new file mode 100644
index 0000000000..2e245036f4
--- /dev/null
+++ b/deps/v8/include/v8-external.h
@@ -0,0 +1,37 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_EXTERNAL_H_
+#define INCLUDE_V8_EXTERNAL_H_
+
+#include "v8-value.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Isolate;
+
+/**
+ * A JavaScript value that wraps a C++ void*. This type of value is mainly used
+ * to associate C++ data structures with JavaScript objects.
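+ *
+ * Editorial sketch (assumes `isolate` is current and `ptr` points to embedder
+ * data that outlives the External):
+ * \code
+ *   v8::Local<v8::External> ext = v8::External::New(isolate, ptr);
+ *   void* raw = ext->Value();  // == ptr
+ * \endcode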
+ */
+class V8_EXPORT External : public Value {
+ public:
+ static Local<External> New(Isolate* isolate, void* value);
+ V8_INLINE static External* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<External*>(value);
+ }
+
+ void* Value() const;
+
+ private:
+ static void CheckCast(v8::Value* obj);
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_EXTERNAL_H_
diff --git a/deps/v8/include/v8-fast-api-calls.h b/deps/v8/include/v8-fast-api-calls.h
index 5dc7473eaa..ca13b1e626 100644
--- a/deps/v8/include/v8-fast-api-calls.h
+++ b/deps/v8/include/v8-fast-api-calls.h
@@ -225,9 +225,11 @@
#include <tuple>
#include <type_traits>
-#include "v8-internal.h" // NOLINT(build/include_directory)
-#include "v8.h" // NOLINT(build/include_directory)
-#include "v8config.h" // NOLINT(build/include_directory)
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-typed-array.h" // NOLINT(build/include_directory)
+#include "v8-value.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
namespace v8 {
@@ -464,7 +466,7 @@ class V8_EXPORT CFunction {
};
};
-struct ApiObject {
+struct V8_DEPRECATE_SOON("Use v8::Local<v8::Value> instead.") ApiObject {
uintptr_t address;
};
diff --git a/deps/v8/include/v8-forward.h b/deps/v8/include/v8-forward.h
new file mode 100644
index 0000000000..ae16fe64b2
--- /dev/null
+++ b/deps/v8/include/v8-forward.h
@@ -0,0 +1,79 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_FORWARD_H_
+#define INCLUDE_V8_FORWARD_H_
+
+// This header is intended to be used by headers that pass around V8 types,
+// either by pointer or using Local<Type>. The full definitions can be included
+// either via v8.h or the more fine-grained headers.
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class AccessorSignature;
+class Array;
+class ArrayBuffer;
+class ArrayBufferView;
+class BigInt;
+class BigInt64Array;
+class BigIntObject;
+class BigUint64Array;
+class Boolean;
+class BooleanObject;
+class Context;
+class Data;
+class DataView;
+class Date;
+class External;
+class FixedArray;
+class Float32Array;
+class Float64Array;
+class Function;
+template <class F>
+class FunctionCallbackInfo;
+class FunctionTemplate;
+class Int16Array;
+class Int32;
+class Int32Array;
+class Int8Array;
+class Integer;
+class Isolate;
+class Map;
+class Module;
+class Name;
+class Number;
+class NumberObject;
+class Object;
+class ObjectTemplate;
+class Platform;
+class Primitive;
+class Private;
+class Promise;
+class Proxy;
+class RegExp;
+class Script;
+class Set;
+class SharedArrayBuffer;
+class Signature;
+class String;
+class StringObject;
+class Symbol;
+class SymbolObject;
+class Template;
+class TypedArray;
+class Uint16Array;
+class Uint32;
+class Uint32Array;
+class Uint8Array;
+class Uint8ClampedArray;
+class UnboundModuleScript;
+class Value;
+class WasmMemoryObject;
+class WasmModuleObject;
+
+} // namespace v8
+
+#endif  // INCLUDE_V8_FORWARD_H_
diff --git a/deps/v8/include/v8-function-callback.h b/deps/v8/include/v8-function-callback.h
new file mode 100644
index 0000000000..2adff99b1c
--- /dev/null
+++ b/deps/v8/include/v8-function-callback.h
@@ -0,0 +1,475 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_FUNCTION_CALLBACK_H_
+#define INCLUDE_V8_FUNCTION_CALLBACK_H_
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-primitive.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+template <typename T>
+class BasicTracedReference;
+template <typename T>
+class Global;
+class Object;
+class Value;
+
+namespace internal {
+class FunctionCallbackArguments;
+class PropertyCallbackArguments;
+} // namespace internal
+
+namespace debug {
+class ConsoleCallArguments;
+} // namespace debug
+
+template <typename T>
+class ReturnValue {
+ public:
+ template <class S>
+ V8_INLINE ReturnValue(const ReturnValue<S>& that) : value_(that.value_) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ }
+ // Local setters
+ template <typename S>
+ V8_INLINE void Set(const Global<S>& handle);
+ template <typename S>
+ V8_INLINE void Set(const BasicTracedReference<S>& handle);
+ template <typename S>
+ V8_INLINE void Set(const Local<S> handle);
+ // Fast primitive setters
+ V8_INLINE void Set(bool value);
+ V8_INLINE void Set(double i);
+ V8_INLINE void Set(int32_t i);
+ V8_INLINE void Set(uint32_t i);
+ // Fast JS primitive setters
+ V8_INLINE void SetNull();
+ V8_INLINE void SetUndefined();
+ V8_INLINE void SetEmptyString();
+ // Convenience getter for Isolate
+ V8_INLINE Isolate* GetIsolate() const;
+
+ // Pointer setter: Uncompilable to prevent inadvertent misuse.
+ template <typename S>
+ V8_INLINE void Set(S* whatever);
+
+ // Getter. Creates a new Local<> so it comes with a certain performance
+ // hit. If the ReturnValue was not yet set, this will return the undefined
+ // value.
+ V8_INLINE Local<Value> Get() const;
+
+ private:
+ template <class F>
+ friend class ReturnValue;
+ template <class F>
+ friend class FunctionCallbackInfo;
+ template <class F>
+ friend class PropertyCallbackInfo;
+ template <class F, class G, class H>
+ friend class PersistentValueMapBase;
+ V8_INLINE void SetInternal(internal::Address value) { *value_ = value; }
+ V8_INLINE internal::Address GetDefaultValue();
+ V8_INLINE explicit ReturnValue(internal::Address* slot);
+ internal::Address* value_;
+};
+
+/**
+ * The argument information given to function call callbacks. This
+ * class provides access to information about the context of the call,
+ * including the receiver, the number and values of arguments, and
+ * the holder of the function.
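+ *
+ * Editorial sketch of a callback using this class (names are illustrative):
+ * \code
+ *   void Sum(const v8::FunctionCallbackInfo<v8::Value>& info) {
+ *     v8::Local<v8::Context> context =
+ *         info.GetIsolate()->GetCurrentContext();
+ *     double sum = 0;
+ *     for (int i = 0; i < info.Length(); ++i) {
+ *       sum += info[i]->NumberValue(context).FromMaybe(0);
+ *     }
+ *     info.GetReturnValue().Set(sum);
+ *   }
+ * \endcode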
+ */
+template <typename T>
+class FunctionCallbackInfo {
+ public:
+ /** The number of available arguments. */
+ V8_INLINE int Length() const;
+ /**
+ * Accessor for the available arguments. Returns `undefined` if the index
+ * is out of bounds.
+ */
+ V8_INLINE Local<Value> operator[](int i) const;
+ /** Returns the receiver. This corresponds to the "this" value. */
+ V8_INLINE Local<Object> This() const;
+ /**
+ * If the callback was created without a Signature, this is the same
+ * value as This(). If there is a signature, and the signature didn't match
+ * This() but one of its hidden prototypes, this will be the respective
+ * hidden prototype.
+ *
+ * Note that this is not the prototype of This() on which the accessor
+ * referencing this callback was found (which in V8 internally is often
+ * referred to as holder [sic]).
+ */
+ V8_INLINE Local<Object> Holder() const;
+ /** For construct calls, this returns the "new.target" value. */
+ V8_INLINE Local<Value> NewTarget() const;
+ /** Indicates whether this is a regular call or a construct call. */
+ V8_INLINE bool IsConstructCall() const;
+ /** The data argument specified when creating the callback. */
+ V8_INLINE Local<Value> Data() const;
+ /** The current Isolate. */
+ V8_INLINE Isolate* GetIsolate() const;
+ /** The ReturnValue for the call. */
+ V8_INLINE ReturnValue<T> GetReturnValue() const;
+ // This shouldn't be public, but the arm compiler needs it.
+ static const int kArgsLength = 6;
+
+ protected:
+ friend class internal::FunctionCallbackArguments;
+ friend class internal::CustomArguments<FunctionCallbackInfo>;
+ friend class debug::ConsoleCallArguments;
+ static const int kHolderIndex = 0;
+ static const int kIsolateIndex = 1;
+ static const int kReturnValueDefaultValueIndex = 2;
+ static const int kReturnValueIndex = 3;
+ static const int kDataIndex = 4;
+ static const int kNewTargetIndex = 5;
+
+ V8_INLINE FunctionCallbackInfo(internal::Address* implicit_args,
+ internal::Address* values, int length);
+ internal::Address* implicit_args_;
+ internal::Address* values_;
+ int length_;
+};
+
+/**
+ * The information passed to a property callback about the context
+ * of the property access.
+ */
+template <typename T>
+class PropertyCallbackInfo {
+ public:
+ /**
+ * \return The isolate of the property access.
+ */
+ V8_INLINE Isolate* GetIsolate() const;
+
+ /**
+ * \return The data set in the configuration, i.e., in
+ * `NamedPropertyHandlerConfiguration` or
+ * `IndexedPropertyHandlerConfiguration`.
+ */
+ V8_INLINE Local<Value> Data() const;
+
+ /**
+ * \return The receiver. In many cases, this is the object on which the
+ * property access was intercepted. When using
+ * `Reflect.get`, `Function.prototype.call`, or similar functions, it is the
+ * object passed in as receiver or thisArg.
+ *
+ * \code
+ * void GetterCallback(Local<Name> name,
+ * const v8::PropertyCallbackInfo<v8::Value>& info) {
+ * auto context = info.GetIsolate()->GetCurrentContext();
+ *
+ * v8::Local<v8::Value> a_this =
+ * info.This()
+ * ->GetRealNamedProperty(context, v8_str("a"))
+ * .ToLocalChecked();
+ * v8::Local<v8::Value> a_holder =
+ * info.Holder()
+ * ->GetRealNamedProperty(context, v8_str("a"))
+ * .ToLocalChecked();
+ *
+ * CHECK(v8_str("r")->Equals(context, a_this).FromJust());
+ * CHECK(v8_str("obj")->Equals(context, a_holder).FromJust());
+ *
+ * info.GetReturnValue().Set(name);
+ * }
+ *
+ * v8::Local<v8::FunctionTemplate> templ =
+ * v8::FunctionTemplate::New(isolate);
+ * templ->InstanceTemplate()->SetHandler(
+ * v8::NamedPropertyHandlerConfiguration(GetterCallback));
+ * LocalContext env;
+ * env->Global()
+ * ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
+ * .ToLocalChecked()
+ * ->NewInstance(env.local())
+ * .ToLocalChecked())
+ * .FromJust();
+ *
+ * CompileRun("obj.a = 'obj'; var r = {a: 'r'}; Reflect.get(obj, 'x', r)");
+ * \endcode
+ */
+ V8_INLINE Local<Object> This() const;
+
+ /**
+ * \return The object in the prototype chain of the receiver that has the
+ * interceptor. Suppose you have `x` and its prototype is `y`, and `y`
+ * has an interceptor. Then `info.This()` is `x` and `info.Holder()` is `y`.
+ * The Holder() could be a hidden object (the global object, rather
+ * than the global proxy).
+ *
+ * \note For security reasons, do not pass the object back into the runtime.
+ */
+ V8_INLINE Local<Object> Holder() const;
+
+ /**
+ * \return The return value of the callback.
+ * Can be changed by calling Set().
+ * \code
+ * info.GetReturnValue().Set(...)
+ * \endcode
+ *
+ */
+ V8_INLINE ReturnValue<T> GetReturnValue() const;
+
+ /**
+ * \return True if the intercepted function should throw if an error occurs.
+ * Usually, `true` corresponds to `'use strict'`.
+ *
+ * \note Always `false` when intercepting `Reflect.set()`
+ * independent of the language mode.
+ */
+ V8_INLINE bool ShouldThrowOnError() const;
+
+ // This shouldn't be public, but the arm compiler needs it.
+ static const int kArgsLength = 7;
+
+ protected:
+ friend class MacroAssembler;
+ friend class internal::PropertyCallbackArguments;
+ friend class internal::CustomArguments<PropertyCallbackInfo>;
+ static const int kShouldThrowOnErrorIndex = 0;
+ static const int kHolderIndex = 1;
+ static const int kIsolateIndex = 2;
+ static const int kReturnValueDefaultValueIndex = 3;
+ static const int kReturnValueIndex = 4;
+ static const int kDataIndex = 5;
+ static const int kThisIndex = 6;
+
+ V8_INLINE PropertyCallbackInfo(internal::Address* args) : args_(args) {}
+ internal::Address* args_;
+};
+
+using FunctionCallback = void (*)(const FunctionCallbackInfo<Value>& info);
+
+// --- Implementation ---
+
+template <typename T>
+ReturnValue<T>::ReturnValue(internal::Address* slot) : value_(slot) {}
+
+template <typename T>
+template <typename S>
+void ReturnValue<T>::Set(const Global<S>& handle) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ if (V8_UNLIKELY(handle.IsEmpty())) {
+ *value_ = GetDefaultValue();
+ } else {
+ *value_ = *reinterpret_cast<internal::Address*>(*handle);
+ }
+}
+
+template <typename T>
+template <typename S>
+void ReturnValue<T>::Set(const BasicTracedReference<S>& handle) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ if (V8_UNLIKELY(handle.IsEmpty())) {
+ *value_ = GetDefaultValue();
+ } else {
+ *value_ = *reinterpret_cast<internal::Address*>(handle.val_);
+ }
+}
+
+template <typename T>
+template <typename S>
+void ReturnValue<T>::Set(const Local<S> handle) {
+ static_assert(std::is_void<T>::value || std::is_base_of<T, S>::value,
+ "type check");
+ if (V8_UNLIKELY(handle.IsEmpty())) {
+ *value_ = GetDefaultValue();
+ } else {
+ *value_ = *reinterpret_cast<internal::Address*>(*handle);
+ }
+}
+
+template <typename T>
+void ReturnValue<T>::Set(double i) {
+ static_assert(std::is_base_of<T, Number>::value, "type check");
+ Set(Number::New(GetIsolate(), i));
+}
+
+template <typename T>
+void ReturnValue<T>::Set(int32_t i) {
+ static_assert(std::is_base_of<T, Integer>::value, "type check");
+ using I = internal::Internals;
+ if (V8_LIKELY(I::IsValidSmi(i))) {
+ *value_ = I::IntToSmi(i);
+ return;
+ }
+ Set(Integer::New(GetIsolate(), i));
+}
+
+template <typename T>
+void ReturnValue<T>::Set(uint32_t i) {
+ static_assert(std::is_base_of<T, Integer>::value, "type check");
+ // Check the top bit instead of comparing against INT32_MAX: the value fits
+ // into int32_t iff the sign bit is clear.
+ bool fits_into_int32_t = (i & (1U << 31)) == 0;
+ if (V8_LIKELY(fits_into_int32_t)) {
+ Set(static_cast<int32_t>(i));
+ return;
+ }
+ Set(Integer::NewFromUnsigned(GetIsolate(), i));
+}
+
+template <typename T>
+void ReturnValue<T>::Set(bool value) {
+ static_assert(std::is_base_of<T, Boolean>::value, "type check");
+ using I = internal::Internals;
+ int root_index;
+ if (value) {
+ root_index = I::kTrueValueRootIndex;
+ } else {
+ root_index = I::kFalseValueRootIndex;
+ }
+ *value_ = *I::GetRoot(GetIsolate(), root_index);
+}
+
+template <typename T>
+void ReturnValue<T>::SetNull() {
+ static_assert(std::is_base_of<T, Primitive>::value, "type check");
+ using I = internal::Internals;
+ *value_ = *I::GetRoot(GetIsolate(), I::kNullValueRootIndex);
+}
+
+template <typename T>
+void ReturnValue<T>::SetUndefined() {
+ static_assert(std::is_base_of<T, Primitive>::value, "type check");
+ using I = internal::Internals;
+ *value_ = *I::GetRoot(GetIsolate(), I::kUndefinedValueRootIndex);
+}
+
+template <typename T>
+void ReturnValue<T>::SetEmptyString() {
+ static_assert(std::is_base_of<T, String>::value, "type check");
+ using I = internal::Internals;
+ *value_ = *I::GetRoot(GetIsolate(), I::kEmptyStringRootIndex);
+}
+
+template <typename T>
+Isolate* ReturnValue<T>::GetIsolate() const {
+ // Isolate is always the pointer below the default value on the stack.
+ return *reinterpret_cast<Isolate**>(&value_[-2]);
+}
+
+template <typename T>
+Local<Value> ReturnValue<T>::Get() const {
+ using I = internal::Internals;
+ if (*value_ == *I::GetRoot(GetIsolate(), I::kTheHoleValueRootIndex))
+ return Local<Value>(*Undefined(GetIsolate()));
+ return Local<Value>::New(GetIsolate(), reinterpret_cast<Value*>(value_));
+}
+
+template <typename T>
+template <typename S>
+void ReturnValue<T>::Set(S* whatever) {
+ static_assert(sizeof(S) < 0, "incompilable to prevent inadvertent misuse");
+}
+
+template <typename T>
+internal::Address ReturnValue<T>::GetDefaultValue() {
+ // Default value is always the pointer below value_ on the stack.
+ return value_[-1];
+}
+
+template <typename T>
+FunctionCallbackInfo<T>::FunctionCallbackInfo(internal::Address* implicit_args,
+ internal::Address* values,
+ int length)
+ : implicit_args_(implicit_args), values_(values), length_(length) {}
+
+template <typename T>
+Local<Value> FunctionCallbackInfo<T>::operator[](int i) const {
+ // values_ points to the first argument (not the receiver).
+ if (i < 0 || length_ <= i) return Local<Value>(*Undefined(GetIsolate()));
+ return Local<Value>(reinterpret_cast<Value*>(values_ + i));
+}
+
+template <typename T>
+Local<Object> FunctionCallbackInfo<T>::This() const {
+ // values_ points to the first argument (not the receiver).
+ return Local<Object>(reinterpret_cast<Object*>(values_ - 1));
+}
+
+template <typename T>
+Local<Object> FunctionCallbackInfo<T>::Holder() const {
+ return Local<Object>(
+ reinterpret_cast<Object*>(&implicit_args_[kHolderIndex]));
+}
+
+template <typename T>
+Local<Value> FunctionCallbackInfo<T>::NewTarget() const {
+ return Local<Value>(
+ reinterpret_cast<Value*>(&implicit_args_[kNewTargetIndex]));
+}
+
+template <typename T>
+Local<Value> FunctionCallbackInfo<T>::Data() const {
+ return Local<Value>(reinterpret_cast<Value*>(&implicit_args_[kDataIndex]));
+}
+
+template <typename T>
+Isolate* FunctionCallbackInfo<T>::GetIsolate() const {
+ return *reinterpret_cast<Isolate**>(&implicit_args_[kIsolateIndex]);
+}
+
+template <typename T>
+ReturnValue<T> FunctionCallbackInfo<T>::GetReturnValue() const {
+ return ReturnValue<T>(&implicit_args_[kReturnValueIndex]);
+}
+
+template <typename T>
+bool FunctionCallbackInfo<T>::IsConstructCall() const {
+ return !NewTarget()->IsUndefined();
+}
+
+template <typename T>
+int FunctionCallbackInfo<T>::Length() const {
+ return length_;
+}
+
+template <typename T>
+Isolate* PropertyCallbackInfo<T>::GetIsolate() const {
+ return *reinterpret_cast<Isolate**>(&args_[kIsolateIndex]);
+}
+
+template <typename T>
+Local<Value> PropertyCallbackInfo<T>::Data() const {
+ return Local<Value>(reinterpret_cast<Value*>(&args_[kDataIndex]));
+}
+
+template <typename T>
+Local<Object> PropertyCallbackInfo<T>::This() const {
+ return Local<Object>(reinterpret_cast<Object*>(&args_[kThisIndex]));
+}
+
+template <typename T>
+Local<Object> PropertyCallbackInfo<T>::Holder() const {
+ return Local<Object>(reinterpret_cast<Object*>(&args_[kHolderIndex]));
+}
+
+template <typename T>
+ReturnValue<T> PropertyCallbackInfo<T>::GetReturnValue() const {
+ return ReturnValue<T>(&args_[kReturnValueIndex]);
+}
+
+template <typename T>
+bool PropertyCallbackInfo<T>::ShouldThrowOnError() const {
+ using I = internal::Internals;
+ if (args_[kShouldThrowOnErrorIndex] !=
+ I::IntToSmi(I::kInferShouldThrowMode)) {
+ return args_[kShouldThrowOnErrorIndex] != I::IntToSmi(I::kDontThrow);
+ }
+ return v8::internal::ShouldThrowOnError(
+ reinterpret_cast<v8::internal::Isolate*>(GetIsolate()));
+}
+
+} // namespace v8
+
+#endif // INCLUDE_V8_FUNCTION_CALLBACK_H_
diff --git a/deps/v8/include/v8-function.h b/deps/v8/include/v8-function.h
new file mode 100644
index 0000000000..9424a86fda
--- /dev/null
+++ b/deps/v8/include/v8-function.h
@@ -0,0 +1,122 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_FUNCTION_H_
+#define INCLUDE_V8_FUNCTION_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "v8-function-callback.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-message.h" // NOLINT(build/include_directory)
+#include "v8-object.h" // NOLINT(build/include_directory)
+#include "v8-template.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Context;
+
+/**
+ * A JavaScript function object (ECMA-262, 15.3).
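+ *
+ * Editorial sketch of creating and calling a function (assumes `context` is
+ * entered, `isolate` is current, and `Sum` is an embedder-defined
+ * FunctionCallback):
+ * \code
+ *   v8::Local<v8::Function> fn;
+ *   if (v8::Function::New(context, Sum).ToLocal(&fn)) {
+ *     v8::Local<v8::Value> args[] = {v8::Number::New(isolate, 1),
+ *                                    v8::Number::New(isolate, 2)};
+ *     v8::MaybeLocal<v8::Value> result =
+ *         fn->Call(context, context->Global(), 2, args);
+ *   }
+ * \endcode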
+ */
+class V8_EXPORT Function : public Object {
+ public:
+ /**
+ * Create a function in the current execution context
+ * for a given FunctionCallback.
+ */
+ static MaybeLocal<Function> New(
+ Local<Context> context, FunctionCallback callback,
+ Local<Value> data = Local<Value>(), int length = 0,
+ ConstructorBehavior behavior = ConstructorBehavior::kAllow,
+ SideEffectType side_effect_type = SideEffectType::kHasSideEffect);
+
+ V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstance(
+ Local<Context> context, int argc, Local<Value> argv[]) const;
+
+ V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstance(
+ Local<Context> context) const {
+ return NewInstance(context, 0, nullptr);
+ }
+
+ /**
+ * When side effect checks are enabled, passing kHasNoSideEffect allows the
+ * constructor to be invoked without throwing. Calls made within the
+ * constructor are still checked.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstanceWithSideEffectType(
+ Local<Context> context, int argc, Local<Value> argv[],
+ SideEffectType side_effect_type = SideEffectType::kHasSideEffect) const;
+
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> Call(Local<Context> context,
+ Local<Value> recv, int argc,
+ Local<Value> argv[]);
+
+ void SetName(Local<String> name);
+ Local<Value> GetName() const;
+
+ /**
+ * Name inferred from variable or property assignment of this function.
+ * Used to facilitate debugging and profiling of JavaScript code written
+ * in an OO style, where many functions are anonymous but are assigned
+ * to object properties.
+ */
+ Local<Value> GetInferredName() const;
+
+ /**
+ * Returns the displayName if it is set, otherwise the configured name,
+ * otherwise the function name, otherwise the inferred name.
+ */
+ Local<Value> GetDebugName() const;
+
+ /**
+ * Returns the zero-based line number of the function body, or
+ * kLineOffsetNotFound if no information is available.
+ */
+ int GetScriptLineNumber() const;
+ /**
+ * Returns the zero-based column number of the function body, or
+ * kLineOffsetNotFound if no information is available.
+ */
+ int GetScriptColumnNumber() const;
+
+ /**
+ * Returns scriptId.
+ */
+ int ScriptId() const;
+
+ /**
+ * Returns the original function if this function is bound, else returns
+ * v8::Undefined.
+ */
+ Local<Value> GetBoundFunction() const;
+
+ /**
+ * Calls builtin Function.prototype.toString on this function.
+ * This is different from Value::ToString(), which may call a user-defined
+ * toString() function, and from Object::ObjectProtoToString(), which
+ * always serializes "[object Function]".
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<String> FunctionProtoToString(
+ Local<Context> context);
+
+ ScriptOrigin GetScriptOrigin() const;
+ V8_INLINE static Function* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Function*>(value);
+ }
+
+ static const int kLineOffsetNotFound;
+
+ private:
+ Function();
+ static void CheckCast(Value* obj);
+};
+} // namespace v8
+
+#endif // INCLUDE_V8_FUNCTION_H_
diff --git a/deps/v8/include/v8-initialization.h b/deps/v8/include/v8-initialization.h
new file mode 100644
index 0000000000..3b609292f6
--- /dev/null
+++ b/deps/v8/include/v8-initialization.h
@@ -0,0 +1,266 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_INITIALIZATION_H_
+#define INCLUDE_V8_INITIALIZATION_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8-isolate.h" // NOLINT(build/include_directory)
+#include "v8-platform.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+// We reserve the V8_* prefix for macros defined in V8 public API and
+// assume there are no name conflicts with the embedder's code.
+
+/**
+ * The v8 JavaScript engine.
+ */
+namespace v8 {
+
+class PageAllocator;
+class Platform;
+template <class K, class V, class T>
+class PersistentValueMapBase;
+
+/**
+ * EntropySource is used as a callback function when v8 needs a source
+ * of entropy.
+ */
+using EntropySource = bool (*)(unsigned char* buffer, size_t length);
+
+/**
+ * ReturnAddressLocationResolver is used as a callback function when v8 is
+ * resolving the location of a return address on the stack. Profilers that
+ * change the return address on the stack can use this to resolve the stack
+ * location to wherever the profiler stashed the original return address.
+ *
+ * \param return_addr_location A location on stack where a machine
+ * return address resides.
+ * \returns Either return_addr_location, or else a pointer to the profiler's
+ * copy of the original return address.
+ *
+ * \note The resolver function must not cause garbage collection.
+ */
+using ReturnAddressLocationResolver =
+ uintptr_t (*)(uintptr_t return_addr_location);
+
+using DcheckErrorCallback = void (*)(const char* file, int line,
+ const char* message);
+
+/**
+ * Container class for static utility functions.
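+ *
+ * Editorial sketch of a typical startup/shutdown order (assumes the embedder
+ * obtained a `platform`, e.g. from v8::platform::NewDefaultPlatform() in
+ * libplatform):
+ * \code
+ *   v8::V8::InitializePlatform(platform.get());
+ *   v8::V8::Initialize();
+ *   // ... create isolates and run scripts ...
+ *   v8::V8::Dispose();
+ *   v8::V8::ShutdownPlatform();
+ * \endcode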
+ */
+class V8_EXPORT V8 {
+ public:
+ /**
+ * Hand startup data to V8, in case the embedder has chosen to build
+ * V8 with external startup data.
+ *
+ * Note:
+ * - By default the startup data is linked into the V8 library, in which
+ * case this function is not meaningful.
+ * - If this needs to be called, it needs to be called before V8
+ * tries to make use of its built-ins.
+ * - To avoid unnecessary copies of data, V8 will point directly into the
+ * given data blob, so keep it alive until V8 exits.
+ * - Compression of the startup blob might be useful, but needs to be
+ * handled entirely on the embedder's side.
+ * - The call will abort if the data is invalid.
+ */
+ static void SetSnapshotDataBlob(StartupData* startup_blob);
+
+ /** Set the callback to invoke in case of Dcheck failures. */
+ static void SetDcheckErrorHandler(DcheckErrorCallback that);
+
+ /**
+ * Sets V8 flags from a string.
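+ *
+ * Editorial example (--expose-gc is an existing V8 flag, shown only for
+ * illustration):
+ * \code
+ *   v8::V8::SetFlagsFromString("--expose-gc");
+ * \endcode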
+ */
+ static void SetFlagsFromString(const char* str);
+ static void SetFlagsFromString(const char* str, size_t length);
+
+ /**
+ * Sets V8 flags from the command line.
+ */
+ static void SetFlagsFromCommandLine(int* argc, char** argv,
+ bool remove_flags);
+
+ /** Get the version string. */
+ static const char* GetVersion();
+
+ /**
+ * Initializes V8. This function needs to be called before the first Isolate
+ * is created. It always returns true.
+ */
+ V8_INLINE static bool Initialize() {
+ const int kBuildConfiguration =
+ (internal::PointerCompressionIsEnabled() ? kPointerCompression : 0) |
+ (internal::SmiValuesAre31Bits() ? k31BitSmis : 0) |
+ (internal::HeapSandboxIsEnabled() ? kHeapSandbox : 0) |
+ (internal::VirtualMemoryCageIsEnabled() ? kVirtualMemoryCage : 0);
+ return Initialize(kBuildConfiguration);
+ }
+
+ /**
+ * Allows the host application to provide a callback which can be used
+ * as a source of entropy for random number generators.
+ */
+ static void SetEntropySource(EntropySource source);
+
+ /**
+ * Allows the host application to provide a callback that allows v8 to
+ * cooperate with a profiler that rewrites return addresses on stack.
+ */
+ static void SetReturnAddressLocationResolver(
+ ReturnAddressLocationResolver return_address_resolver);
+
+ /**
+ * Releases any resources used by v8 and stops any utility threads
+ * that may be running. Note that disposing v8 is permanent; it
+ * cannot be reinitialized.
+ *
+ * It should generally not be necessary to dispose of v8 before exiting
+ * a process; this should happen automatically. It is only necessary
+ * if the process needs to reclaim the resources taken up by v8.
+ */
+ static bool Dispose();
+
+ /**
+ * Initialize the ICU library bundled with V8. The embedder should only
+ * invoke this method when using the bundled ICU. Returns true on success.
+ *
+ * If V8 was compiled with the ICU data in an external file, the location
+ * of the data file has to be provided.
+ */
+ static bool InitializeICU(const char* icu_data_file = nullptr);
+
+ /**
+ * Initialize the ICU library bundled with V8. The embedder should only
+ * invoke this method when using the bundled ICU. If V8 was compiled with
+ * the ICU data in an external file and when the default location of that
+ * file should be used, a path to the executable must be provided.
+ * Returns true on success.
+ *
+ * The default is a file called icudtl.dat side-by-side with the executable.
+ *
+ * Optionally, the location of the data file can be provided to override the
+ * default.
+ */
+ static bool InitializeICUDefaultLocation(const char* exec_path,
+ const char* icu_data_file = nullptr);
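+
+ // Illustrative sketch (not part of this header): initializing the bundled
+ // ICU from the default side-by-side icudtl.dat, assuming argv[0] holds the
+ // embedder's executable path.
+ //
+ //   v8::V8::InitializeICUDefaultLocation(argv[0]);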
+
+ /**
+ * Initialize the external startup data. The embedder only needs to
+ * invoke this method when external startup data was enabled in a build.
+ *
+ * If V8 was compiled with the startup data in an external file, then
+ * V8 needs to be given those external files during startup. There are
+ * three ways to do this:
+ * - InitializeExternalStartupData(const char*)
+ * This will look in the given directory for the file "snapshot_blob.bin".
+ * - InitializeExternalStartupDataFromFile(const char*)
+ * As above, but will directly use the given file name.
+ * - Call SetSnapshotDataBlob.
+ * This will read the blobs from the given data structure and will
+ * not perform any file IO.
+ */
+ static void InitializeExternalStartupData(const char* directory_path);
+ static void InitializeExternalStartupDataFromFile(const char* snapshot_blob);
+
+ /**
+ * Sets the v8::Platform to use. This should be invoked before V8 is
+ * initialized.
+ */
+ static void InitializePlatform(Platform* platform);
+
+ /**
+ * Clears all references to the v8::Platform. This should be invoked after
+ * V8 was disposed.
+ */
+ static void ShutdownPlatform();
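+
+ // Illustrative sketch (not part of this header): a typical embedder
+ // start-up and tear-down sequence, assuming the default platform from
+ // libplatform (v8::platform::NewDefaultPlatform).
+ //
+ //   std::unique_ptr<v8::Platform> platform =
+ //       v8::platform::NewDefaultPlatform();
+ //   v8::V8::InitializePlatform(platform.get());
+ //   v8::V8::Initialize();
+ //   // ... create Isolates and run JavaScript ...
+ //   v8::V8::Dispose();
+ //   v8::V8::ShutdownPlatform();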
+
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ //
+ // Virtual Memory Cage related API.
+ //
+ // This API is not yet stable and subject to changes in the future.
+ //
+
+ /**
+ * Initializes the virtual memory cage for V8.
+ *
+ * This must be invoked after the platform was initialized but before V8 is
+ * initialized. The virtual memory cage is torn down during platform shutdown.
+ * Returns true on success, false otherwise.
+ */
+ static bool InitializeVirtualMemoryCage();
+
+ /**
+ * Provides access to the data page allocator for the virtual memory cage.
+ *
+ * This allocator allocates pages inside the data cage part of the virtual
+ * memory cage in which data buffers such as ArrayBuffer backing stores must
+ * be allocated. Objects in this region should generally consist purely of
+ * data and not contain any pointers. It should be assumed that an attacker
+ * can corrupt data inside the cage, and so in particular the contents of
+ * pages returned by this allocator, arbitrarily and concurrently.
+ *
+ * The virtual memory cage must have been initialized before.
+ */
+ static PageAllocator* GetVirtualMemoryCageDataPageAllocator();
+#endif
+
+ /**
+ * Activate trap-based bounds checking for WebAssembly.
+ *
+ * \param use_v8_signal_handler Whether V8 should install its own signal
+ * handler or rely on the embedder's.
+ */
+ static bool EnableWebAssemblyTrapHandler(bool use_v8_signal_handler);
+
+#if defined(V8_OS_WIN)
+ /**
+ * On Win64, by default V8 does not emit unwinding data for jitted code,
+ * which means the OS cannot walk the stack frames and the system Structured
+ * Exception Handling (SEH) cannot unwind through V8-generated code:
+ * https://code.google.com/p/v8/issues/detail?id=3598.
+ *
+ * This function allows embedders to register a custom exception handler for
+ * exceptions in V8-generated code.
+ */
+ static void SetUnhandledExceptionCallback(
+ UnhandledExceptionCallback unhandled_exception_callback);
+#endif
+
+ /**
+ * Get statistics about the shared memory usage.
+ */
+ static void GetSharedMemoryStatistics(SharedMemoryStatistics* statistics);
+
+ private:
+ V8();
+
+ enum BuildConfigurationFeatures {
+ kPointerCompression = 1 << 0,
+ k31BitSmis = 1 << 1,
+ kHeapSandbox = 1 << 2,
+ kVirtualMemoryCage = 1 << 3,
+ };
+
+ /**
+ * Checks that the embedder build configuration is compatible with
+ * the V8 binary and if so initializes V8.
+ */
+ static bool Initialize(int build_config);
+
+ friend class Context;
+ template <class K, class V, class T>
+ friend class PersistentValueMapBase;
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_INITIALIZATION_H_
diff --git a/deps/v8/include/v8-inspector.h b/deps/v8/include/v8-inspector.h
index e6621ccd75..74592fdf57 100644
--- a/deps/v8/include/v8-inspector.h
+++ b/deps/v8/include/v8-inspector.h
@@ -6,12 +6,20 @@
#define V8_V8_INSPECTOR_H_
#include <stdint.h>
-#include <cctype>
+#include <cctype>
#include <memory>
-#include <unordered_map>
-#include "v8.h" // NOLINT(build/include_directory)
+#include "v8-isolate.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+class Context;
+class Name;
+class Object;
+class StackTrace;
+class Value;
+} // namespace v8
namespace v8_inspector {
@@ -320,24 +328,6 @@ class V8_EXPORT V8Inspector {
virtual std::unique_ptr<V8StackTrace> createStackTrace(
v8::Local<v8::StackTrace>) = 0;
virtual std::unique_ptr<V8StackTrace> captureStackTrace(bool fullStack) = 0;
-
- // Performance counters.
- class V8_EXPORT Counters : public std::enable_shared_from_this<Counters> {
- public:
- explicit Counters(v8::Isolate* isolate);
- ~Counters();
- const std::unordered_map<std::string, int>& getCountersMap() const {
- return m_countersMap;
- }
-
- private:
- static int* getCounterPtr(const char* name);
-
- v8::Isolate* m_isolate;
- std::unordered_map<std::string, int> m_countersMap;
- };
-
- virtual std::shared_ptr<Counters> enableCounters() = 0;
};
} // namespace v8_inspector
diff --git a/deps/v8/include/v8-internal.h b/deps/v8/include/v8-internal.h
index 0222ab2f7e..6516a16219 100644
--- a/deps/v8/include/v8-internal.h
+++ b/deps/v8/include/v8-internal.h
@@ -141,15 +141,12 @@ using ExternalPointer_t = Address;
// the same time.
enum ExternalPointerTag : uint64_t {
kExternalPointerNullTag = 0x0000000000000000,
- kArrayBufferBackingStoreTag = 0x00ff000000000000, // 0b000000011111111
- kTypedArrayExternalPointerTag = 0x017f000000000000, // 0b000000101111111
- kDataViewDataPointerTag = 0x01bf000000000000, // 0b000000110111111
- kExternalStringResourceTag = 0x01df000000000000, // 0b000000111011111
- kExternalStringResourceDataTag = 0x01ef000000000000, // 0b000000111101111
- kForeignForeignAddressTag = 0x01f7000000000000, // 0b000000111110111
- kNativeContextMicrotaskQueueTag = 0x01fb000000000000, // 0b000000111111011
- kEmbedderDataSlotPayloadTag = 0x01fd000000000000, // 0b000000111111101
- kCodeEntryPointTag = 0x01fe000000000000, // 0b000000111111110
+ kExternalStringResourceTag = 0x00ff000000000000, // 0b000000011111111
+ kExternalStringResourceDataTag = 0x017f000000000000, // 0b000000101111111
+ kForeignForeignAddressTag = 0x01bf000000000000, // 0b000000110111111
+ kNativeContextMicrotaskQueueTag = 0x01df000000000000, // 0b000000111011111
+ kEmbedderDataSlotPayloadTag = 0x01ef000000000000, // 0b000000111101111
+ kCodeEntryPointTag = 0x01f7000000000000, // 0b000000111110111
};
constexpr uint64_t kExternalPointerTagMask = 0xffff000000000000;
@@ -482,6 +479,62 @@ class Internals {
#endif // V8_COMPRESS_POINTERS
};
+constexpr bool VirtualMemoryCageIsEnabled() {
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ return true;
+#else
+ return false;
+#endif
+}
+
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+// Size of the pointer compression cage located at the start of the virtual
+// memory cage.
+constexpr size_t kVirtualMemoryCagePointerCageSize =
+ Internals::kPtrComprCageReservationSize;
+
+// Size of the virtual memory cage, excluding the guard regions surrounding it.
+constexpr size_t kVirtualMemoryCageSize = size_t{1} << 40; // 1 TB
+
+static_assert(kVirtualMemoryCageSize > kVirtualMemoryCagePointerCageSize,
+ "The virtual memory cage must be larger than the pointer "
+ "compression cage contained within it.");
+
+// Required alignment of the virtual memory cage. For simplicity, we require the
+// size of the guard regions to be a multiple of this, so that this specifies
+// the alignment of the cage including and excluding surrounding guard regions.
+// The alignment requirement is due to the pointer compression cage being
+// located at the start of the virtual memory cage.
+constexpr size_t kVirtualMemoryCageAlignment =
+ Internals::kPtrComprCageBaseAlignment;
+
+// Size of the guard regions surrounding the virtual memory cage. This assumes a
+// worst-case scenario of a 32-bit unsigned index being used to access an array
+// of 64-bit values.
+constexpr size_t kVirtualMemoryCageGuardRegionSize = size_t{32} << 30; // 32 GB
+
+static_assert((kVirtualMemoryCageGuardRegionSize %
+ kVirtualMemoryCageAlignment) == 0,
+ "The size of the virtual memory cage guard region must be a "
+ "multiple of its required alignment.");
+
+// Minimum possible size of the virtual memory cage, excluding the guard regions
+// surrounding it. Used by unit tests.
+constexpr size_t kVirtualMemoryCageMinimumSize =
+ 2 * kVirtualMemoryCagePointerCageSize;
+
+// For now, even if the virtual memory cage is enabled, we still allow backing
+// stores to be allocated outside of it as a fallback. This will simplify the
+// initial rollout. However, if the heap sandbox is also enabled, we already use
+// the "enforcing mode" of the virtual memory cage. This is useful for testing.
+#ifdef V8_HEAP_SANDBOX
+constexpr bool kAllowBackingStoresOutsideDataCage = false;
+#else
+constexpr bool kAllowBackingStoresOutsideDataCage = true;
+#endif // V8_HEAP_SANDBOX
+
+#endif // V8_VIRTUAL_MEMORY_CAGE
+
// Only perform cast check for types derived from v8::Data since
// other types do not implement the Cast method.
template <bool PerformCheck>
diff --git a/deps/v8/include/v8-isolate.h b/deps/v8/include/v8-isolate.h
new file mode 100644
index 0000000000..c018859c02
--- /dev/null
+++ b/deps/v8/include/v8-isolate.h
@@ -0,0 +1,1669 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_ISOLATE_H_
+#define INCLUDE_V8_ISOLATE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "cppgc/common.h"
+#include "v8-array-buffer.h" // NOLINT(build/include_directory)
+#include "v8-callbacks.h" // NOLINT(build/include_directory)
+#include "v8-data.h" // NOLINT(build/include_directory)
+#include "v8-debug.h" // NOLINT(build/include_directory)
+#include "v8-embedder-heap.h" // NOLINT(build/include_directory)
+#include "v8-function-callback.h" // NOLINT(build/include_directory)
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-microtask.h" // NOLINT(build/include_directory)
+#include "v8-persistent-handle.h" // NOLINT(build/include_directory)
+#include "v8-primitive.h" // NOLINT(build/include_directory)
+#include "v8-statistics.h" // NOLINT(build/include_directory)
+#include "v8-unwinder.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class CppHeap;
+class HeapProfiler;
+class MicrotaskQueue;
+class StartupData;
+class ScriptOrModule;
+class SharedArrayBuffer;
+
+namespace internal {
+class MicrotaskQueue;
+class ThreadLocalTop;
+} // namespace internal
+
+namespace metrics {
+class Recorder;
+} // namespace metrics
+
+/**
+ * A set of constraints that specifies the limits of the runtime's memory use.
+ * You must set the heap size before initializing the VM - the size cannot be
+ * adjusted after the VM is initialized.
+ *
+ * If you are using threads then you should hold the V8::Locker lock while
+ * setting the stack limit and you must set a non-default stack limit separately
+ * for each thread.
+ *
+ * All size setters below (set_max_old_generation_size_in_bytes,
+ * set_code_range_size_in_bytes, etc.) take their limits in bytes, as
+ * indicated by the _in_bytes suffix.
+ */
+class V8_EXPORT ResourceConstraints {
+ public:
+ /**
+ * Configures the constraints with reasonable default values based on the
+ * provided heap size limit. The heap size includes both the young and
+ * the old generation.
+ *
+ * \param initial_heap_size_in_bytes The initial heap size or zero.
+ * By default V8 starts with a small heap and dynamically grows it to
+ * match the set of live objects. This may lead to ineffective
+ * garbage collections at startup if the live set is large.
+ * Setting the initial heap size avoids such garbage collections.
+ * Note that this does not affect young generation garbage collections.
+ *
+ * \param maximum_heap_size_in_bytes The hard limit for the heap size.
+ * When the heap size approaches this limit, V8 will perform a series of
+ * garbage collections and invoke the NearHeapLimitCallback. If the garbage
+ * collections do not help and the callback does not increase the limit,
+ * then V8 will crash with V8::FatalProcessOutOfMemory.
+ */
+ void ConfigureDefaultsFromHeapSize(size_t initial_heap_size_in_bytes,
+ size_t maximum_heap_size_in_bytes);
+
+ /**
+ * Configures the constraints with reasonable default values based on the
+ * capabilities of the current device the VM is running on.
+ *
+ * \param physical_memory The total amount of physical memory on the current
+ * device, in bytes.
+ * \param virtual_memory_limit The amount of virtual memory on the current
+ * device, in bytes, or zero, if there is no limit.
+ */
+ void ConfigureDefaults(uint64_t physical_memory,
+ uint64_t virtual_memory_limit);
+
+ /**
+ * The address beyond which the VM's stack may not grow.
+ */
+ uint32_t* stack_limit() const { return stack_limit_; }
+ void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
+
+ /**
+ * The amount of virtual memory reserved for generated code. This is relevant
+ * for 64-bit architectures that rely on code range for calls in code.
+ *
+ * When V8_COMPRESS_POINTERS_IN_SHARED_CAGE is defined, there is a shared
+ * process-wide code range that is lazily initialized. This value is used to
+ * configure that shared code range when the first Isolate is
+ * created. Subsequent Isolates ignore this value.
+ */
+ size_t code_range_size_in_bytes() const { return code_range_size_; }
+ void set_code_range_size_in_bytes(size_t limit) { code_range_size_ = limit; }
+
+ /**
+ * The maximum size of the old generation.
+ * When the old generation approaches this limit, V8 will perform a series of
+ * garbage collections and invoke the NearHeapLimitCallback.
+ * If the garbage collections do not help and the callback does not
+ * increase the limit, then V8 will crash with V8::FatalProcessOutOfMemory.
+ */
+ size_t max_old_generation_size_in_bytes() const {
+ return max_old_generation_size_;
+ }
+ void set_max_old_generation_size_in_bytes(size_t limit) {
+ max_old_generation_size_ = limit;
+ }
+
+ /**
+ * The maximum size of the young generation, which consists of two semi-spaces
+ * and a large object space. This affects the frequency of Scavenge garbage
+ * collections and should typically be much smaller than the old generation.
+ */
+ size_t max_young_generation_size_in_bytes() const {
+ return max_young_generation_size_;
+ }
+ void set_max_young_generation_size_in_bytes(size_t limit) {
+ max_young_generation_size_ = limit;
+ }
+
+ size_t initial_old_generation_size_in_bytes() const {
+ return initial_old_generation_size_;
+ }
+ void set_initial_old_generation_size_in_bytes(size_t initial_size) {
+ initial_old_generation_size_ = initial_size;
+ }
+
+ size_t initial_young_generation_size_in_bytes() const {
+ return initial_young_generation_size_;
+ }
+ void set_initial_young_generation_size_in_bytes(size_t initial_size) {
+ initial_young_generation_size_ = initial_size;
+ }
+
+ private:
+ static constexpr size_t kMB = 1048576u;
+ size_t code_range_size_ = 0;
+ size_t max_old_generation_size_ = 0;
+ size_t max_young_generation_size_ = 0;
+ size_t initial_old_generation_size_ = 0;
+ size_t initial_young_generation_size_ = 0;
+ uint32_t* stack_limit_ = nullptr;
+};
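+
+// Illustrative sketch (not part of this header): capping a new isolate's heap
+// at 512 MB through CreateParams (declared on Isolate below); the sizes are
+// example values.
+//
+//   v8::Isolate::CreateParams params;
+//   params.constraints.ConfigureDefaultsFromHeapSize(
+//       /*initial_heap_size_in_bytes=*/0,
+//       /*maximum_heap_size_in_bytes=*/512u * 1024 * 1024);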
+
+/**
+ * Option flags passed to the SetRAILMode function.
+ * See documentation https://developers.google.com/web/tools/chrome-devtools/
+ * profile/evaluate-performance/rail
+ */
+enum RAILMode : unsigned {
+ // Response performance mode: In this mode very low virtual machine latency
+ // is provided. V8 will try to avoid JavaScript execution interruptions.
+ // Throughput may be throttled.
+ PERFORMANCE_RESPONSE,
+ // Animation performance mode: In this mode low virtual machine latency is
+ // provided. V8 will try to avoid as many JavaScript execution interruptions
+ // as possible. Throughput may be throttled. This is the default mode.
+ PERFORMANCE_ANIMATION,
+ // Idle performance mode: The embedder is idle. V8 can complete deferred work
+ // in this mode.
+ PERFORMANCE_IDLE,
+ // Load performance mode: In this mode high throughput is provided. V8 may
+ // turn off latency optimizations.
+ PERFORMANCE_LOAD
+};
+
+/**
+ * Memory pressure level for the MemoryPressureNotification.
+ * kNone hints V8 that there is no memory pressure.
+ * kModerate hints V8 to speed up incremental garbage collection at the cost of
+ * higher latency due to garbage collection pauses.
+ * kCritical hints V8 to free memory as soon as possible. Garbage collection
+ * pauses at this level will be large.
+ */
+enum class MemoryPressureLevel { kNone, kModerate, kCritical };
+
+/**
+ * Isolate represents an isolated instance of the V8 engine. V8 isolates have
+ * completely separate states. Objects from one isolate must not be used in
+ * other isolates. The embedder can create multiple isolates and use them in
+ * parallel in multiple threads. An isolate can be entered by at most one
+ * thread at any given time. The Locker/Unlocker API must be used to
+ * synchronize.
+ */
+class V8_EXPORT Isolate {
+ public:
+ /**
+ * Initial configuration parameters for a new Isolate.
+ */
+ struct V8_EXPORT CreateParams {
+ CreateParams();
+ ~CreateParams();
+
+ /**
+ * Allows the host application to provide the address of a function that is
+ * notified each time code is added, moved or removed.
+ */
+ JitCodeEventHandler code_event_handler = nullptr;
+
+ /**
+ * ResourceConstraints to use for the new Isolate.
+ */
+ ResourceConstraints constraints;
+
+ /**
+ * Explicitly specify a startup snapshot blob. The embedder owns the blob.
+ */
+ StartupData* snapshot_blob = nullptr;
+
+ /**
+ * Enables the host application to provide a mechanism for recording
+ * statistics counters.
+ */
+ CounterLookupCallback counter_lookup_callback = nullptr;
+
+ /**
+ * Enables the host application to provide a mechanism for recording
+ * histograms. The CreateHistogram function returns a
+ * histogram which will later be passed to the AddHistogramSample
+ * function.
+ */
+ CreateHistogramCallback create_histogram_callback = nullptr;
+ AddHistogramSampleCallback add_histogram_sample_callback = nullptr;
+
+ /**
+ * The ArrayBuffer::Allocator to use for allocating and freeing the backing
+ * store of ArrayBuffers.
+ *
+ * If the shared_ptr version is used, the Isolate instance and every
+ * |BackingStore| allocated using this allocator hold a std::shared_ptr
+ * to the allocator, in order to facilitate lifetime
+ * management for the allocator instance.
+ */
+ ArrayBuffer::Allocator* array_buffer_allocator = nullptr;
+ std::shared_ptr<ArrayBuffer::Allocator> array_buffer_allocator_shared;
+
+ /**
+ * Specifies an optional nullptr-terminated array of raw addresses in the
+ * embedder that V8 can match against during serialization and use for
+ * deserialization. This array and its content must stay valid for the
+ * entire lifetime of the isolate.
+ */
+ const intptr_t* external_references = nullptr;
+
+ /**
+ * Whether calling Atomics.wait (a function that may block) is allowed in
+ * this isolate. This can also be configured via SetAllowAtomicsWait.
+ */
+ bool allow_atomics_wait = true;
+
+ /**
+ * Termination is postponed when there is no active SafeForTerminationScope.
+ */
+ bool only_terminate_in_safe_scope = false;
+
+ /**
+ * The following parameters describe the offsets for addressing type info
+ * for wrapped API objects and are used by the fast C API
+ * (for details see v8-fast-api-calls.h).
+ */
+ int embedder_wrapper_type_index = -1;
+ int embedder_wrapper_object_index = -1;
+ };
+
+ /**
+ * Stack-allocated class which sets the isolate for all operations
+ * executed within a local scope.
+ */
+ class V8_EXPORT V8_NODISCARD Scope {
+ public:
+ explicit Scope(Isolate* isolate) : isolate_(isolate) { isolate->Enter(); }
+
+ ~Scope() { isolate_->Exit(); }
+
+ // Prevent copying of Scope objects.
+ Scope(const Scope&) = delete;
+ Scope& operator=(const Scope&) = delete;
+
+ private:
+ Isolate* const isolate_;
+ };
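+
+ // Illustrative sketch (not part of this header): entering an isolate for
+ // the duration of a block.
+ //
+ //   {
+ //     v8::Isolate::Scope isolate_scope(isolate);
+ //     // |isolate| is entered here ...
+ //   }  // ... and exited again here.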
+
+ /**
+ * Asserts that no JavaScript code is invoked.
+ */
+ class V8_EXPORT V8_NODISCARD DisallowJavascriptExecutionScope {
+ public:
+ enum OnFailure { CRASH_ON_FAILURE, THROW_ON_FAILURE, DUMP_ON_FAILURE };
+
+ DisallowJavascriptExecutionScope(Isolate* isolate, OnFailure on_failure);
+ ~DisallowJavascriptExecutionScope();
+
+ // Prevent copying of Scope objects.
+ DisallowJavascriptExecutionScope(const DisallowJavascriptExecutionScope&) =
+ delete;
+ DisallowJavascriptExecutionScope& operator=(
+ const DisallowJavascriptExecutionScope&) = delete;
+
+ private:
+ OnFailure on_failure_;
+ Isolate* isolate_;
+
+ bool was_execution_allowed_assert_;
+ bool was_execution_allowed_throws_;
+ bool was_execution_allowed_dump_;
+ };
+
+ /**
+ * Introduces an exception to a surrounding DisallowJavascriptExecutionScope,
+ * re-allowing JavaScript execution while this scope is active.
+ */
+ class V8_EXPORT V8_NODISCARD AllowJavascriptExecutionScope {
+ public:
+ explicit AllowJavascriptExecutionScope(Isolate* isolate);
+ ~AllowJavascriptExecutionScope();
+
+ // Prevent copying of Scope objects.
+ AllowJavascriptExecutionScope(const AllowJavascriptExecutionScope&) =
+ delete;
+ AllowJavascriptExecutionScope& operator=(
+ const AllowJavascriptExecutionScope&) = delete;
+
+ private:
+ Isolate* isolate_;
+ bool was_execution_allowed_assert_;
+ bool was_execution_allowed_throws_;
+ bool was_execution_allowed_dump_;
+ };
+
+ /**
+ * Do not run microtasks while this scope is active, even if microtasks are
+ * automatically executed otherwise.
+ */
+ class V8_EXPORT V8_NODISCARD SuppressMicrotaskExecutionScope {
+ public:
+ explicit SuppressMicrotaskExecutionScope(
+ Isolate* isolate, MicrotaskQueue* microtask_queue = nullptr);
+ ~SuppressMicrotaskExecutionScope();
+
+ // Prevent copying of Scope objects.
+ SuppressMicrotaskExecutionScope(const SuppressMicrotaskExecutionScope&) =
+ delete;
+ SuppressMicrotaskExecutionScope& operator=(
+ const SuppressMicrotaskExecutionScope&) = delete;
+
+ private:
+ internal::Isolate* const isolate_;
+ internal::MicrotaskQueue* const microtask_queue_;
+ internal::Address previous_stack_height_;
+
+ friend class internal::ThreadLocalTop;
+ };
+
+ /**
+ * This scope allows terminations inside direct V8 API calls and forbids them
+ * inside any recursive API calls without an explicit SafeForTerminationScope.
+ */
+ class V8_EXPORT V8_NODISCARD SafeForTerminationScope {
+ public:
+ explicit SafeForTerminationScope(v8::Isolate* isolate);
+ ~SafeForTerminationScope();
+
+ // Prevent copying of Scope objects.
+ SafeForTerminationScope(const SafeForTerminationScope&) = delete;
+ SafeForTerminationScope& operator=(const SafeForTerminationScope&) = delete;
+
+ private:
+ internal::Isolate* isolate_;
+ bool prev_value_;
+ };
+
+ /**
+ * Types of garbage collections that can be requested via
+ * RequestGarbageCollectionForTesting.
+ */
+ enum GarbageCollectionType {
+ kFullGarbageCollection,
+ kMinorGarbageCollection
+ };
+
+ /**
+ * Features reported via the SetUseCounterCallback callback. Do not change
+ * assigned numbers of existing items; add new features to the end of this
+ * list.
+ */
+ enum UseCounterFeature {
+ kUseAsm = 0,
+ kBreakIterator = 1,
+ kLegacyConst = 2,
+ kMarkDequeOverflow = 3,
+ kStoreBufferOverflow = 4,
+ kSlotsBufferOverflow = 5,
+ kObjectObserve = 6,
+ kForcedGC = 7,
+ kSloppyMode = 8,
+ kStrictMode = 9,
+ kStrongMode = 10,
+ kRegExpPrototypeStickyGetter = 11,
+ kRegExpPrototypeToString = 12,
+ kRegExpPrototypeUnicodeGetter = 13,
+ kIntlV8Parse = 14,
+ kIntlPattern = 15,
+ kIntlResolved = 16,
+ kPromiseChain = 17,
+ kPromiseAccept = 18,
+ kPromiseDefer = 19,
+ kHtmlCommentInExternalScript = 20,
+ kHtmlComment = 21,
+ kSloppyModeBlockScopedFunctionRedefinition = 22,
+ kForInInitializer = 23,
+ kArrayProtectorDirtied = 24,
+ kArraySpeciesModified = 25,
+ kArrayPrototypeConstructorModified = 26,
+ kArrayInstanceProtoModified = 27,
+ kArrayInstanceConstructorModified = 28,
+ kLegacyFunctionDeclaration = 29,
+ kRegExpPrototypeSourceGetter = 30, // Unused.
+ kRegExpPrototypeOldFlagGetter = 31, // Unused.
+ kDecimalWithLeadingZeroInStrictMode = 32,
+ kLegacyDateParser = 33,
+ kDefineGetterOrSetterWouldThrow = 34,
+ kFunctionConstructorReturnedUndefined = 35,
+ kAssigmentExpressionLHSIsCallInSloppy = 36,
+ kAssigmentExpressionLHSIsCallInStrict = 37,
+ kPromiseConstructorReturnedUndefined = 38,
+ kConstructorNonUndefinedPrimitiveReturn = 39,
+ kLabeledExpressionStatement = 40,
+ kLineOrParagraphSeparatorAsLineTerminator = 41,
+ kIndexAccessor = 42,
+ kErrorCaptureStackTrace = 43,
+ kErrorPrepareStackTrace = 44,
+ kErrorStackTraceLimit = 45,
+ kWebAssemblyInstantiation = 46,
+ kDeoptimizerDisableSpeculation = 47,
+ kArrayPrototypeSortJSArrayModifiedPrototype = 48,
+ kFunctionTokenOffsetTooLongForToString = 49,
+ kWasmSharedMemory = 50,
+ kWasmThreadOpcodes = 51,
+ kAtomicsNotify = 52, // Unused.
+ kAtomicsWake = 53, // Unused.
+ kCollator = 54,
+ kNumberFormat = 55,
+ kDateTimeFormat = 56,
+ kPluralRules = 57,
+ kRelativeTimeFormat = 58,
+ kLocale = 59,
+ kListFormat = 60,
+ kSegmenter = 61,
+ kStringLocaleCompare = 62,
+ kStringToLocaleUpperCase = 63,
+ kStringToLocaleLowerCase = 64,
+ kNumberToLocaleString = 65,
+ kDateToLocaleString = 66,
+ kDateToLocaleDateString = 67,
+ kDateToLocaleTimeString = 68,
+ kAttemptOverrideReadOnlyOnPrototypeSloppy = 69,
+ kAttemptOverrideReadOnlyOnPrototypeStrict = 70,
+ kOptimizedFunctionWithOneShotBytecode = 71, // Unused.
+ kRegExpMatchIsTrueishOnNonJSRegExp = 72,
+ kRegExpMatchIsFalseishOnJSRegExp = 73,
+ kDateGetTimezoneOffset = 74, // Unused.
+ kStringNormalize = 75,
+ kCallSiteAPIGetFunctionSloppyCall = 76,
+ kCallSiteAPIGetThisSloppyCall = 77,
+ kRegExpMatchAllWithNonGlobalRegExp = 78,
+ kRegExpExecCalledOnSlowRegExp = 79,
+ kRegExpReplaceCalledOnSlowRegExp = 80,
+ kDisplayNames = 81,
+ kSharedArrayBufferConstructed = 82,
+ kArrayPrototypeHasElements = 83,
+ kObjectPrototypeHasElements = 84,
+ kNumberFormatStyleUnit = 85,
+ kDateTimeFormatRange = 86,
+ kDateTimeFormatDateTimeStyle = 87,
+ kBreakIteratorTypeWord = 88,
+ kBreakIteratorTypeLine = 89,
+ kInvalidatedArrayBufferDetachingProtector = 90,
+ kInvalidatedArrayConstructorProtector = 91,
+ kInvalidatedArrayIteratorLookupChainProtector = 92,
+ kInvalidatedArraySpeciesLookupChainProtector = 93,
+ kInvalidatedIsConcatSpreadableLookupChainProtector = 94,
+ kInvalidatedMapIteratorLookupChainProtector = 95,
+ kInvalidatedNoElementsProtector = 96,
+ kInvalidatedPromiseHookProtector = 97,
+ kInvalidatedPromiseResolveLookupChainProtector = 98,
+ kInvalidatedPromiseSpeciesLookupChainProtector = 99,
+ kInvalidatedPromiseThenLookupChainProtector = 100,
+ kInvalidatedRegExpSpeciesLookupChainProtector = 101,
+ kInvalidatedSetIteratorLookupChainProtector = 102,
+ kInvalidatedStringIteratorLookupChainProtector = 103,
+ kInvalidatedStringLengthOverflowLookupChainProtector = 104,
+ kInvalidatedTypedArraySpeciesLookupChainProtector = 105,
+ kWasmSimdOpcodes = 106,
+ kVarRedeclaredCatchBinding = 107,
+ kWasmRefTypes = 108,
+ kWasmBulkMemory = 109, // Unused.
+ kWasmMultiValue = 110,
+ kWasmExceptionHandling = 111,
+ kInvalidatedMegaDOMProtector = 112,
+
+ // If you add new values here, you'll also need to update Chromium's:
+ // web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to
+ // this list need to be landed first, then changes on the Chromium side.
+ kUseCounterFeatureCount // This enum value must be last.
+ };
+
+ enum MessageErrorLevel {
+ kMessageLog = (1 << 0),
+ kMessageDebug = (1 << 1),
+ kMessageInfo = (1 << 2),
+ kMessageError = (1 << 3),
+ kMessageWarning = (1 << 4),
+ kMessageAll = kMessageLog | kMessageDebug | kMessageInfo | kMessageError |
+ kMessageWarning,
+ };
+
+ using UseCounterCallback = void (*)(Isolate* isolate,
+ UseCounterFeature feature);
+
+ /**
+ * Allocates a new isolate but does not initialize it. Does not change the
+ * currently entered isolate.
+ *
+ * Only Isolate::GetData() and Isolate::SetData(), which access the
+ * embedder-controlled parts of the isolate, are allowed to be called on the
+ * uninitialized isolate. To initialize the isolate, call
+ * Isolate::Initialize().
+ *
+ * When an isolate is no longer used, its resources should be freed
+ * by calling Dispose(). Using the delete operator is not allowed.
+ *
+ * V8::Initialize() must have run prior to this.
+ */
+ static Isolate* Allocate();
+
+ /**
+ * Initialize an Isolate previously allocated by Isolate::Allocate().
+ */
+ static void Initialize(Isolate* isolate, const CreateParams& params);
+
+ /**
+ * Creates a new isolate. Does not change the currently entered
+ * isolate.
+ *
+ * When an isolate is no longer used, its resources should be freed
+ * by calling Dispose(). Using the delete operator is not allowed.
+ *
+ * V8::Initialize() must have run prior to this.
+ */
+ static Isolate* New(const CreateParams& params);
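+
+ // Illustrative sketch (not part of this header): creating and disposing an
+ // isolate, assuming the default backing-store allocator
+ // (v8::ArrayBuffer::Allocator::NewDefaultAllocator).
+ //
+ //   v8::Isolate::CreateParams params;
+ //   params.array_buffer_allocator_shared.reset(
+ //       v8::ArrayBuffer::Allocator::NewDefaultAllocator());
+ //   v8::Isolate* isolate = v8::Isolate::New(params);
+ //   // ... use the isolate ...
+ //   isolate->Dispose();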
+
+ /**
+ * Returns the entered isolate for the current thread or NULL in
+ * case there is no current isolate.
+ *
+ * This method must not be invoked before V8::Initialize() was invoked.
+ */
+ static Isolate* GetCurrent();
+
+ /**
+ * Returns the entered isolate for the current thread or NULL in
+ * case there is no current isolate.
+ *
+ * No checks are performed by this method.
+ */
+ static Isolate* TryGetCurrent();
+
+ /**
+ * Clears the set of objects held strongly by the heap. This set of
+ * objects is originally built when a WeakRef is created or
+ * successfully dereferenced.
+ *
+ * This is invoked automatically after microtasks are run. See
+ * MicrotasksPolicy for when microtasks are run.
+ *
+ * This needs to be manually invoked only if the embedder is manually running
+ * microtasks via a custom MicrotaskQueue class's PerformCheckpoint. In that
+ * case, it is the embedder's responsibility to make this call at a time which
+ * does not interrupt synchronous ECMAScript code execution.
+ */
+ void ClearKeptObjects();
+
+ /**
+ * Custom callback used by embedders to help V8 determine whether it should
+ * abort when an exception is thrown and no internal handler is predicted to
+ * catch it. If --abort-on-uncaught-exception is used on the command line,
+ * then V8 will abort if either:
+ * - no custom callback is set.
+ * - the custom callback set returns true.
+ * Otherwise, the custom callback will not be called and V8 will not abort.
+ */
+ using AbortOnUncaughtExceptionCallback = bool (*)(Isolate*);
+ void SetAbortOnUncaughtExceptionCallback(
+ AbortOnUncaughtExceptionCallback callback);
+
+ /**
+ * This specifies the callback called by the dynamic
+ * import() language feature to load modules.
+ */
+ V8_DEPRECATED(
+ "Use the version of SetHostImportModuleDynamicallyCallback that takes a "
+ "HostImportModuleDynamicallyWithImportAssertionsCallback instead")
+ void SetHostImportModuleDynamicallyCallback(
+ HostImportModuleDynamicallyCallback callback);
+
+ /**
+ * This specifies the callback called by the dynamic
+ * import() language feature to load modules.
+ */
+ void SetHostImportModuleDynamicallyCallback(
+ HostImportModuleDynamicallyWithImportAssertionsCallback callback);
+
+ /**
+ * This specifies the callback called by the import.meta
+ * language feature to retrieve host-defined meta data for a module.
+ */
+ void SetHostInitializeImportMetaObjectCallback(
+ HostInitializeImportMetaObjectCallback callback);
+
+ /**
+ * This specifies the callback called when the stack property of Error
+ * is accessed.
+ */
+ void SetPrepareStackTraceCallback(PrepareStackTraceCallback callback);
+
+ /**
+ * Optional notification that the system is running low on memory.
+ * V8 uses these notifications to guide heuristics.
+ * It is allowed to call this function from another thread while
+ * the isolate is executing long-running JavaScript code.
+ */
+ void MemoryPressureNotification(MemoryPressureLevel level);
+
+ /**
+ * Drop non-essential caches. Should only be called from testing code.
+ * The method can potentially block for a long time and does not necessarily
+ * trigger GC.
+ */
+ void ClearCachesForTesting();
+
+ /**
+ * Methods below this point require holding a lock (using Locker) in
+ * a multi-threaded environment.
+ */
+
+ /**
+ * Sets this isolate as the entered one for the current thread.
+ * Saves the previously entered one (if any), so that it can be
+ * restored when exiting. Re-entering an isolate is allowed.
+ */
+ void Enter();
+
+ /**
+ * Exits this isolate by restoring the previously entered one in the
+ * current thread. The isolate may still stay the same, if it was
+ * entered more than once.
+ *
+ * Requires: this == Isolate::GetCurrent().
+ */
+ void Exit();
+
+ /**
+ * Disposes the isolate. The isolate must not be entered by any
+ * thread to be disposable.
+ */
+ void Dispose();
+
+ /**
+ * Dumps activated low-level V8 internal stats. This can be used instead
+ * of performing a full isolate disposal.
+ */
+ void DumpAndResetStats();
+
+ /**
+ * Discards all V8 thread-specific data for the Isolate. Should be used
+ * if a thread is terminating and it has used an Isolate that will outlive
+ * the thread. All thread-specific data for an Isolate is discarded when
+ * the Isolate is disposed, so this call is pointless if the Isolate is
+ * about to be disposed anyway.
+ */
+ void DiscardThreadSpecificMetadata();
+
+ /**
+ * Associate embedder-specific data with the isolate. |slot| has to be
+ * between 0 and GetNumberOfDataSlots() - 1.
+ */
+ V8_INLINE void SetData(uint32_t slot, void* data);
+
+ /**
+ * Retrieve embedder-specific data from the isolate.
+ * Returns NULL if SetData has never been called for the given |slot|.
+ */
+ V8_INLINE void* GetData(uint32_t slot);
+
+ /**
+ * Returns the maximum number of available embedder data slots. Valid slots
+ * are in the range [0, GetNumberOfDataSlots() - 1].
+ */
+ V8_INLINE static uint32_t GetNumberOfDataSlots();
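+
+ // Illustrative sketch (not part of this header): stashing embedder state in
+ // data slot 0. MyEmbedderState is a hypothetical embedder type; the pointer
+ // must stay valid for as long as it may be read back.
+ //
+ //   isolate->SetData(0, &my_embedder_state);
+ //   auto* state = static_cast<MyEmbedderState*>(isolate->GetData(0));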
+
+ /**
+ * Returns data that was previously attached to the isolate snapshot via
+ * SnapshotCreator, and removes the reference to it.
+ * A repeated call with the same index returns an empty MaybeLocal.
+ */
+ template <class T>
+ V8_INLINE MaybeLocal<T> GetDataFromSnapshotOnce(size_t index);
+
+ /**
+ * Get statistics about the heap memory usage.
+ */
+ void GetHeapStatistics(HeapStatistics* heap_statistics);
+
+ /**
+ * Returns the number of spaces in the heap.
+ */
+ size_t NumberOfHeapSpaces();
+
+ /**
+ * Get the memory usage of a space in the heap.
+ *
+ * \param space_statistics The HeapSpaceStatistics object to fill in
+ * statistics.
+ * \param index The index of the space to get statistics from, which ranges
+ * from 0 to NumberOfHeapSpaces() - 1.
+ * \returns true on success.
+ */
+ bool GetHeapSpaceStatistics(HeapSpaceStatistics* space_statistics,
+ size_t index);
+
+ /**
+ * Returns the number of types of objects tracked in the heap at GC.
+ */
+ size_t NumberOfTrackedHeapObjectTypes();
+
+ /**
+ * Get statistics about objects in the heap.
+ *
+ * \param object_statistics The HeapObjectStatistics object to fill in
+ * statistics of objects of given type, which were live in the previous GC.
+ * \param type_index The index of the type of object to fill details about,
+ * which ranges from 0 to NumberOfTrackedHeapObjectTypes() - 1.
+ * \returns true on success.
+ */
+ bool GetHeapObjectStatisticsAtLastGC(HeapObjectStatistics* object_statistics,
+ size_t type_index);
+
+ /**
+ * Get statistics about code and its metadata in the heap.
+ *
+ * \param object_statistics The HeapCodeStatistics object to fill in
+ * statistics of code, bytecode and their metadata.
+ * \returns true on success.
+ */
+ bool GetHeapCodeAndMetadataStatistics(HeapCodeStatistics* object_statistics);
+
+ /**
+ * This API is experimental and may change significantly.
+ *
+ * Enqueues a memory measurement request and invokes the delegate with the
+ * results.
+ *
+ * \param delegate the delegate that defines which contexts to measure and
+ * reports the results.
+ *
+ * \param execution the promptness with which to execute the measurement.
+ * The kEager value is expected to be used only in tests.
+ */
+ bool MeasureMemory(
+ std::unique_ptr<MeasureMemoryDelegate> delegate,
+ MeasureMemoryExecution execution = MeasureMemoryExecution::kDefault);
+
+ /**
+ * Get a call stack sample from the isolate.
+ * \param state Execution state.
+ * \param frames Caller allocated buffer to store stack frames.
+ * \param frames_limit Maximum number of frames to capture. The buffer must
+ * be large enough to hold the number of frames.
+ * \param sample_info The sample info is filled in by the function and
+ * provides the number of actually captured stack frames and
+ * the current VM state.
+ * \note GetStackSample should only be called when the JS thread is paused or
+ * interrupted. Otherwise the behavior is undefined.
+ */
+ void GetStackSample(const RegisterState& state, void** frames,
+ size_t frames_limit, SampleInfo* sample_info);
+
+ /**
+ * Adjusts the amount of registered external memory. Used to give V8 an
+ * indication of the amount of externally allocated memory that is kept alive
+ * by JavaScript objects. V8 uses this to decide when to perform global
+ * garbage collections. Registering externally allocated memory will trigger
+ * global garbage collections more often than it would otherwise in an attempt
+ * to garbage collect the JavaScript objects that keep the externally
+ * allocated memory alive.
+ *
+ * \param change_in_bytes the change in externally allocated memory that is
+ * kept alive by JavaScript objects.
+ * \returns the adjusted value.
+ */
+ int64_t AdjustAmountOfExternalAllocatedMemory(int64_t change_in_bytes);
+
+ /**
+ * Returns the number of phantom handles without callbacks that were reset
+ * by the garbage collector since the last call to this function.
+ */
+ size_t NumberOfPhantomHandleResetsSinceLastCall();
+
+ /**
+ * Returns the heap profiler for this isolate. Will return NULL until the
+ * isolate is initialized.
+ */
+ HeapProfiler* GetHeapProfiler();
+
+ /**
+ * Tells the VM whether the embedder is idle or not.
+ */
+ void SetIdle(bool is_idle);
+
+ /** Returns the ArrayBuffer::Allocator used in this isolate. */
+ ArrayBuffer::Allocator* GetArrayBufferAllocator();
+
+ /** Returns true if this isolate has a current context. */
+ bool InContext();
+
+ /**
+ * Returns the context of the currently running JavaScript, or the context
+ * on the top of the stack if no JavaScript is running.
+ */
+ Local<Context> GetCurrentContext();
+
+ /**
+ * Returns either the last context entered through V8's C++ API, or the
+ * context of the currently running microtask while processing microtasks.
+ * If a context is entered while executing a microtask, that context is
+ * returned.
+ */
+ Local<Context> GetEnteredOrMicrotaskContext();
+
+ /**
+ * Returns the Context that corresponds to the Incumbent realm in HTML spec.
+ * https://html.spec.whatwg.org/multipage/webappapis.html#incumbent
+ */
+ Local<Context> GetIncumbentContext();
+
+ /**
+ * Schedules a v8::Exception::Error with the given message.
+ * See ThrowException for more details. Templatized to provide compile-time
+ * errors in case of too long strings (see v8::String::NewFromUtf8Literal).
+ */
+ template <int N>
+ Local<Value> ThrowError(const char (&message)[N]) {
+ return ThrowError(String::NewFromUtf8Literal(this, message));
+ }
+ Local<Value> ThrowError(Local<String> message);
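+
+ // Illustrative sketch (not part of this header): scheduling an error from
+ // inside an embedder callback and returning to V8 right away.
+ //
+ //   isolate->ThrowError("invalid argument");
+ //   return;  // No further JS operations until the exception is handled.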
+
+ /**
+ * Schedules an exception to be thrown when returning to JavaScript. When an
+ * exception has been scheduled it is illegal to invoke any JavaScript
+ * operation; the caller must return immediately and only after the exception
+ * has been handled does it become legal to invoke JavaScript operations.
+ */
+ Local<Value> ThrowException(Local<Value> exception);
+
+ using GCCallback = void (*)(Isolate* isolate, GCType type,
+ GCCallbackFlags flags);
+ using GCCallbackWithData = void (*)(Isolate* isolate, GCType type,
+ GCCallbackFlags flags, void* data);
+
+ /**
+ * Enables the host application to receive a notification before a
+ * garbage collection. Allocations are allowed in the callback function,
+ * but the callback is not re-entrant: if an allocation inside it
+ * triggers a garbage collection, the callback won't be called again.
+ * It is possible to specify a GCType filter for your callback, but it is
+ * not possible to register the same callback function twice with
+ * different GCType filters.
+ */
+ void AddGCPrologueCallback(GCCallbackWithData callback, void* data = nullptr,
+ GCType gc_type_filter = kGCTypeAll);
+ void AddGCPrologueCallback(GCCallback callback,
+ GCType gc_type_filter = kGCTypeAll);
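+
+ // Illustrative sketch (not part of this header): observing full GCs only;
+ // OnGCPrologue is a hypothetical embedder hook.
+ //
+ //   void OnGCPrologue(v8::Isolate* isolate, v8::GCType type,
+ //                     v8::GCCallbackFlags flags) {
+ //     // e.g. record a timestamp before the collection starts.
+ //   }
+ //   isolate->AddGCPrologueCallback(OnGCPrologue,
+ //                                  v8::kGCTypeMarkSweepCompact);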
+
+ /**
+ * Removes a callback that was installed by AddGCPrologueCallback.
+ */
+ void RemoveGCPrologueCallback(GCCallbackWithData callback,
+ void* data = nullptr);
+ void RemoveGCPrologueCallback(GCCallback callback);
+
+ /**
+ * Sets the embedder heap tracer for the isolate.
+ */
+ void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
+
+ /**
+ * Gets the currently active heap tracer for the isolate.
+ */
+ EmbedderHeapTracer* GetEmbedderHeapTracer();
+
+ /**
+ * Sets an embedder roots handle that V8 should consider when performing
+ * non-unified heap garbage collections.
+ *
+ * Using only EmbedderHeapTracer automatically sets up a default handler.
+ * The intended use case is for setting a custom handler after invoking
+ * `AttachCppHeap()`.
+ *
+ * V8 does not take ownership of the handler.
+ */
+ void SetEmbedderRootsHandler(EmbedderRootsHandler* handler);
+
+ /**
+ * Attaches a managed C++ heap as an extension to the JavaScript heap. The
+ * embedder maintains ownership of the CppHeap. At most one C++ heap can be
+ * attached to V8.
+ *
+ * This is an experimental feature and may still change significantly.
+ */
+ void AttachCppHeap(CppHeap*);
+
+ /**
+ * Detaches a managed C++ heap if one was attached using `AttachCppHeap()`.
+ *
+ * This is an experimental feature and may still change significantly.
+ */
+ void DetachCppHeap();
+
+ /**
+ * This is an experimental feature and may still change significantly.
+ *
+ * \returns the C++ heap managed by V8. Only available if such a heap has been
+ * attached using `AttachCppHeap()`.
+ */
+ CppHeap* GetCppHeap() const;
+
+ /**
+ * Used by |AtomicsWaitCallback| to denote the type of event it receives.
+ */
+ enum class AtomicsWaitEvent {
+ /** Indicates that this call is happening before waiting. */
+ kStartWait,
+ /** `Atomics.wait()` finished because of an `Atomics.wake()` call. */
+ kWokenUp,
+ /** `Atomics.wait()` finished because it timed out. */
+ kTimedOut,
+ /** `Atomics.wait()` was interrupted through |TerminateExecution()|. */
+ kTerminatedExecution,
+ /** `Atomics.wait()` was stopped through |AtomicsWaitWakeHandle|. */
+ kAPIStopped,
+ /** `Atomics.wait()` did not wait, as the initial condition was not met. */
+ kNotEqual
+ };
+
+ /**
+ * Passed to |AtomicsWaitCallback| as a means of stopping an ongoing
+ * `Atomics.wait` call.
+ */
+ class V8_EXPORT AtomicsWaitWakeHandle {
+ public:
+ /**
+ * Stop this `Atomics.wait()` call and call the |AtomicsWaitCallback|
+ * with |kAPIStopped|.
+ *
+ * This function may be called from another thread. The caller has to ensure
+ * through proper synchronization that it is not called after
+ * the finishing |AtomicsWaitCallback|.
+ *
+ * Note that the ECMAScript specification does not plan for the possibility
+ * of wakeups that come neither from a timeout nor from an `Atomics.wake()`
+ * call, so this may invalidate assumptions made by existing code.
+ * The embedder may accordingly wish to schedule an exception in the
+ * finishing |AtomicsWaitCallback|.
+ */
+ void Wake();
+ };
+
+ /**
+ * Embedder callback for `Atomics.wait()` that can be added through
+ * |SetAtomicsWaitCallback|.
+ *
+ * This will be called just before starting to wait with the |event| value
+ * |kStartWait| and after finishing waiting with one of the other
+ * values of |AtomicsWaitEvent| inside of an `Atomics.wait()` call.
+ *
+ * |array_buffer| will refer to the underlying SharedArrayBuffer,
+ * |offset_in_bytes| to the location of the waited-on memory address inside
+ * the SharedArrayBuffer.
+ *
+ * |value| and |timeout_in_ms| will be the values passed to
+ * the `Atomics.wait()` call. If no timeout was used, |timeout_in_ms|
+ * will be `INFINITY`.
+ *
+ * In the |kStartWait| callback, |stop_handle| will be an object that
+ * is only valid until the corresponding finishing callback and that
+ * can be used to stop the wait process while it is happening.
+ *
+ * This callback may schedule exceptions, *unless* |event| is equal to
+ * |kTerminatedExecution|.
+ */
+ using AtomicsWaitCallback = void (*)(AtomicsWaitEvent event,
+ Local<SharedArrayBuffer> array_buffer,
+ size_t offset_in_bytes, int64_t value,
+ double timeout_in_ms,
+ AtomicsWaitWakeHandle* stop_handle,
+ void* data);
+
+ /**
+ * Set a new |AtomicsWaitCallback|. This overrides an earlier
+ * |AtomicsWaitCallback|, if there was any. If |callback| is nullptr,
+ * this unsets the callback. |data| will be passed to the callback
+ * as its last parameter.
+ */
+ void SetAtomicsWaitCallback(AtomicsWaitCallback callback, void* data);
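+
+ // Illustrative sketch (not part of this header): tracing Atomics.wait()
+ // events with the AtomicsWaitCallback signature declared above; the body is
+ // hypothetical embedder logging.
+ //
+ //   void OnAtomicsWait(v8::Isolate::AtomicsWaitEvent event,
+ //                      v8::Local<v8::SharedArrayBuffer> buffer,
+ //                      size_t offset_in_bytes, int64_t value,
+ //                      double timeout_in_ms,
+ //                      v8::Isolate::AtomicsWaitWakeHandle* stop_handle,
+ //                      void* data) {
+ //     if (event == v8::Isolate::AtomicsWaitEvent::kStartWait) { /* ... */ }
+ //   }
+ //   isolate->SetAtomicsWaitCallback(OnAtomicsWait, nullptr);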
+
+ /**
+ * Enables the host application to receive a notification after a
+ * garbage collection. Allocations are allowed in the callback function,
+ * but the callback is not re-entrant: if an allocation inside it
+ * triggers a garbage collection, the callback won't be called again.
+ * It is possible to specify a GCType filter for your callback, but it is
+ * not possible to register the same callback function twice with
+ * different GCType filters.
+ */
+ void AddGCEpilogueCallback(GCCallbackWithData callback, void* data = nullptr,
+ GCType gc_type_filter = kGCTypeAll);
+ void AddGCEpilogueCallback(GCCallback callback,
+ GCType gc_type_filter = kGCTypeAll);
+
+ /**
+ * Removes a callback that was installed by AddGCEpilogueCallback.
+ */
+ void RemoveGCEpilogueCallback(GCCallbackWithData callback,
+ void* data = nullptr);
+ void RemoveGCEpilogueCallback(GCCallback callback);
+
+ using GetExternallyAllocatedMemoryInBytesCallback = size_t (*)();
+
+ /**
+ * Sets the callback that tells V8 how much memory is currently allocated
+ * externally to the V8 heap. Ideally this memory is somehow connected to V8
+ * objects and may get freed up when the corresponding V8 objects get
+ * collected by a V8 garbage collection.
+ */
+ void SetGetExternallyAllocatedMemoryInBytesCallback(
+ GetExternallyAllocatedMemoryInBytesCallback callback);
+
+ /**
+ * Forcefully terminate the current thread of JavaScript execution
+ * in the given isolate.
+ *
+ * This method can be used by any thread even if that thread has not
+ * acquired the V8 lock with a Locker object.
+ */
+ void TerminateExecution();
+
+ /**
+ * Checks whether V8 is terminating JavaScript execution.
+ *
+ * Returns true if JavaScript execution is currently terminating
+ * because of a call to TerminateExecution. In that case there are
+ * still JavaScript frames on the stack and the termination
+ * exception is still active.
+ */
+ bool IsExecutionTerminating();
+
+ /**
+ * Resume execution capability in the given isolate, whose execution
+ * was previously forcefully terminated using TerminateExecution().
+ *
+ * When execution is forcefully terminated using TerminateExecution(),
+ * the isolate can not resume execution until all JavaScript frames
+ * have propagated the uncatchable exception which is generated. This
+ * method allows the program embedding the engine to handle the
+ * termination event and resume execution capability, even if
+ * JavaScript frames remain on the stack.
+ *
+ * This method can be used by any thread even if that thread has not
+ * acquired the V8 lock with a Locker object.
+ */
+ void CancelTerminateExecution();
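+
+ // Illustrative sketch (not part of this header): a watchdog thread
+ // forcefully stopping runaway JavaScript, with execution capability
+ // restored later.
+ //
+ //   // On the watchdog thread (no Locker required):
+ //   isolate->TerminateExecution();
+ //   // Later, once the embedder has handled the termination:
+ //   isolate->CancelTerminateExecution();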
+
+ /**
+ * Requests V8 to interrupt long-running JavaScript code and invoke
+ * the given |callback|, passing the given |data| to it. After |callback|
+ * returns, control will be returned to the JavaScript code.
+ * There may be a number of interrupt requests in flight.
+ * Can be called from another thread without acquiring a |Locker|.
+ * Registered |callback| must not reenter interrupted Isolate.
+ */
+ void RequestInterrupt(InterruptCallback callback, void* data);
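+
+ // Illustrative sketch (not part of this header): requesting an interrupt
+ // from another thread; the lambda runs on the thread executing JavaScript.
+ //
+ //   isolate->RequestInterrupt(
+ //       [](v8::Isolate* isolate, void* data) {
+ //         // Inspect or flag embedder state here; do not re-enter JS.
+ //       },
+ //       nullptr);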
+
+ /**
+ * Returns true if there is ongoing background work within V8 that will
+ * eventually post a foreground task, like asynchronous WebAssembly
+ * compilation.
+ */
+ bool HasPendingBackgroundTasks();
+
+ /**
+ * Request garbage collection in this Isolate. It is only valid to call this
+ * function if --expose_gc was specified.
+ *
+ * This should only be used for testing purposes and not to enforce a garbage
+ * collection schedule. It has strong negative impact on the garbage
+ * collection performance. Use IdleNotificationDeadline() or
+ * LowMemoryNotification() instead to influence the garbage collection
+ * schedule.
+ */
+ void RequestGarbageCollectionForTesting(GarbageCollectionType type);
+
+ /**
+ * Set the callback to invoke for logging event.
+ */
+ void SetEventLogger(LogEventCallback that);
+
+ /**
+ * Adds a callback to notify the host application right before a script
+ * is about to run. If a script re-enters the runtime during execution, the
+ * BeforeCallEnteredCallback is invoked for each re-entrance.
+ * Executing scripts inside the callback will re-trigger the callback.
+ */
+ void AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
+
+ /**
+ * Removes a callback that was installed by AddBeforeCallEnteredCallback.
+ */
+ void RemoveBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
+
+ /**
+ * Adds a callback to notify the host application when a script has finished
+ * running. If a script re-enters the runtime during execution, the
+ * CallCompletedCallback is only invoked when the outer-most script
+ * execution ends. Executing scripts inside the callback does not trigger
+ * further callbacks.
+ */
+ void AddCallCompletedCallback(CallCompletedCallback callback);
+
+ /**
+ * Removes a callback that was installed by AddCallCompletedCallback.
+ */
+ void RemoveCallCompletedCallback(CallCompletedCallback callback);
+
+ /**
+ * Set the PromiseHook callback for various promise lifecycle
+ * events.
+ */
+ void SetPromiseHook(PromiseHook hook);
+
+ /**
+ * Sets a callback to notify about a promise rejection with no handler, or
+ * about revocation of such a previous notification once a handler is added.
+ */
+ void SetPromiseRejectCallback(PromiseRejectCallback callback);
+
+ /**
+ * Runs the default MicrotaskQueue until it is empty and performs other
+ * microtask checkpoint steps, such as calling ClearKeptObjects. Asserts that
+ * the MicrotasksPolicy is not kScoped. Any exceptions thrown by microtask
+ * callbacks are swallowed.
+ */
+ void PerformMicrotaskCheckpoint();
+
+ /**
+ * Enqueues the given microtask function on the default MicrotaskQueue.
+ */
+ void EnqueueMicrotask(Local<Function> microtask);
+
+ /**
+ * Enqueues the given callback on the default MicrotaskQueue.
+ */
+ void EnqueueMicrotask(MicrotaskCallback callback, void* data = nullptr);
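+
+ // Illustrative sketch (not part of this header): enqueuing a C++ microtask
+ // on the default queue and draining it explicitly.
+ //
+ //   isolate->EnqueueMicrotask(
+ //       [](void* data) { /* microtask body */ }, nullptr);
+ //   isolate->PerformMicrotaskCheckpoint();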
+
+ /**
+ * Controls how Microtasks are invoked. See MicrotasksPolicy for details.
+ */
+ void SetMicrotasksPolicy(MicrotasksPolicy policy);
+
+ /**
+ * Returns the policy controlling how Microtasks are invoked.
+ */
+ MicrotasksPolicy GetMicrotasksPolicy() const;
+
+ /**
+ * Adds a callback to notify the host application after
+ * microtasks were run on the default MicrotaskQueue. The callback is
+ * triggered by an explicit PerformMicrotaskCheckpoint call or by automatic
+ * microtask execution (see SetMicrotasksPolicy).
+ *
+ * The callback triggers even if running microtasks was merely attempted,
+ * i.e. the microtask queue was empty and no microtask was actually
+ * executed.
+ *
+ * Executing scripts inside the callback will not re-trigger microtasks and
+ * the callback.
+ */
+ void AddMicrotasksCompletedCallback(
+ MicrotasksCompletedCallbackWithData callback, void* data = nullptr);
+
+ /**
+ * Removes a callback that was installed by AddMicrotasksCompletedCallback.
+ */
+ void RemoveMicrotasksCompletedCallback(
+ MicrotasksCompletedCallbackWithData callback, void* data = nullptr);
+
+ /**
+ * Sets a callback for counting the number of times a feature of V8 is used.
+ */
+ void SetUseCounterCallback(UseCounterCallback callback);
+
+ /**
+ * Enables the host application to provide a mechanism for recording
+ * statistics counters.
+ */
+ void SetCounterFunction(CounterLookupCallback);
+
+ /**
+ * Enables the host application to provide a mechanism for recording
+ * histograms. The CreateHistogram function returns a
+ * histogram which will later be passed to the AddHistogramSample
+ * function.
+ */
+ void SetCreateHistogramFunction(CreateHistogramCallback);
+ void SetAddHistogramSampleFunction(AddHistogramSampleCallback);
+
+ /**
+ * Enables the host application to provide a mechanism for recording
+ * event-based metrics. To use this interface, include
+ * include/v8-metrics.h and derive the recorder from the
+ * metrics::Recorder base class defined there.
+ * This method can only be called once per isolate and must happen during
+ * isolate initialization before background threads are spawned.
+ */
+ void SetMetricsRecorder(
+ const std::shared_ptr<metrics::Recorder>& metrics_recorder);
+
+ /**
+ * Enables the host application to provide a mechanism for recording a
+ * predefined set of data as crash keys to be used in postmortem debugging in
+ * case of a crash.
+ */
+ void SetAddCrashKeyCallback(AddCrashKeyCallback);
+
+ /**
+ * Optional notification that the embedder is idle.
+ * V8 uses the notification to perform garbage collection.
+ * This call can be used repeatedly if the embedder remains idle.
+ * Returns true if the embedder should stop calling IdleNotificationDeadline
+ * until real work has been done. This indicates that V8 has done
+ * as much cleanup as it will be able to do.
+ *
+ * The deadline_in_seconds argument specifies the deadline V8 has to finish
+ * garbage collection work. deadline_in_seconds is compared with
+ * MonotonicallyIncreasingTime() and should be based on the same timebase as
+ * that function. There is no guarantee that the actual work will be done
+ * within the time limit.
+ */
+ bool IdleNotificationDeadline(double deadline_in_seconds);
+
+ /**
+ * Optional notification that the system is running low on memory.
+ * V8 uses these notifications to attempt to free memory.
+ */
+ void LowMemoryNotification();
+
+ /**
+ * Optional notification that a context has been disposed. V8 uses these
+ * notifications to guide the GC heuristic and cancel FinalizationRegistry
+ * cleanup tasks. Returns the number of context disposals - including this one
+ * - since the last time V8 had a chance to clean up.
+ *
+ * The optional parameter |dependant_context| specifies whether the disposed
+ * context depended on state from other contexts or not.
+ */
+ int ContextDisposedNotification(bool dependant_context = true);
+
+ /**
+ * Optional notification that the isolate switched to the foreground.
+ * V8 uses these notifications to guide heuristics.
+ */
+ void IsolateInForegroundNotification();
+
+ /**
+ * Optional notification that the isolate switched to the background.
+ * V8 uses these notifications to guide heuristics.
+ */
+ void IsolateInBackgroundNotification();
+
+ /**
+ * Optional notification which will enable the memory savings mode.
+ * V8 uses this notification to guide heuristics which may result in a
+ * smaller memory footprint at the cost of reduced runtime performance.
+ */
+ void EnableMemorySavingsMode();
+
+ /**
+ * Optional notification which will disable the memory savings mode.
+ */
+ void DisableMemorySavingsMode();
+
+ /**
+ * Optional notification to tell V8 the current performance requirements
+ * of the embedder based on RAIL.
+ * V8 uses these notifications to guide heuristics.
+ * This is an unfinished experimental feature. Semantics and implementation
+ * may change frequently.
+ */
+ void SetRAILMode(RAILMode rail_mode);
+
+ /**
+ * Updates the load start time of the RAIL mode.
+ */
+ void UpdateLoadStartTime();
+
+ /**
+ * Optional notification to tell V8 the current isolate is used for debugging
+ * and requires a higher heap limit.
+ */
+ void IncreaseHeapLimitForDebugging();
+
+ /**
+ * Restores the original heap limit after IncreaseHeapLimitForDebugging().
+ */
+ void RestoreOriginalHeapLimit();
+
+ /**
+ * Returns true if the heap limit was increased for debugging and the
+ * original heap limit was not restored yet.
+ */
+ bool IsHeapLimitIncreasedForDebugging();
+
+ /**
+ * Allows the host application to provide the address of a function that is
+ * notified each time code is added, moved or removed.
+ *
+ * \param options options for the JIT code event handler.
+ * \param event_handler the JIT code event handler, which will be invoked
+ * each time code is added, moved or removed.
+   * \note \p event_handler won't get notified of pre-existing code.
+ * \note since code removal notifications are not currently issued, the
+ * \p event_handler may get notifications of code that overlaps earlier
+ * code notifications. This happens when code areas are reused, and the
+ * earlier overlapping code areas should therefore be discarded.
+ * \note the events passed to \p event_handler and the strings they point to
+ * are not guaranteed to live past each call. The \p event_handler must
+ * copy strings and other parameters it needs to keep around.
+ * \note the set of events declared in JitCodeEvent::EventType is expected to
+ * grow over time, and the JitCodeEvent structure is expected to accrue
+ * new members. The \p event_handler function must ignore event codes
+ * it does not recognize to maintain future compatibility.
+ * \note Use Isolate::CreateParams to get events for code executed during
+ * Isolate setup.
+ */
+ void SetJitCodeEventHandler(JitCodeEventOptions options,
+ JitCodeEventHandler event_handler);
+
+ /**
+ * Modifies the stack limit for this Isolate.
+ *
+   * \param stack_limit An address beyond which the VM's stack may not grow.
+ *
+ * \note If you are using threads then you should hold the V8::Locker lock
+ * while setting the stack limit and you must set a non-default stack
+ * limit separately for each thread.
+ */
+ void SetStackLimit(uintptr_t stack_limit);
+
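+  // Usage sketch (illustrative embedder code): derive a limit from the current
+  // stack pointer, keeping |kStackSlack| bytes of headroom. Assumes the stack
+  // grows downwards, as on the platforms V8 supports; the slack value is a
+  // hypothetical choice.
+  //
+  //   void ConfigureStackLimit(v8::Isolate* isolate) {
+  //     constexpr uintptr_t kStackSlack = 512 * 1024;
+  //     uintptr_t here = reinterpret_cast<uintptr_t>(&here);
+  //     isolate->SetStackLimit(here - kStackSlack);
+  //   }
+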
+ /**
+ * Returns a memory range that can potentially contain jitted code. Code for
+   * V8's 'builtins' will not be in this range if embedded builtins are enabled.
+ *
+ * On Win64, embedders are advised to install function table callbacks for
+ * these ranges, as default SEH won't be able to unwind through jitted code.
+ * The first page of the code range is reserved for the embedder and is
+ * committed, writable, and executable, to be used to store unwind data, as
+ * documented in
+ * https://docs.microsoft.com/en-us/cpp/build/exception-handling-x64.
+ *
+ * Might be empty on other platforms.
+ *
+ * https://code.google.com/p/v8/issues/detail?id=3598
+ */
+ void GetCodeRange(void** start, size_t* length_in_bytes);
+
+ /**
+ * As GetCodeRange, but for embedded builtins (these live in a distinct
+ * memory region from other V8 Code objects).
+ */
+ void GetEmbeddedCodeRange(const void** start, size_t* length_in_bytes);
+
+ /**
+ * Returns the JSEntryStubs necessary for use with the Unwinder API.
+ */
+ JSEntryStubs GetJSEntryStubs();
+
+ static constexpr size_t kMinCodePagesBufferSize = 32;
+
+ /**
+ * Copies the code heap pages currently in use by V8 into |code_pages_out|.
+ * |code_pages_out| must have at least kMinCodePagesBufferSize capacity and
+ * must be empty.
+ *
+ * Signal-safe, does not allocate, does not access the V8 heap.
+ * No code on the stack can rely on pages that might be missing.
+ *
+ * Returns the number of pages available to be copied, which might be greater
+ * than |capacity|. In this case, only |capacity| pages will be copied into
+ * |code_pages_out|. The caller should provide a bigger buffer on the next
+ * call in order to get all available code pages, but this is not required.
+ */
+ size_t CopyCodePages(size_t capacity, MemoryRange* code_pages_out);
+
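+  // Usage sketch (illustrative embedder code, outside of a signal handler;
+  // v8::MemoryRange is assumed to be available to users of this header):
+  //
+  //   v8::MemoryRange pages[v8::Isolate::kMinCodePagesBufferSize];
+  //   size_t available = isolate->CopyCodePages(
+  //       v8::Isolate::kMinCodePagesBufferSize, pages);
+  //   // If |available| exceeds the capacity, only the first |capacity|
+  //   // entries were copied; retry with a bigger buffer to see every page.
+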
+ /** Set the callback to invoke in case of fatal errors. */
+ void SetFatalErrorHandler(FatalErrorCallback that);
+
+ /** Set the callback to invoke in case of OOM errors. */
+ void SetOOMErrorHandler(OOMErrorCallback that);
+
+ /**
+ * Add a callback to invoke in case the heap size is close to the heap limit.
+ * If multiple callbacks are added, only the most recently added callback is
+ * invoked.
+ */
+ void AddNearHeapLimitCallback(NearHeapLimitCallback callback, void* data);
+
+ /**
+ * Remove the given callback and restore the heap limit to the
+ * given limit. If the given limit is zero, then it is ignored.
+ * If the current heap size is greater than the given limit,
+ * then the heap limit is restored to the minimal limit that
+ * is possible for the current heap size.
+ */
+ void RemoveNearHeapLimitCallback(NearHeapLimitCallback callback,
+ size_t heap_limit);
+
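+  // Usage sketch (illustrative embedder code; NearHeapLimitCallback is
+  // declared in v8-callbacks.h and returns the new heap limit in bytes):
+  //
+  //   size_t OnNearHeapLimit(void* data, size_t current_heap_limit,
+  //                          size_t initial_heap_limit) {
+  //     // Grant 50% more headroom so the embedder can recover or shut down.
+  //     return current_heap_limit + current_heap_limit / 2;
+  //   }
+  //   ...
+  //   isolate->AddNearHeapLimitCallback(OnNearHeapLimit, nullptr);
+  //   ...
+  //   isolate->RemoveNearHeapLimitCallback(OnNearHeapLimit, /*heap_limit=*/0);
+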
+ /**
+ * If the heap limit was changed by the NearHeapLimitCallback, then the
+ * initial heap limit will be restored once the heap size falls below the
+ * given threshold percentage of the initial heap limit.
+ * The threshold percentage is a number in (0.0, 1.0) range.
+ */
+ void AutomaticallyRestoreInitialHeapLimit(double threshold_percent = 0.5);
+
+ /**
+ * Set the callback to invoke to check if code generation from
+ * strings should be allowed.
+ */
+ void SetModifyCodeGenerationFromStringsCallback(
+ ModifyCodeGenerationFromStringsCallback2 callback);
+
+ /**
+ * Set the callback to invoke to check if wasm code generation should
+ * be allowed.
+ */
+ void SetAllowWasmCodeGenerationCallback(
+ AllowWasmCodeGenerationCallback callback);
+
+ /**
+   * Embedder override/overload injection points for wasm APIs. The expectation
+ * is that the embedder sets them at most once.
+ */
+ void SetWasmModuleCallback(ExtensionCallback callback);
+ void SetWasmInstanceCallback(ExtensionCallback callback);
+
+ void SetWasmStreamingCallback(WasmStreamingCallback callback);
+
+ void SetWasmLoadSourceMapCallback(WasmLoadSourceMapCallback callback);
+
+ void SetWasmSimdEnabledCallback(WasmSimdEnabledCallback callback);
+
+ void SetWasmExceptionsEnabledCallback(WasmExceptionsEnabledCallback callback);
+
+ void SetSharedArrayBufferConstructorEnabledCallback(
+ SharedArrayBufferConstructorEnabledCallback callback);
+
+ /**
+ * This function can be called by the embedder to signal V8 that the dynamic
+ * enabling of features has finished. V8 can now set up dynamically added
+ * features.
+ */
+ void InstallConditionalFeatures(Local<Context> context);
+
+ /**
+ * Check if V8 is dead and therefore unusable. This is the case after
+ * fatal errors such as out-of-memory situations.
+ */
+ bool IsDead();
+
+ /**
+ * Adds a message listener (errors only).
+ *
+ * The same message listener can be added more than once and in that
+ * case it will be called more than once for each message.
+ *
+ * If data is specified, it will be passed to the callback when it is called.
+ * Otherwise, the exception object will be passed to the callback instead.
+ */
+ bool AddMessageListener(MessageCallback that,
+ Local<Value> data = Local<Value>());
+
+ /**
+ * Adds a message listener.
+ *
+ * The same message listener can be added more than once and in that
+ * case it will be called more than once for each message.
+ *
+ * If data is specified, it will be passed to the callback when it is called.
+ * Otherwise, the exception object will be passed to the callback instead.
+ *
+ * A listener can listen for particular error levels by providing a mask.
+ */
+ bool AddMessageListenerWithErrorLevel(MessageCallback that,
+ int message_levels,
+ Local<Value> data = Local<Value>());
+
+ /**
+ * Remove all message listeners from the specified callback function.
+ */
+ void RemoveMessageListeners(MessageCallback that);
+
+  /** Callback function for reporting failed access checks. */
+ void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback);
+
+ /**
+   * Tells V8 to capture the current stack trace when an uncaught exception
+   * occurs and report it to the message listeners. The option is off by
+   * default.
+ */
+ void SetCaptureStackTraceForUncaughtExceptions(
+ bool capture, int frame_limit = 10,
+ StackTrace::StackTraceOptions options = StackTrace::kOverview);
+
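+  // Usage sketch (illustrative embedder code; MessageCallback is declared in
+  // v8-callbacks.h):
+  //
+  //   void OnMessage(v8::Local<v8::Message> message, v8::Local<v8::Value>) {
+  //     v8::String::Utf8Value text(message->GetIsolate(), message->Get());
+  //     fprintf(stderr, "uncaught exception: %s\n", *text);
+  //   }
+  //   ...
+  //   isolate->SetCaptureStackTraceForUncaughtExceptions(true);
+  //   isolate->AddMessageListener(OnMessage);
+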
+ /**
+   * Iterates through all external resources referenced from the current
+   * isolate's heap. GC is not invoked prior to iterating, therefore there is no
+ * guarantee that visited objects are still alive.
+ */
+ void VisitExternalResources(ExternalResourceVisitor* visitor);
+
+ /**
+ * Iterates through all the persistent handles in the current isolate's heap
+ * that have class_ids.
+ */
+ void VisitHandlesWithClassIds(PersistentHandleVisitor* visitor);
+
+ /**
+ * Iterates through all the persistent handles in the current isolate's heap
+   * that have class_ids and are weak, to be marked as inactive if there is no
+   * pending activity for the handle.
+ */
+ void VisitWeakHandles(PersistentHandleVisitor* visitor);
+
+ /**
+ * Check if this isolate is in use.
+ * True if at least one thread Enter'ed this isolate.
+ */
+ bool IsInUse();
+
+ /**
+ * Set whether calling Atomics.wait (a function that may block) is allowed in
+ * this isolate. This can also be configured via
+ * CreateParams::allow_atomics_wait.
+ */
+ void SetAllowAtomicsWait(bool allow);
+
+ /**
+ * Time zone redetection indicator for
+ * DateTimeConfigurationChangeNotification.
+ *
+   * kSkip indicates to V8 that the notification should not trigger redetecting
+   * the host time zone. kRedetect indicates to V8 that the host time zone
+   * should be redetected and used to set the default time zone.
+   *
+   * Host time zone detection may require file system access or similar
+   * operations unlikely to be available inside a sandbox. If V8 is run inside a
+   * sandbox, the host time zone has to be detected outside the sandbox before
+   * calling the DateTimeConfigurationChangeNotification function.
+ */
+ enum class TimeZoneDetection { kSkip, kRedetect };
+
+ /**
+   * Notification that the embedder has changed the time zone, daylight saving
+   * time, or other date/time configuration parameters. V8 keeps a cache of
+   * various values used for date/time computation. This notification resets
+   * those cached values for the current context so that date/time
+   * configuration changes are reflected.
+   *
+   * This API should not be called more often than needed, as it will
+   * negatively impact the performance of date operations.
+ */
+ void DateTimeConfigurationChangeNotification(
+ TimeZoneDetection time_zone_detection = TimeZoneDetection::kSkip);
+
+ /**
+   * Notification that the embedder has changed the locale. V8 keeps a cache of
+   * various values used for locale computation. This notification resets those
+   * cached values for the current context so that locale configuration changes
+   * are reflected.
+   *
+   * This API should not be called more often than needed, as it will
+   * negatively impact the performance of locale operations.
+ */
+ void LocaleConfigurationChangeNotification();
+
+ Isolate() = delete;
+ ~Isolate() = delete;
+ Isolate(const Isolate&) = delete;
+ Isolate& operator=(const Isolate&) = delete;
+  // Deleting operator new and delete here is allowed as ctor and dtor are also
+  // deleted.
+ void* operator new(size_t size) = delete;
+ void* operator new[](size_t size) = delete;
+ void operator delete(void*, size_t) = delete;
+ void operator delete[](void*, size_t) = delete;
+
+ private:
+ template <class K, class V, class Traits>
+ friend class PersistentValueMapBase;
+
+ internal::Address* GetDataFromSnapshotOnce(size_t index);
+ void ReportExternalAllocationLimitReached();
+};
+
+void Isolate::SetData(uint32_t slot, void* data) {
+ using I = internal::Internals;
+ I::SetEmbedderData(this, slot, data);
+}
+
+void* Isolate::GetData(uint32_t slot) {
+ using I = internal::Internals;
+ return I::GetEmbedderData(this, slot);
+}
+
+uint32_t Isolate::GetNumberOfDataSlots() {
+ using I = internal::Internals;
+ return I::kNumIsolateDataSlots;
+}
+
+template <class T>
+MaybeLocal<T> Isolate::GetDataFromSnapshotOnce(size_t index) {
+ T* data = reinterpret_cast<T*>(GetDataFromSnapshotOnce(index));
+ if (data) internal::PerformCastCheck(data);
+ return Local<T>(data);
+}
+
+} // namespace v8
+
+#endif // INCLUDE_V8_ISOLATE_H_
diff --git a/deps/v8/include/v8-json.h b/deps/v8/include/v8-json.h
new file mode 100644
index 0000000000..23d918fc97
--- /dev/null
+++ b/deps/v8/include/v8-json.h
@@ -0,0 +1,47 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_JSON_H_
+#define INCLUDE_V8_JSON_H_
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Context;
+class Value;
+class String;
+
+/**
+ * A JSON Parser and Stringifier.
+ */
+class V8_EXPORT JSON {
+ public:
+ /**
+ * Tries to parse the string |json_string| and returns it as value if
+ * successful.
+ *
+   * \param context The context in which to parse and create the value.
+ * \param json_string The string to parse.
+ * \return The corresponding value if successfully parsed.
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<Value> Parse(
+ Local<Context> context, Local<String> json_string);
+
+ /**
+ * Tries to stringify the JSON-serializable object |json_object| and returns
+ * it as string if successful.
+ *
+ * \param json_object The JSON-serializable object to stringify.
+ * \return The corresponding string if successfully stringified.
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<String> Stringify(
+ Local<Context> context, Local<Value> json_object,
+ Local<String> gap = Local<String>());
+};
+
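+// Usage sketch (illustrative embedder code; assumes an entered |context|, an
+// active HandleScope, and a |json_string| handle):
+//
+//   v8::Local<v8::Value> parsed;
+//   if (v8::JSON::Parse(context, json_string).ToLocal(&parsed)) {
+//     v8::Local<v8::String> text;
+//     if (v8::JSON::Stringify(context, parsed).ToLocal(&text)) {
+//       // |text| holds the re-serialized JSON.
+//     }
+//   }
+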
+} // namespace v8
+
+#endif // INCLUDE_V8_JSON_H_
diff --git a/deps/v8/include/v8-local-handle.h b/deps/v8/include/v8-local-handle.h
new file mode 100644
index 0000000000..66a8e93af6
--- /dev/null
+++ b/deps/v8/include/v8-local-handle.h
@@ -0,0 +1,459 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_LOCAL_HANDLE_H_
+#define INCLUDE_V8_LOCAL_HANDLE_H_
+
+#include <stddef.h>
+
+#include <type_traits>
+
+#include "v8-internal.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Boolean;
+template <class T>
+class BasicTracedReference;
+class Context;
+class EscapableHandleScope;
+template <class F>
+class Eternal;
+template <class F>
+class FunctionCallbackInfo;
+class Isolate;
+template <class F>
+class MaybeLocal;
+template <class T>
+class NonCopyablePersistentTraits;
+class Object;
+template <class T, class M = NonCopyablePersistentTraits<T>>
+class Persistent;
+template <class T>
+class PersistentBase;
+template <class F1, class F2, class F3>
+class PersistentValueMapBase;
+template <class F1, class F2>
+class PersistentValueVector;
+class Primitive;
+class Private;
+template <class F>
+class PropertyCallbackInfo;
+template <class F>
+class ReturnValue;
+class String;
+template <class F>
+class Traced;
+template <class F>
+class TracedGlobal;
+template <class F>
+class TracedReference;
+class TracedReferenceBase;
+class Utils;
+
+namespace internal {
+template <typename T>
+class CustomArguments;
+} // namespace internal
+
+namespace api_internal {
+// Called when ToLocalChecked is called on an empty Local.
+V8_EXPORT void ToLocalEmpty();
+} // namespace api_internal
+
+/**
+ * A stack-allocated class that governs a number of local handles.
+ * After a handle scope has been created, all local handles will be
+ * allocated within that handle scope until either the handle scope is
+ * deleted or another handle scope is created. If there is already a
+ * handle scope and a new one is created, all allocations will take
+ * place in the new handle scope until it is deleted. After that,
+ * new handles will again be allocated in the original handle scope.
+ *
+ * After the handle scope of a local handle has been deleted the
+ * garbage collector will no longer track the object stored in the
+ * handle and may deallocate it. The behavior of accessing a handle
+ * for which the handle scope has been deleted is undefined.
+ */
+class V8_EXPORT V8_NODISCARD HandleScope {
+ public:
+ explicit HandleScope(Isolate* isolate);
+
+ ~HandleScope();
+
+ /**
+ * Counts the number of allocated handles.
+ */
+ static int NumberOfHandles(Isolate* isolate);
+
+ V8_INLINE Isolate* GetIsolate() const {
+ return reinterpret_cast<Isolate*>(isolate_);
+ }
+
+ HandleScope(const HandleScope&) = delete;
+ void operator=(const HandleScope&) = delete;
+
+ protected:
+ V8_INLINE HandleScope() = default;
+
+ void Initialize(Isolate* isolate);
+
+ static internal::Address* CreateHandle(internal::Isolate* isolate,
+ internal::Address value);
+
+ private:
+  // Declaring operator new and delete as deleted is not spec compliant.
+  // Therefore, declare them private instead to disable dynamic allocation.
+ void* operator new(size_t size);
+ void* operator new[](size_t size);
+ void operator delete(void*, size_t);
+ void operator delete[](void*, size_t);
+
+ internal::Isolate* isolate_;
+ internal::Address* prev_next_;
+ internal::Address* prev_limit_;
+
+ // Local::New uses CreateHandle with an Isolate* parameter.
+ template <class F>
+ friend class Local;
+
+ // Object::GetInternalField and Context::GetEmbedderData use CreateHandle with
+ // a HeapObject in their shortcuts.
+ friend class Object;
+ friend class Context;
+};
+
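+// Usage sketch (illustrative embedder code): handles created inside the scope
+// are only valid while it is active; values that must outlive it need an
+// EscapableHandleScope or a persistent handle.
+//
+//   {
+//     v8::HandleScope scope(isolate);
+//     v8::Local<v8::String> s =
+//         v8::String::NewFromUtf8Literal(isolate, "example");
+//     // |s| is valid here...
+//   }
+//   // ...but must not be used here.
+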
+/**
+ * An object reference managed by the v8 garbage collector.
+ *
+ * All objects returned from v8 have to be tracked by the garbage collector so
+ * that it knows that the objects are still alive. Also, because the garbage
+ * collector may move objects, it is unsafe to point directly to an object.
+ * Instead, all objects are stored in handles which are known by the garbage
+ * collector and updated whenever an object moves. Handles should always be
+ * passed by value (except in cases like out-parameters) and they should never
+ * be allocated on the heap.
+ *
+ * There are two types of handles: local and persistent handles.
+ *
+ * Local handles are light-weight and transient and typically used in local
+ * operations. They are managed by HandleScopes. That means that a HandleScope
+ * must exist on the stack when they are created and that they are only valid
+ * inside of the HandleScope active during their creation. For passing a local
+ * handle to an outer HandleScope, an EscapableHandleScope and its Escape()
+ * method must be used.
+ *
+ * Persistent handles can be used when storing objects across several
+ * independent operations and have to be explicitly deallocated when they're no
+ * longer used.
+ *
+ * It is safe to extract the object stored in the handle by dereferencing the
+ * handle (for instance, to extract the Object* from a Local<Object>); the value
+ * will still be governed by a handle behind the scenes and the same rules apply
+ * to these values as to their handles.
+ */
+template <class T>
+class Local {
+ public:
+ V8_INLINE Local() : val_(nullptr) {}
+ template <class S>
+ V8_INLINE Local(Local<S> that) : val_(reinterpret_cast<T*>(*that)) {
+ /**
+ * This check fails when trying to convert between incompatible
+ * handles. For example, converting from a Local<String> to a
+ * Local<Number>.
+ */
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ }
+
+ /**
+ * Returns true if the handle is empty.
+ */
+ V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
+
+ /**
+ * Sets the handle to be empty. IsEmpty() will then return true.
+ */
+ V8_INLINE void Clear() { val_ = nullptr; }
+
+ V8_INLINE T* operator->() const { return val_; }
+
+ V8_INLINE T* operator*() const { return val_; }
+
+ /**
+ * Checks whether two handles are the same.
+ * Returns true if both are empty, or if the objects to which they refer
+ * are identical.
+ *
+ * If both handles refer to JS objects, this is the same as strict equality.
+ * For primitives, such as numbers or strings, a `false` return value does not
+ * indicate that the values aren't equal in the JavaScript sense.
+ * Use `Value::StrictEquals()` to check primitives for equality.
+ */
+ template <class S>
+ V8_INLINE bool operator==(const Local<S>& that) const {
+ internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
+ internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
+ if (a == nullptr) return b == nullptr;
+ if (b == nullptr) return false;
+ return *a == *b;
+ }
+
+ template <class S>
+ V8_INLINE bool operator==(const PersistentBase<S>& that) const {
+ internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
+ internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
+ if (a == nullptr) return b == nullptr;
+ if (b == nullptr) return false;
+ return *a == *b;
+ }
+
+ /**
+ * Checks whether two handles are different.
+ * Returns true if only one of the handles is empty, or if
+ * the objects to which they refer are different.
+ *
+ * If both handles refer to JS objects, this is the same as strict
+ * non-equality. For primitives, such as numbers or strings, a `true` return
+ * value does not indicate that the values aren't equal in the JavaScript
+ * sense. Use `Value::StrictEquals()` to check primitives for equality.
+ */
+ template <class S>
+ V8_INLINE bool operator!=(const Local<S>& that) const {
+ return !operator==(that);
+ }
+
+ template <class S>
+ V8_INLINE bool operator!=(const Persistent<S>& that) const {
+ return !operator==(that);
+ }
+
+ /**
+ * Cast a handle to a subclass, e.g. Local<Value> to Local<Object>.
+ * This is only valid if the handle actually refers to a value of the
+ * target type.
+ */
+ template <class S>
+ V8_INLINE static Local<T> Cast(Local<S> that) {
+#ifdef V8_ENABLE_CHECKS
+ // If we're going to perform the type check then we have to check
+ // that the handle isn't empty before doing the checked cast.
+ if (that.IsEmpty()) return Local<T>();
+#endif
+ return Local<T>(T::Cast(*that));
+ }
+
+ /**
+ * Calling this is equivalent to Local<S>::Cast().
+ * In particular, this is only valid if the handle actually refers to a value
+ * of the target type.
+ */
+ template <class S>
+ V8_INLINE Local<S> As() const {
+ return Local<S>::Cast(*this);
+ }
+
+ /**
+ * Create a local handle for the content of another handle.
+ * The referee is kept alive by the local handle even when
+ * the original handle is destroyed/disposed.
+ */
+ V8_INLINE static Local<T> New(Isolate* isolate, Local<T> that) {
+ return New(isolate, that.val_);
+ }
+
+ V8_INLINE static Local<T> New(Isolate* isolate,
+ const PersistentBase<T>& that) {
+ return New(isolate, that.val_);
+ }
+
+ V8_INLINE static Local<T> New(Isolate* isolate,
+ const BasicTracedReference<T>& that) {
+ return New(isolate, *that);
+ }
+
+ private:
+ friend class TracedReferenceBase;
+ friend class Utils;
+ template <class F>
+ friend class Eternal;
+ template <class F>
+ friend class PersistentBase;
+ template <class F, class M>
+ friend class Persistent;
+ template <class F>
+ friend class Local;
+ template <class F>
+ friend class MaybeLocal;
+ template <class F>
+ friend class FunctionCallbackInfo;
+ template <class F>
+ friend class PropertyCallbackInfo;
+ friend class String;
+ friend class Object;
+ friend class Context;
+ friend class Isolate;
+ friend class Private;
+ template <class F>
+ friend class internal::CustomArguments;
+ friend Local<Primitive> Undefined(Isolate* isolate);
+ friend Local<Primitive> Null(Isolate* isolate);
+ friend Local<Boolean> True(Isolate* isolate);
+ friend Local<Boolean> False(Isolate* isolate);
+ friend class HandleScope;
+ friend class EscapableHandleScope;
+ template <class F1, class F2, class F3>
+ friend class PersistentValueMapBase;
+ template <class F1, class F2>
+ friend class PersistentValueVector;
+ template <class F>
+ friend class ReturnValue;
+ template <class F>
+ friend class Traced;
+ template <class F>
+ friend class TracedGlobal;
+ template <class F>
+ friend class BasicTracedReference;
+ template <class F>
+ friend class TracedReference;
+
+ explicit V8_INLINE Local(T* that) : val_(that) {}
+ V8_INLINE static Local<T> New(Isolate* isolate, T* that) {
+ if (that == nullptr) return Local<T>();
+ T* that_ptr = that;
+ internal::Address* p = reinterpret_cast<internal::Address*>(that_ptr);
+ return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(
+ reinterpret_cast<internal::Isolate*>(isolate), *p)));
+ }
+ T* val_;
+};
+
+#if !defined(V8_IMMINENT_DEPRECATION_WARNINGS)
+// Handle is an alias for Local for historical reasons.
+template <class T>
+using Handle = Local<T>;
+#endif
+
+/**
+ * A MaybeLocal<> is a wrapper around Local<> that enforces a check whether
+ * the Local<> is empty before it can be used.
+ *
+ * If an API method returns a MaybeLocal<>, the API method can potentially fail
+ * either because an exception is thrown, or because an exception is pending,
+ * e.g. because a previous API call threw an exception that hasn't been caught
+ * yet, or because a TerminateExecution exception was thrown. In that case, an
+ * empty MaybeLocal is returned.
+ */
+template <class T>
+class MaybeLocal {
+ public:
+ V8_INLINE MaybeLocal() : val_(nullptr) {}
+ template <class S>
+ V8_INLINE MaybeLocal(Local<S> that) : val_(reinterpret_cast<T*>(*that)) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ }
+
+ V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
+
+ /**
+ * Converts this MaybeLocal<> to a Local<>. If this MaybeLocal<> is empty,
+ * |false| is returned and |out| is left untouched.
+ */
+ template <class S>
+ V8_WARN_UNUSED_RESULT V8_INLINE bool ToLocal(Local<S>* out) const {
+ out->val_ = IsEmpty() ? nullptr : this->val_;
+ return !IsEmpty();
+ }
+
+ /**
+ * Converts this MaybeLocal<> to a Local<>. If this MaybeLocal<> is empty,
+ * V8 will crash the process.
+ */
+ V8_INLINE Local<T> ToLocalChecked() {
+ if (V8_UNLIKELY(val_ == nullptr)) api_internal::ToLocalEmpty();
+ return Local<T>(val_);
+ }
+
+ /**
+ * Converts this MaybeLocal<> to a Local<>, using a default value if this
+ * MaybeLocal<> is empty.
+ */
+ template <class S>
+ V8_INLINE Local<S> FromMaybe(Local<S> default_value) const {
+ return IsEmpty() ? default_value : Local<S>(val_);
+ }
+
+ private:
+ T* val_;
+};
+
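+// Usage sketch (illustrative embedder code): prefer ToLocal() over
+// ToLocalChecked() so that failure surfaces as a branch rather than a crash.
+//
+//   v8::Local<v8::Value> result;
+//   if (!script->Run(context).ToLocal(&result)) {
+//     // Empty MaybeLocal: an exception is pending; handle or propagate it.
+//     return;
+//   }
+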
+/**
+ * A HandleScope which first allocates a handle in the current scope
+ * which will be later filled with the escape value.
+ */
+class V8_EXPORT V8_NODISCARD EscapableHandleScope : public HandleScope {
+ public:
+ explicit EscapableHandleScope(Isolate* isolate);
+ V8_INLINE ~EscapableHandleScope() = default;
+
+ /**
+ * Pushes the value into the previous scope and returns a handle to it.
+ * Cannot be called twice.
+ */
+ template <class T>
+ V8_INLINE Local<T> Escape(Local<T> value) {
+ internal::Address* slot =
+ Escape(reinterpret_cast<internal::Address*>(*value));
+ return Local<T>(reinterpret_cast<T*>(slot));
+ }
+
+ template <class T>
+ V8_INLINE MaybeLocal<T> EscapeMaybe(MaybeLocal<T> value) {
+ return Escape(value.FromMaybe(Local<T>()));
+ }
+
+ EscapableHandleScope(const EscapableHandleScope&) = delete;
+ void operator=(const EscapableHandleScope&) = delete;
+
+ private:
+  // Declaring operator new and delete as deleted is not spec compliant.
+  // Therefore, declare them private instead to disable dynamic allocation.
+ void* operator new(size_t size);
+ void* operator new[](size_t size);
+ void operator delete(void*, size_t);
+ void operator delete[](void*, size_t);
+
+ internal::Address* Escape(internal::Address* escape_value);
+ internal::Address* escape_slot_;
+};
+
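+// Usage sketch (illustrative embedder code):
+//
+//   v8::Local<v8::Object> MakeObject(v8::Isolate* isolate) {
+//     v8::EscapableHandleScope scope(isolate);
+//     v8::Local<v8::Object> obj = v8::Object::New(isolate);
+//     // Escape() copies |obj| into the caller's scope, keeping it valid
+//     // after this scope is destroyed.
+//     return scope.Escape(obj);
+//   }
+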
+/**
+ * A SealHandleScope acts like a handle scope in which no handle allocations
+ * are allowed. It can be useful for debugging handle leaks.
+ * Handles can be allocated within inner normal HandleScopes.
+ */
+class V8_EXPORT V8_NODISCARD SealHandleScope {
+ public:
+ explicit SealHandleScope(Isolate* isolate);
+ ~SealHandleScope();
+
+ SealHandleScope(const SealHandleScope&) = delete;
+ void operator=(const SealHandleScope&) = delete;
+
+ private:
+  // Declaring operator new and delete as deleted is not spec compliant.
+  // Therefore, declare them private instead to disable dynamic allocation.
+ void* operator new(size_t size);
+ void* operator new[](size_t size);
+ void operator delete(void*, size_t);
+ void operator delete[](void*, size_t);
+
+ internal::Isolate* const isolate_;
+ internal::Address* prev_limit_;
+ int prev_sealed_level_;
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_LOCAL_HANDLE_H_
diff --git a/deps/v8/include/v8-locker.h b/deps/v8/include/v8-locker.h
new file mode 100644
index 0000000000..b90fc5ed91
--- /dev/null
+++ b/deps/v8/include/v8-locker.h
@@ -0,0 +1,143 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_LOCKER_H_
+#define INCLUDE_V8_LOCKER_H_
+
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+namespace internal {
+class Isolate;
+} // namespace internal
+
+class Isolate;
+
+/**
+ * Multiple threads in V8 are allowed, but only one thread at a time is allowed
+ * to use any given V8 isolate, see the comments in the Isolate class. The
+ * definition of 'using a V8 isolate' includes accessing handles or holding onto
+ * object pointers obtained from V8 handles while in the particular V8 isolate.
+ * It is up to the user of V8 to ensure, perhaps with locking, that this
+ * constraint is not violated. In addition to any other synchronization
+ * mechanism that may be used, the v8::Locker and v8::Unlocker classes must be
+ * used to signal thread switches to V8.
+ *
+ * v8::Locker is a scoped lock object. While it's active, i.e. between its
+ * construction and destruction, the current thread is allowed to use the locked
+ * isolate. V8 guarantees that an isolate can be locked by at most one thread at
+ * any time. In other words, the scope of a v8::Locker is a critical section.
+ *
+ * Sample usage:
+ * \code
+ * ...
+ * {
+ * v8::Locker locker(isolate);
+ * v8::Isolate::Scope isolate_scope(isolate);
+ * ...
+ * // Code using V8 and isolate goes here.
+ * ...
+ * } // Destructor called here
+ * \endcode
+ *
+ * If you wish to stop using V8 in a thread A you can do this either by
+ * destroying the v8::Locker object as above or by constructing a v8::Unlocker
+ * object:
+ *
+ * \code
+ * {
+ * isolate->Exit();
+ * v8::Unlocker unlocker(isolate);
+ * ...
+ * // Code not using V8 goes here while V8 can run in another thread.
+ * ...
+ * } // Destructor called here.
+ * isolate->Enter();
+ * \endcode
+ *
+ * The Unlocker object is intended for use in a long-running callback from V8,
+ * where you want to release the V8 lock for other threads to use.
+ *
+ * The v8::Locker is a recursive lock, i.e. you can lock more than once in a
+ * given thread. This can be useful if you have code that can be called either
+ * from code that holds the lock or from code that does not. The Unlocker is
+ * not recursive so you cannot have several Unlockers on the stack at once, and
+ * you cannot use an Unlocker in a thread that is not inside a Locker's scope.
+ *
+ * An unlocker will unlock several lockers if it has to and reinstate the
+ * correct depth of locking on its destruction, e.g.:
+ *
+ * \code
+ * // V8 not locked.
+ * {
+ * v8::Locker locker(isolate);
+ * Isolate::Scope isolate_scope(isolate);
+ * // V8 locked.
+ * {
+ * v8::Locker another_locker(isolate);
+ * // V8 still locked (2 levels).
+ * {
+ * isolate->Exit();
+ * v8::Unlocker unlocker(isolate);
+ * // V8 not locked.
+ * }
+ * isolate->Enter();
+ * // V8 locked again (2 levels).
+ * }
+ * // V8 still locked (1 level).
+ * }
+ * // V8 now no longer locked.
+ * \endcode
+ */
+class V8_EXPORT Unlocker {
+ public:
+ /**
+ * Initialize Unlocker for a given Isolate.
+ */
+ V8_INLINE explicit Unlocker(Isolate* isolate) { Initialize(isolate); }
+
+ ~Unlocker();
+
+ private:
+ void Initialize(Isolate* isolate);
+
+ internal::Isolate* isolate_;
+};
+
+class V8_EXPORT Locker {
+ public:
+ /**
+ * Initialize Locker for a given Isolate.
+ */
+ V8_INLINE explicit Locker(Isolate* isolate) { Initialize(isolate); }
+
+ ~Locker();
+
+ /**
+   * Returns whether the locker for a given isolate is locked by the
+   * current thread.
+ */
+ static bool IsLocked(Isolate* isolate);
+
+ /**
+ * Returns whether v8::Locker is being used by this V8 instance.
+ */
+ static bool IsActive();
+
+ // Disallow copying and assigning.
+ Locker(const Locker&) = delete;
+ void operator=(const Locker&) = delete;
+
+ private:
+ void Initialize(Isolate* isolate);
+
+ bool has_lock_;
+ bool top_level_;
+ internal::Isolate* isolate_;
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_LOCKER_H_
diff --git a/deps/v8/include/v8-maybe.h b/deps/v8/include/v8-maybe.h
new file mode 100644
index 0000000000..0532a51005
--- /dev/null
+++ b/deps/v8/include/v8-maybe.h
@@ -0,0 +1,137 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_MAYBE_H_
+#define INCLUDE_V8_MAYBE_H_
+
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+namespace api_internal {
+// Called when ToChecked is called on an empty Maybe.
+V8_EXPORT void FromJustIsNothing();
+} // namespace api_internal
+
+/**
+ * A simple Maybe type, representing an object which may or may not have a
+ * value; see https://hackage.haskell.org/package/base/docs/Data-Maybe.html.
+ *
+ * If an API method returns a Maybe<>, the API method can potentially fail
+ * either because an exception is thrown, or because an exception is pending,
+ * e.g. because a previous API call threw an exception that hasn't been caught
+ * yet, or because a TerminateExecution exception was thrown. In that case, a
+ * "Nothing" value is returned.
+ */
+template <class T>
+class Maybe {
+ public:
+ V8_INLINE bool IsNothing() const { return !has_value_; }
+ V8_INLINE bool IsJust() const { return has_value_; }
+
+ /**
+ * An alias for |FromJust|. Will crash if the Maybe<> is nothing.
+ */
+ V8_INLINE T ToChecked() const { return FromJust(); }
+
+ /**
+   * Short-hand for ToChecked() that doesn't return a value. To be used where
+   * the actual value of the Maybe is not needed, as with Object::Set.
+ */
+ V8_INLINE void Check() const {
+ if (V8_UNLIKELY(!IsJust())) api_internal::FromJustIsNothing();
+ }
+
+ /**
+ * Converts this Maybe<> to a value of type T. If this Maybe<> is
+ * nothing (empty), |false| is returned and |out| is left untouched.
+ */
+ V8_WARN_UNUSED_RESULT V8_INLINE bool To(T* out) const {
+ if (V8_LIKELY(IsJust())) *out = value_;
+ return IsJust();
+ }
+
+ /**
+ * Converts this Maybe<> to a value of type T. If this Maybe<> is
+ * nothing (empty), V8 will crash the process.
+ */
+ V8_INLINE T FromJust() const {
+ if (V8_UNLIKELY(!IsJust())) api_internal::FromJustIsNothing();
+ return value_;
+ }
+
+ /**
+ * Converts this Maybe<> to a value of type T, using a default value if this
+ * Maybe<> is nothing (empty).
+ */
+ V8_INLINE T FromMaybe(const T& default_value) const {
+ return has_value_ ? value_ : default_value;
+ }
+
+ V8_INLINE bool operator==(const Maybe& other) const {
+ return (IsJust() == other.IsJust()) &&
+ (!IsJust() || FromJust() == other.FromJust());
+ }
+
+ V8_INLINE bool operator!=(const Maybe& other) const {
+ return !operator==(other);
+ }
+
+ private:
+ Maybe() : has_value_(false) {}
+ explicit Maybe(const T& t) : has_value_(true), value_(t) {}
+
+ bool has_value_;
+ T value_;
+
+ template <class U>
+ friend Maybe<U> Nothing();
+ template <class U>
+ friend Maybe<U> Just(const U& u);
+};
+
+template <class T>
+inline Maybe<T> Nothing() {
+ return Maybe<T>();
+}
+
+template <class T>
+inline Maybe<T> Just(const T& t) {
+ return Maybe<T>(t);
+}
+
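+// Usage sketch (illustrative embedder code, using Message::GetLineNumber()
+// from v8-message.h):
+//
+//   int line = 0;
+//   if (!message->GetLineNumber(context).To(&line)) {
+//     // Nothing(): an exception is pending and no line number is available.
+//   }
+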
+// A template specialization of Maybe<T> for the case of T = void.
+template <>
+class Maybe<void> {
+ public:
+ V8_INLINE bool IsNothing() const { return !is_valid_; }
+ V8_INLINE bool IsJust() const { return is_valid_; }
+
+ V8_INLINE bool operator==(const Maybe& other) const {
+ return IsJust() == other.IsJust();
+ }
+
+ V8_INLINE bool operator!=(const Maybe& other) const {
+ return !operator==(other);
+ }
+
+ private:
+ struct JustTag {};
+
+ Maybe() : is_valid_(false) {}
+ explicit Maybe(JustTag) : is_valid_(true) {}
+
+ bool is_valid_;
+
+ template <class U>
+ friend Maybe<U> Nothing();
+ friend Maybe<void> JustVoid();
+};
+
+inline Maybe<void> JustVoid() { return Maybe<void>(Maybe<void>::JustTag()); }
+
+} // namespace v8
+
+#endif // INCLUDE_V8_MAYBE_H_
diff --git a/deps/v8/include/v8-memory-span.h b/deps/v8/include/v8-memory-span.h
new file mode 100644
index 0000000000..b26af4f705
--- /dev/null
+++ b/deps/v8/include/v8-memory-span.h
@@ -0,0 +1,43 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_MEMORY_SPAN_H_
+#define INCLUDE_V8_MEMORY_SPAN_H_
+
+#include <stddef.h>
+
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+/**
+ * Points to an unowned contiguous buffer holding a known number of elements.
+ *
+ * This is similar to std::span (standardized in C++20), but does not
+ * require advanced C++ support. In the (far) future, this may be replaced with
+ * or aliased to std::span.
+ *
+ * To facilitate future migration, this class exposes a subset of the interface
+ * implemented by std::span.
+ */
+template <typename T>
+class V8_EXPORT MemorySpan {
+ public:
+ /** The default constructor creates an empty span. */
+ constexpr MemorySpan() = default;
+
+ constexpr MemorySpan(T* data, size_t size) : data_(data), size_(size) {}
+
+ /** Returns a pointer to the beginning of the buffer. */
+ constexpr T* data() const { return data_; }
+ /** Returns the number of elements that the buffer holds. */
+ constexpr size_t size() const { return size_; }
+
+ private:
+ T* data_ = nullptr;
+ size_t size_ = 0;
+};
+
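+// Usage sketch (illustrative embedder code; Consume() is a hypothetical sink):
+//
+//   int backing[3] = {1, 2, 3};
+//   v8::MemorySpan<int> span(backing, 3);
+//   for (size_t i = 0; i < span.size(); ++i) {
+//     Consume(span.data()[i]);
+//   }
+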
+} // namespace v8
+#endif // INCLUDE_V8_MEMORY_SPAN_H_
diff --git a/deps/v8/include/v8-message.h b/deps/v8/include/v8-message.h
new file mode 100644
index 0000000000..195ca79bd9
--- /dev/null
+++ b/deps/v8/include/v8-message.h
@@ -0,0 +1,234 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_MESSAGE_H_
+#define INCLUDE_V8_MESSAGE_H_
+
+#include <stdio.h>
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-maybe.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Integer;
+class PrimitiveArray;
+class StackTrace;
+class String;
+class Value;
+
+/**
+ * The optional attributes of ScriptOrigin.
+ */
+class ScriptOriginOptions {
+ public:
+ V8_INLINE ScriptOriginOptions(bool is_shared_cross_origin = false,
+ bool is_opaque = false, bool is_wasm = false,
+ bool is_module = false)
+ : flags_((is_shared_cross_origin ? kIsSharedCrossOrigin : 0) |
+ (is_wasm ? kIsWasm : 0) | (is_opaque ? kIsOpaque : 0) |
+ (is_module ? kIsModule : 0)) {}
+ V8_INLINE ScriptOriginOptions(int flags)
+ : flags_(flags &
+ (kIsSharedCrossOrigin | kIsOpaque | kIsWasm | kIsModule)) {}
+
+ bool IsSharedCrossOrigin() const {
+ return (flags_ & kIsSharedCrossOrigin) != 0;
+ }
+ bool IsOpaque() const { return (flags_ & kIsOpaque) != 0; }
+ bool IsWasm() const { return (flags_ & kIsWasm) != 0; }
+ bool IsModule() const { return (flags_ & kIsModule) != 0; }
+
+ int Flags() const { return flags_; }
+
+ private:
+ enum {
+ kIsSharedCrossOrigin = 1,
+ kIsOpaque = 1 << 1,
+ kIsWasm = 1 << 2,
+ kIsModule = 1 << 3
+ };
+ const int flags_;
+};
+
+/**
+ * The origin, within a file, of a script.
+ */
+class V8_EXPORT ScriptOrigin {
+ public:
+ V8_DEPRECATE_SOON("Use constructor with primitive C++ types")
+ ScriptOrigin(
+ Local<Value> resource_name, Local<Integer> resource_line_offset,
+ Local<Integer> resource_column_offset,
+ Local<Boolean> resource_is_shared_cross_origin = Local<Boolean>(),
+ Local<Integer> script_id = Local<Integer>(),
+ Local<Value> source_map_url = Local<Value>(),
+ Local<Boolean> resource_is_opaque = Local<Boolean>(),
+ Local<Boolean> is_wasm = Local<Boolean>(),
+ Local<Boolean> is_module = Local<Boolean>(),
+ Local<PrimitiveArray> host_defined_options = Local<PrimitiveArray>());
+ V8_DEPRECATE_SOON("Use constructor that takes an isolate")
+ explicit ScriptOrigin(
+ Local<Value> resource_name, int resource_line_offset = 0,
+ int resource_column_offset = 0,
+ bool resource_is_shared_cross_origin = false, int script_id = -1,
+ Local<Value> source_map_url = Local<Value>(),
+ bool resource_is_opaque = false, bool is_wasm = false,
+ bool is_module = false,
+ Local<PrimitiveArray> host_defined_options = Local<PrimitiveArray>());
+ V8_INLINE ScriptOrigin(
+ Isolate* isolate, Local<Value> resource_name,
+ int resource_line_offset = 0, int resource_column_offset = 0,
+ bool resource_is_shared_cross_origin = false, int script_id = -1,
+ Local<Value> source_map_url = Local<Value>(),
+ bool resource_is_opaque = false, bool is_wasm = false,
+ bool is_module = false,
+ Local<PrimitiveArray> host_defined_options = Local<PrimitiveArray>())
+ : isolate_(isolate),
+ resource_name_(resource_name),
+ resource_line_offset_(resource_line_offset),
+ resource_column_offset_(resource_column_offset),
+ options_(resource_is_shared_cross_origin, resource_is_opaque, is_wasm,
+ is_module),
+ script_id_(script_id),
+ source_map_url_(source_map_url),
+ host_defined_options_(host_defined_options) {}
+
+ V8_INLINE Local<Value> ResourceName() const;
+ V8_DEPRECATE_SOON("Use getter with primitive C++ types.")
+ V8_INLINE Local<Integer> ResourceLineOffset() const;
+ V8_DEPRECATE_SOON("Use getter with primitive C++ types.")
+ V8_INLINE Local<Integer> ResourceColumnOffset() const;
+ V8_DEPRECATE_SOON("Use getter with primitive C++ types.")
+ V8_INLINE Local<Integer> ScriptID() const;
+ V8_INLINE int LineOffset() const;
+ V8_INLINE int ColumnOffset() const;
+ V8_INLINE int ScriptId() const;
+ V8_INLINE Local<Value> SourceMapUrl() const;
+ V8_INLINE Local<PrimitiveArray> HostDefinedOptions() const;
+ V8_INLINE ScriptOriginOptions Options() const { return options_; }
+
+ private:
+ Isolate* isolate_;
+ Local<Value> resource_name_;
+ int resource_line_offset_;
+ int resource_column_offset_;
+ ScriptOriginOptions options_;
+ int script_id_;
+ Local<Value> source_map_url_;
+ Local<PrimitiveArray> host_defined_options_;
+};
+
+/**
+ * An error message.
+ */
+class V8_EXPORT Message {
+ public:
+ Local<String> Get() const;
+
+ /**
+ * Return the isolate to which the Message belongs.
+ */
+ Isolate* GetIsolate() const;
+
+ V8_WARN_UNUSED_RESULT MaybeLocal<String> GetSource(
+ Local<Context> context) const;
+ V8_WARN_UNUSED_RESULT MaybeLocal<String> GetSourceLine(
+ Local<Context> context) const;
+
+ /**
+ * Returns the origin for the script from where the function causing the
+ * error originates.
+ */
+ ScriptOrigin GetScriptOrigin() const;
+
+ /**
+ * Returns the resource name for the script from where the function causing
+ * the error originates.
+ */
+ Local<Value> GetScriptResourceName() const;
+
+ /**
+ * Exception stack trace. By default stack traces are not captured for
+   * uncaught exceptions. SetCaptureStackTraceForUncaughtExceptions allows
+   * changing this option.
+ */
+ Local<StackTrace> GetStackTrace() const;
+
+ /**
+ * Returns the number, 1-based, of the line where the error occurred.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<int> GetLineNumber(Local<Context> context) const;
+
+ /**
+ * Returns the index within the script of the first character where
+ * the error occurred.
+ */
+ int GetStartPosition() const;
+
+ /**
+ * Returns the index within the script of the last character where
+ * the error occurred.
+ */
+ int GetEndPosition() const;
+
+ /**
+ * Returns the Wasm function index where the error occurred. Returns -1 if
+ * message is not from a Wasm script.
+ */
+ int GetWasmFunctionIndex() const;
+
+ /**
+ * Returns the error level of the message.
+ */
+ int ErrorLevel() const;
+
+ /**
+ * Returns the index within the line of the first character where
+ * the error occurred.
+ */
+ int GetStartColumn() const;
+ V8_WARN_UNUSED_RESULT Maybe<int> GetStartColumn(Local<Context> context) const;
+
+ /**
+ * Returns the index within the line of the last character where
+ * the error occurred.
+ */
+ int GetEndColumn() const;
+ V8_WARN_UNUSED_RESULT Maybe<int> GetEndColumn(Local<Context> context) const;
+
+ /**
+ * Passes on the value set by the embedder when it fed the script from which
+ * this Message was generated to V8.
+ */
+ bool IsSharedCrossOrigin() const;
+ bool IsOpaque() const;
+
+ // TODO(1245381): Print to a string instead of on a FILE.
+ static void PrintCurrentStackTrace(Isolate* isolate, FILE* out);
+
+ static const int kNoLineNumberInfo = 0;
+ static const int kNoColumnInfo = 0;
+ static const int kNoScriptIdInfo = 0;
+ static const int kNoWasmFunctionIndexInfo = -1;
+};
+
+Local<Value> ScriptOrigin::ResourceName() const { return resource_name_; }
+
+Local<PrimitiveArray> ScriptOrigin::HostDefinedOptions() const {
+ return host_defined_options_;
+}
+
+int ScriptOrigin::LineOffset() const { return resource_line_offset_; }
+
+int ScriptOrigin::ColumnOffset() const { return resource_column_offset_; }
+
+int ScriptOrigin::ScriptId() const { return script_id_; }
+
+Local<Value> ScriptOrigin::SourceMapUrl() const { return source_map_url_; }
+
+} // namespace v8
+
+#endif // INCLUDE_V8_MESSAGE_H_
diff --git a/deps/v8/include/v8-metrics.h b/deps/v8/include/v8-metrics.h
index a6eea6a864..29e5440106 100644
--- a/deps/v8/include/v8-metrics.h
+++ b/deps/v8/include/v8-metrics.h
@@ -5,10 +5,19 @@
#ifndef V8_METRICS_H_
#define V8_METRICS_H_
-#include "v8-internal.h" // NOLINT(build/include_directory)
-#include "v8.h" // NOLINT(build/include_directory)
+#include <stddef.h>
+#include <stdint.h>
+
+#include <vector>
+
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
namespace v8 {
+
+class Context;
+class Isolate;
+
namespace metrics {
struct GarbageCollectionPhases {
diff --git a/deps/v8/include/v8-microtask-queue.h b/deps/v8/include/v8-microtask-queue.h
new file mode 100644
index 0000000000..af9caa54a8
--- /dev/null
+++ b/deps/v8/include/v8-microtask-queue.h
@@ -0,0 +1,152 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_MICROTASKS_QUEUE_H_
+#define INCLUDE_V8_MICROTASKS_QUEUE_H_
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-microtask.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Function;
+
+namespace internal {
+class Isolate;
+class MicrotaskQueue;
+} // namespace internal
+
+/**
+ * Represents the microtask queue, where microtasks are stored and processed.
+ * https://html.spec.whatwg.org/multipage/webappapis.html#microtask-queue
+ * https://html.spec.whatwg.org/multipage/webappapis.html#enqueuejob(queuename,-job,-arguments)
+ * https://html.spec.whatwg.org/multipage/webappapis.html#perform-a-microtask-checkpoint
+ *
+ * A MicrotaskQueue instance may be associated with multiple Contexts by
+ * passing it to Context::New(); contexts can be detached again with
+ * Context::DetachGlobal().
+ * The embedder must keep the MicrotaskQueue instance alive until all associated
+ * Contexts are gone or detached.
+ *
+ * Use the same instance of MicrotaskQueue for all Contexts that may access each
+ * other synchronously. E.g. for Web embedding, use the same instance for all
+ * origins that share the same URL scheme and eTLD+1.
+ */
+class V8_EXPORT MicrotaskQueue {
+ public:
+ /**
+ * Creates an empty MicrotaskQueue instance.
+ */
+ static std::unique_ptr<MicrotaskQueue> New(
+ Isolate* isolate, MicrotasksPolicy policy = MicrotasksPolicy::kAuto);
+
+ virtual ~MicrotaskQueue() = default;
+
+ /**
+ * Enqueues the callback to the queue.
+ */
+ virtual void EnqueueMicrotask(Isolate* isolate,
+ Local<Function> microtask) = 0;
+
+ /**
+ * Enqueues the callback to the queue.
+ */
+ virtual void EnqueueMicrotask(v8::Isolate* isolate,
+ MicrotaskCallback callback,
+ void* data = nullptr) = 0;
+
+ /**
+ * Adds a callback to notify the embedder after microtasks were run. The
+ * callback is triggered by explicit RunMicrotasks call or automatic
+ * microtasks execution (see Isolate::SetMicrotasksPolicy).
+ *
+   * The callback will trigger even if microtasks were attempted to run but the
+   * microtask queue was empty and no microtask was actually executed.
+ *
+ * Executing scripts inside the callback will not re-trigger microtasks and
+ * the callback.
+ */
+ virtual void AddMicrotasksCompletedCallback(
+ MicrotasksCompletedCallbackWithData callback, void* data = nullptr) = 0;
+
+ /**
+ * Removes callback that was installed by AddMicrotasksCompletedCallback.
+ */
+ virtual void RemoveMicrotasksCompletedCallback(
+ MicrotasksCompletedCallbackWithData callback, void* data = nullptr) = 0;
+
+ /**
+ * Runs microtasks if no microtask is running on this MicrotaskQueue instance.
+ */
+ virtual void PerformCheckpoint(Isolate* isolate) = 0;
+
+ /**
+ * Returns true if a microtask is running on this MicrotaskQueue instance.
+ */
+ virtual bool IsRunningMicrotasks() const = 0;
+
+ /**
+   * Returns the current depth of nested MicrotasksScopes that have
+   * kRunMicrotasks.
+ */
+ virtual int GetMicrotasksScopeDepth() const = 0;
+
+ MicrotaskQueue(const MicrotaskQueue&) = delete;
+ MicrotaskQueue& operator=(const MicrotaskQueue&) = delete;
+
+ private:
+ friend class internal::MicrotaskQueue;
+ MicrotaskQueue() = default;
+};
+
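+// Usage sketch (illustrative embedder code): an explicitly pumped queue shared
+// by related contexts. MyMicrotask is a hypothetical MicrotaskCallback.
+//
+//   std::unique_ptr<v8::MicrotaskQueue> queue =
+//       v8::MicrotaskQueue::New(isolate, v8::MicrotasksPolicy::kExplicit);
+//   queue->EnqueueMicrotask(isolate, &MyMicrotask, /*data=*/nullptr);
+//   queue->PerformCheckpoint(isolate);  // Runs the enqueued microtasks now.
+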
+/**
+ * This scope is used to control microtasks when MicrotasksPolicy::kScoped
+ * is used on Isolate. In this mode every non-primitive call to V8 should be
+ * done inside some MicrotasksScope.
+ * Microtasks are executed when the topmost MicrotasksScope marked as
+ * kRunMicrotasks exits.
+ * kDoNotRunMicrotasks should be used to annotate calls not intended to trigger
+ * microtasks.
+ */
+class V8_EXPORT V8_NODISCARD MicrotasksScope {
+ public:
+ enum Type { kRunMicrotasks, kDoNotRunMicrotasks };
+
+ MicrotasksScope(Isolate* isolate, Type type);
+ MicrotasksScope(Isolate* isolate, MicrotaskQueue* microtask_queue, Type type);
+ ~MicrotasksScope();
+
+ /**
+ * Runs microtasks if no kRunMicrotasks scope is currently active.
+ */
+ static void PerformCheckpoint(Isolate* isolate);
+
+ /**
+ * Returns current depth of nested kRunMicrotasks scopes.
+ */
+ static int GetCurrentDepth(Isolate* isolate);
+
+ /**
+ * Returns true while microtasks are being executed.
+ */
+ static bool IsRunningMicrotasks(Isolate* isolate);
+
+ // Prevent copying.
+ MicrotasksScope(const MicrotasksScope&) = delete;
+ MicrotasksScope& operator=(const MicrotasksScope&) = delete;
+
+ private:
+ internal::Isolate* const isolate_;
+ internal::MicrotaskQueue* const microtask_queue_;
+ bool run_;
+};
+
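+// Usage sketch (illustrative embedder code, with MicrotasksPolicy::kScoped):
+//
+//   {
+//     v8::MicrotasksScope scope(isolate,
+//                               v8::MicrotasksScope::kRunMicrotasks);
+//     // Call into V8 here; microtasks run when the topmost kRunMicrotasks
+//     // scope exits.
+//   }
+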
+} // namespace v8
+
+#endif // INCLUDE_V8_MICROTASKS_QUEUE_H_
diff --git a/deps/v8/include/v8-microtask.h b/deps/v8/include/v8-microtask.h
new file mode 100644
index 0000000000..c159203608
--- /dev/null
+++ b/deps/v8/include/v8-microtask.h
@@ -0,0 +1,28 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_MICROTASK_H_
+#define INCLUDE_V8_MICROTASK_H_
+
+namespace v8 {
+
+class Isolate;
+
+// --- Microtasks Callbacks ---
+using MicrotasksCompletedCallbackWithData = void (*)(Isolate*, void*);
+using MicrotaskCallback = void (*)(void* data);
+
+/**
+ * Policy for running microtasks:
+ * - explicit: microtasks are invoked with the
+ * Isolate::PerformMicrotaskCheckpoint() method;
+ * - scoped: microtasks invocation is controlled by MicrotasksScope objects;
+ * - auto: microtasks are invoked when the script call depth decrements
+ * to zero.
+ */
+enum class MicrotasksPolicy { kExplicit, kScoped, kAuto };
+
+} // namespace v8
+
+#endif // INCLUDE_V8_MICROTASK_H_
diff --git a/deps/v8/include/v8-object.h b/deps/v8/include/v8-object.h
new file mode 100644
index 0000000000..114e452a38
--- /dev/null
+++ b/deps/v8/include/v8-object.h
@@ -0,0 +1,770 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_OBJECT_H_
+#define INCLUDE_V8_OBJECT_H_
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-maybe.h" // NOLINT(build/include_directory)
+#include "v8-persistent-handle.h" // NOLINT(build/include_directory)
+#include "v8-primitive.h" // NOLINT(build/include_directory)
+#include "v8-traced-handle.h" // NOLINT(build/include_directory)
+#include "v8-value.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Array;
+class Function;
+class FunctionTemplate;
+template <typename T>
+class PropertyCallbackInfo;
+
+/**
+ * A private symbol
+ *
+ * This is an experimental feature. Use at your own risk.
+ */
+class V8_EXPORT Private : public Data {
+ public:
+ /**
+ * Returns the print name string of the private symbol, or undefined if none.
+ */
+ Local<Value> Name() const;
+
+ /**
+ * Create a private symbol. If name is not empty, it will be the description.
+ */
+ static Local<Private> New(Isolate* isolate,
+ Local<String> name = Local<String>());
+
+ /**
+ * Retrieve a global private symbol. If a symbol with this name has not
+ * been retrieved in the same isolate before, it is created.
+ * Note that private symbols created this way are never collected, so
+ * they should only be used for statically fixed properties.
+ * Also, there is only one global name space for the names used as keys.
+ * To minimize the potential for clashes, use qualified names as keys,
+ * e.g., "Class#property".
+ */
+ static Local<Private> ForApi(Isolate* isolate, Local<String> name);
+
+ V8_INLINE static Private* Cast(Data* data);
+
+ private:
+ Private();
+
+ static void CheckCast(Data* that);
+};
+
+/**
+ * An instance of a Property Descriptor, see Ecma-262 6.2.4.
+ *
+ * Properties in a descriptor are present or absent. If you do not set
+ * `enumerable`, `configurable`, and `writable`, they are absent. If `value`,
+ * `get`, or `set` must be passed to the constructor but should be absent, use
+ * empty handles.
+ *
+ * Accessors `get` and `set` must be callable or undefined if they are present.
+ *
+ * \note Only query properties if they are present, i.e., call `x()` only if
+ * `has_x()` returns true.
+ *
+ * \code
+ * // var desc = {writable: false}
+ * v8::PropertyDescriptor d(Local<Value>(), false);
+ * d.value(); // error, value not set
+ * if (d.has_writable()) {
+ * d.writable(); // false
+ * }
+ *
+ * // var desc = {value: undefined}
+ * v8::PropertyDescriptor d(v8::Undefined(isolate));
+ *
+ * // var desc = {get: undefined}
+ * v8::PropertyDescriptor d(v8::Undefined(isolate), Local<Value>());
+ * \endcode
+ */
+class V8_EXPORT PropertyDescriptor {
+ public:
+ // GenericDescriptor
+ PropertyDescriptor();
+
+ // DataDescriptor
+ explicit PropertyDescriptor(Local<Value> value);
+
+ // DataDescriptor with writable property
+ PropertyDescriptor(Local<Value> value, bool writable);
+
+ // AccessorDescriptor
+ PropertyDescriptor(Local<Value> get, Local<Value> set);
+
+ ~PropertyDescriptor();
+
+ Local<Value> value() const;
+ bool has_value() const;
+
+ Local<Value> get() const;
+ bool has_get() const;
+ Local<Value> set() const;
+ bool has_set() const;
+
+ void set_enumerable(bool enumerable);
+ bool enumerable() const;
+ bool has_enumerable() const;
+
+ void set_configurable(bool configurable);
+ bool configurable() const;
+ bool has_configurable() const;
+
+ bool writable() const;
+ bool has_writable() const;
+
+ struct PrivateData;
+ PrivateData* get_private() const { return private_; }
+
+ PropertyDescriptor(const PropertyDescriptor&) = delete;
+ void operator=(const PropertyDescriptor&) = delete;
+
+ private:
+ PrivateData* private_;
+};
+
+/**
+ * PropertyAttribute.
+ */
+enum PropertyAttribute {
+ /** None. **/
+ None = 0,
+ /** ReadOnly, i.e., not writable. **/
+ ReadOnly = 1 << 0,
+ /** DontEnum, i.e., not enumerable. **/
+ DontEnum = 1 << 1,
+ /** DontDelete, i.e., not configurable. **/
+ DontDelete = 1 << 2
+};
+
+/**
+ * Accessor[Getter|Setter] are used as callback functions when
+ * setting|getting a particular property. See Object and ObjectTemplate's
+ * method SetAccessor.
+ */
+using AccessorGetterCallback =
+ void (*)(Local<String> property, const PropertyCallbackInfo<Value>& info);
+using AccessorNameGetterCallback =
+ void (*)(Local<Name> property, const PropertyCallbackInfo<Value>& info);
+
+using AccessorSetterCallback = void (*)(Local<String> property,
+ Local<Value> value,
+ const PropertyCallbackInfo<void>& info);
+using AccessorNameSetterCallback =
+ void (*)(Local<Name> property, Local<Value> value,
+ const PropertyCallbackInfo<void>& info);
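+
+// A minimal callback sketch (hypothetical embedder code) matching the
+// AccessorNameGetterCallback and AccessorNameSetterCallback signatures above:
+//
+//   void XGetter(Local<Name> property,
+//                const PropertyCallbackInfo<Value>& info) {
+//     info.GetReturnValue().Set(42);  // report a fixed value
+//   }
+//   void XSetter(Local<Name> property, Local<Value> value,
+//                const PropertyCallbackInfo<void>& info) {
+//     // Store `value` somewhere reachable, e.g., via info.Data().
+//   }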
+
+/**
+ * Access control specifications.
+ *
+ * Some accessors should be accessible across contexts. These
+ * accessors have an explicit access control parameter which specifies
+ * the kind of cross-context access that should be allowed.
+ *
+ * TODO(dcarney): Remove PROHIBITS_OVERWRITING as it is now unused.
+ */
+enum AccessControl {
+ DEFAULT = 0,
+ ALL_CAN_READ = 1,
+ ALL_CAN_WRITE = 1 << 1,
+ PROHIBITS_OVERWRITING = 1 << 2
+};
+
+/**
+ * Property filter bits. They can be or'ed to build a composite filter.
+ */
+enum PropertyFilter {
+ ALL_PROPERTIES = 0,
+ ONLY_WRITABLE = 1,
+ ONLY_ENUMERABLE = 2,
+ ONLY_CONFIGURABLE = 4,
+ SKIP_STRINGS = 8,
+ SKIP_SYMBOLS = 16
+};
+
+/**
+ * Options for marking whether callbacks may trigger JS-observable side effects.
+ * Side-effect-free callbacks are allowlisted during debug evaluation with
+ * throwOnSideEffect. This applies when calling a Function, FunctionTemplate,
+ * or an Accessor callback. For Interceptors, please see
+ * PropertyHandlerFlags's kHasNoSideEffect.
+ * Callbacks that only cause side effects to the receiver are allowlisted if
+ * invoked on receiver objects that are created within the same debug-evaluate
+ * call, as these objects are temporary and the side effect does not escape.
+ */
+enum class SideEffectType {
+ kHasSideEffect,
+ kHasNoSideEffect,
+ kHasSideEffectToReceiver
+};
+
+/**
+ * Keys/Properties filter enums:
+ *
+ * KeyCollectionMode limits the range of collected properties. kOwnOnly limits
+ * the collected properties to the given Object only. kIncludePrototypes will
+ * include all keys of the object's prototype chain as well.
+ */
+enum class KeyCollectionMode { kOwnOnly, kIncludePrototypes };
+
+/**
+ * kIncludeIndices allows for integer indices to be collected, while
+ * kSkipIndices will exclude integer indices from being collected.
+ */
+enum class IndexFilter { kIncludeIndices, kSkipIndices };
+
+/**
+ * kConvertToString will convert integer indices to strings.
+ * kKeepNumbers will return numbers for integer indices.
+ */
+enum class KeyConversionMode { kConvertToString, kKeepNumbers, kNoNumbers };
+
+/**
+ * Integrity level for objects.
+ */
+enum class IntegrityLevel { kFrozen, kSealed };
+
+/**
+ * A JavaScript object (ECMA-262, 4.3.3)
+ */
+class V8_EXPORT Object : public Value {
+ public:
+ /**
+   * Set only returns Just(true) or Empty(), so if the call should never
+   * fail, use result.Check().
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> Set(Local<Context> context,
+ Local<Value> key, Local<Value> value);
+
+ V8_WARN_UNUSED_RESULT Maybe<bool> Set(Local<Context> context, uint32_t index,
+ Local<Value> value);
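+
+  // A hedged usage sketch (`obj`, `context`, `key`, and `value` are assumed
+  // embedder-side handles). Check() crashes if the result is Nothing, i.e.,
+  // if an exception was thrown:
+  //
+  //   obj->Set(context, key, value).Check();
+  //   bool ok;
+  //   if (!obj->Set(context, key, value).To(&ok)) { /* exception pending */ }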
+
+ // Implements CreateDataProperty (ECMA-262, 7.3.4).
+ //
+ // Defines a configurable, writable, enumerable property with the given value
+ // on the object unless the property already exists and is not configurable
+ // or the object is not extensible.
+ //
+ // Returns true on success.
+ V8_WARN_UNUSED_RESULT Maybe<bool> CreateDataProperty(Local<Context> context,
+ Local<Name> key,
+ Local<Value> value);
+ V8_WARN_UNUSED_RESULT Maybe<bool> CreateDataProperty(Local<Context> context,
+ uint32_t index,
+ Local<Value> value);
+
+ // Implements DefineOwnProperty.
+ //
+  // In general, CreateDataProperty will be faster; however, it does not allow
+ // for specifying attributes.
+ //
+ // Returns true on success.
+ V8_WARN_UNUSED_RESULT Maybe<bool> DefineOwnProperty(
+ Local<Context> context, Local<Name> key, Local<Value> value,
+ PropertyAttribute attributes = None);
+
+ // Implements Object.DefineProperty(O, P, Attributes), see Ecma-262 19.1.2.4.
+ //
+ // The defineProperty function is used to add an own property or
+ // update the attributes of an existing own property of an object.
+ //
+ // Both data and accessor descriptors can be used.
+ //
+  // In general, CreateDataProperty is faster; however, it does not allow
+ // for specifying attributes or an accessor descriptor.
+ //
+ // The PropertyDescriptor can change when redefining a property.
+ //
+ // Returns true on success.
+ V8_WARN_UNUSED_RESULT Maybe<bool> DefineProperty(
+ Local<Context> context, Local<Name> key, PropertyDescriptor& descriptor);
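+
+  // E.g., a hedged sketch (`obj`, `context`, `key`, and `value` assumed):
+  // define a read-only data property through a descriptor:
+  //
+  //   PropertyDescriptor desc(value, /*writable=*/false);
+  //   obj->DefineProperty(context, key, desc).Check();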
+
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> Get(Local<Context> context,
+ Local<Value> key);
+
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> Get(Local<Context> context,
+ uint32_t index);
+
+ /**
+ * Gets the property attributes of a property which can be None or
+ * any combination of ReadOnly, DontEnum and DontDelete. Returns
+ * None when the property doesn't exist.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<PropertyAttribute> GetPropertyAttributes(
+ Local<Context> context, Local<Value> key);
+
+ /**
+ * Returns Object.getOwnPropertyDescriptor as per ES2016 section 19.1.2.6.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetOwnPropertyDescriptor(
+ Local<Context> context, Local<Name> key);
+
+ /**
+ * Object::Has() calls the abstract operation HasProperty(O, P) described
+   * in ECMA-262, 7.3.10. Has() returns true if the object has the property,
+   * either own or on the prototype chain. Interceptors, i.e.,
+   * PropertyQueryCallbacks, are called if present.
+ *
+ * Has() has the same side effects as JavaScript's `variable in object`.
+ * For example, calling Has() on a revoked proxy will throw an exception.
+ *
+ * \note Has() converts the key to a name, which possibly calls back into
+ * JavaScript.
+ *
+ * See also v8::Object::HasOwnProperty() and
+ * v8::Object::HasRealNamedProperty().
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> Has(Local<Context> context,
+ Local<Value> key);
+
+ V8_WARN_UNUSED_RESULT Maybe<bool> Delete(Local<Context> context,
+ Local<Value> key);
+
+ V8_WARN_UNUSED_RESULT Maybe<bool> Has(Local<Context> context, uint32_t index);
+
+ V8_WARN_UNUSED_RESULT Maybe<bool> Delete(Local<Context> context,
+ uint32_t index);
+
+ /**
+ * Note: SideEffectType affects the getter only, not the setter.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> SetAccessor(
+ Local<Context> context, Local<Name> name,
+ AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter = nullptr,
+ MaybeLocal<Value> data = MaybeLocal<Value>(),
+ AccessControl settings = DEFAULT, PropertyAttribute attribute = None,
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
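+
+  // A hedged wiring sketch reusing the hypothetical XGetter/XSetter callbacks
+  // shown earlier; marking the getter side-effect-free allows debug-evaluate
+  // with throwOnSideEffect to call it:
+  //
+  //   obj->SetAccessor(context, name, XGetter, XSetter,
+  //                    MaybeLocal<Value>(), DEFAULT, None,
+  //                    SideEffectType::kHasNoSideEffect)
+  //       .Check();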
+
+ void SetAccessorProperty(Local<Name> name, Local<Function> getter,
+ Local<Function> setter = Local<Function>(),
+ PropertyAttribute attribute = None,
+ AccessControl settings = DEFAULT);
+
+ /**
+ * Sets a native data property like Template::SetNativeDataProperty, but
+   * this method sets the property on this object directly.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> SetNativeDataProperty(
+ Local<Context> context, Local<Name> name,
+ AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter = nullptr,
+ Local<Value> data = Local<Value>(), PropertyAttribute attributes = None,
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
+
+ /**
+ * Attempts to create a property with the given name which behaves like a data
+ * property, except that the provided getter is invoked (and provided with the
+ * data value) to supply its value the first time it is read. After the
+ * property is accessed once, it is replaced with an ordinary data property.
+ *
+ * Analogous to Template::SetLazyDataProperty.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> SetLazyDataProperty(
+ Local<Context> context, Local<Name> name,
+ AccessorNameGetterCallback getter, Local<Value> data = Local<Value>(),
+ PropertyAttribute attributes = None,
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
+
+ /**
+ * Functionality for private properties.
+ * This is an experimental feature, use at your own risk.
+ * Note: Private properties are not inherited. Do not rely on this, since it
+ * may change.
+ */
+ Maybe<bool> HasPrivate(Local<Context> context, Local<Private> key);
+ Maybe<bool> SetPrivate(Local<Context> context, Local<Private> key,
+ Local<Value> value);
+ Maybe<bool> DeletePrivate(Local<Context> context, Local<Private> key);
+ MaybeLocal<Value> GetPrivate(Local<Context> context, Local<Private> key);
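+
+  // A hedged sketch (`isolate`, `context`, `obj`, and `value` assumed)
+  // combining Private::ForApi with the accessors above:
+  //
+  //   Local<Private> priv = Private::ForApi(
+  //       isolate, String::NewFromUtf8Literal(isolate, "MyEmbedder#field"));
+  //   obj->SetPrivate(context, priv, value).Check();
+  //   Local<Value> stored = obj->GetPrivate(context, priv).ToLocalChecked();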
+
+ /**
+ * Returns an array containing the names of the enumerable properties
+ * of this object, including properties from prototype objects. The
+ * array returned by this method contains the same values as would
+ * be enumerated by a for-in statement over this object.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetPropertyNames(
+ Local<Context> context);
+ V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetPropertyNames(
+ Local<Context> context, KeyCollectionMode mode,
+ PropertyFilter property_filter, IndexFilter index_filter,
+ KeyConversionMode key_conversion = KeyConversionMode::kKeepNumbers);
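+
+  // E.g., a hedged sketch: collect only the enumerable own string keys and
+  // convert integer indices to strings:
+  //
+  //   Local<Array> names =
+  //       obj->GetPropertyNames(context, KeyCollectionMode::kOwnOnly,
+  //                             static_cast<PropertyFilter>(
+  //                                 ONLY_ENUMERABLE | SKIP_SYMBOLS),
+  //                             IndexFilter::kIncludeIndices,
+  //                             KeyConversionMode::kConvertToString)
+  //           .ToLocalChecked();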
+
+ /**
+ * This function has the same functionality as GetPropertyNames but
+ * the returned array doesn't contain the names of properties from
+ * prototype objects.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetOwnPropertyNames(
+ Local<Context> context);
+
+ /**
+   * Returns an array containing the names of this object's own properties
+   * that match the given filter. Unlike GetPropertyNames, properties from
+   * prototype objects are not included.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetOwnPropertyNames(
+ Local<Context> context, PropertyFilter filter,
+ KeyConversionMode key_conversion = KeyConversionMode::kKeepNumbers);
+
+ /**
+ * Get the prototype object. This does not skip objects marked to
+ * be skipped by __proto__ and it does not consult the security
+ * handler.
+ */
+ Local<Value> GetPrototype();
+
+ /**
+ * Set the prototype object. This does not skip objects marked to
+ * be skipped by __proto__ and it does not consult the security
+ * handler.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> SetPrototype(Local<Context> context,
+ Local<Value> prototype);
+
+ /**
+ * Finds an instance of the given function template in the prototype
+ * chain.
+ */
+ Local<Object> FindInstanceInPrototypeChain(Local<FunctionTemplate> tmpl);
+
+ /**
+   * Calls the builtin Object.prototype.toString on this object.
+   * This is different from Value::ToString(), which may call a
+   * user-defined toString() function; this method does not.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<String> ObjectProtoToString(
+ Local<Context> context);
+
+ /**
+ * Returns the name of the function invoked as a constructor for this object.
+ */
+ Local<String> GetConstructorName();
+
+ /**
+ * Sets the integrity level of the object.
+ */
+ Maybe<bool> SetIntegrityLevel(Local<Context> context, IntegrityLevel level);
+
+ /** Gets the number of internal fields for this Object. */
+ int InternalFieldCount() const;
+
+ /** Same as above, but works for PersistentBase. */
+ V8_INLINE static int InternalFieldCount(
+ const PersistentBase<Object>& object) {
+ return object.val_->InternalFieldCount();
+ }
+
+ /** Same as above, but works for BasicTracedReference. */
+ V8_INLINE static int InternalFieldCount(
+ const BasicTracedReference<Object>& object) {
+ return object->InternalFieldCount();
+ }
+
+ /** Gets the value from an internal field. */
+ V8_INLINE Local<Value> GetInternalField(int index);
+
+ /** Sets the value in an internal field. */
+ void SetInternalField(int index, Local<Value> value);
+
+ /**
+ * Gets a 2-byte-aligned native pointer from an internal field. This field
+   * must have been set by SetAlignedPointerInInternalField; everything else
+ * leads to undefined behavior.
+ */
+ V8_INLINE void* GetAlignedPointerFromInternalField(int index);
+
+ /** Same as above, but works for PersistentBase. */
+ V8_INLINE static void* GetAlignedPointerFromInternalField(
+ const PersistentBase<Object>& object, int index) {
+ return object.val_->GetAlignedPointerFromInternalField(index);
+ }
+
+  /** Same as above, but works for BasicTracedReference. */
+ V8_INLINE static void* GetAlignedPointerFromInternalField(
+ const BasicTracedReference<Object>& object, int index) {
+ return object->GetAlignedPointerFromInternalField(index);
+ }
+
+ /**
+ * Sets a 2-byte-aligned native pointer in an internal field. To retrieve such
+   * a field, GetAlignedPointerFromInternalField must be used; everything else
+ * leads to undefined behavior.
+ */
+ void SetAlignedPointerInInternalField(int index, void* value);
+ void SetAlignedPointerInInternalFields(int argc, int indices[],
+ void* values[]);
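+
+  // A hedged round-trip sketch (`wrapper` is assumed to have at least one
+  // internal field; `MyNativeState` is a hypothetical embedder type whose
+  // instances are at least 2-byte aligned):
+  //
+  //   static MyNativeState state;
+  //   wrapper->SetAlignedPointerInInternalField(0, &state);
+  //   auto* back = static_cast<MyNativeState*>(
+  //       wrapper->GetAlignedPointerFromInternalField(0));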
+
+ /**
+ * HasOwnProperty() is like JavaScript's Object.prototype.hasOwnProperty().
+ *
+ * See also v8::Object::Has() and v8::Object::HasRealNamedProperty().
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> HasOwnProperty(Local<Context> context,
+ Local<Name> key);
+ V8_WARN_UNUSED_RESULT Maybe<bool> HasOwnProperty(Local<Context> context,
+ uint32_t index);
+ /**
+ * Use HasRealNamedProperty() if you want to check if an object has an own
+ * property without causing side effects, i.e., without calling interceptors.
+ *
+ * This function is similar to v8::Object::HasOwnProperty(), but it does not
+ * call interceptors.
+ *
+ * \note Consider using non-masking interceptors, i.e., the interceptors are
+ * not called if the receiver has the real named property. See
+ * `v8::PropertyHandlerFlags::kNonMasking`.
+ *
+ * See also v8::Object::Has().
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> HasRealNamedProperty(Local<Context> context,
+ Local<Name> key);
+ V8_WARN_UNUSED_RESULT Maybe<bool> HasRealIndexedProperty(
+ Local<Context> context, uint32_t index);
+ V8_WARN_UNUSED_RESULT Maybe<bool> HasRealNamedCallbackProperty(
+ Local<Context> context, Local<Name> key);
+
+ /**
+   * If result.IsEmpty(), no real property was located in the prototype chain.
+ * This means interceptors in the prototype chain are not called.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetRealNamedPropertyInPrototypeChain(
+ Local<Context> context, Local<Name> key);
+
+ /**
+ * Gets the property attributes of a real property in the prototype chain,
+ * which can be None or any combination of ReadOnly, DontEnum and DontDelete.
+ * Interceptors in the prototype chain are not called.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<PropertyAttribute>
+ GetRealNamedPropertyAttributesInPrototypeChain(Local<Context> context,
+ Local<Name> key);
+
+ /**
+   * If result.IsEmpty(), no real property was located on the object or
+ * in the prototype chain.
+ * This means interceptors in the prototype chain are not called.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetRealNamedProperty(
+ Local<Context> context, Local<Name> key);
+
+ /**
+ * Gets the property attributes of a real property which can be
+ * None or any combination of ReadOnly, DontEnum and DontDelete.
+ * Interceptors in the prototype chain are not called.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<PropertyAttribute> GetRealNamedPropertyAttributes(
+ Local<Context> context, Local<Name> key);
+
+ /** Tests for a named lookup interceptor.*/
+ bool HasNamedLookupInterceptor() const;
+
+ /** Tests for an index lookup interceptor.*/
+ bool HasIndexedLookupInterceptor() const;
+
+ /**
+ * Returns the identity hash for this object. The current implementation
+ * uses a hidden property on the object to store the identity hash.
+ *
+ * The return value will never be 0. Also, it is not guaranteed to be
+ * unique.
+ */
+ int GetIdentityHash();
+
+ /**
+   * Clone this object with a fast but shallow copy. The fields of the clone
+   * will point to the same values as the fields of the original object.
+ */
+ // TODO(dcarney): take an isolate and optionally bail out?
+ Local<Object> Clone();
+
+ /**
+ * Returns the context in which the object was created.
+ */
+ V8_DEPRECATE_SOON("Use MaybeLocal<Context> GetCreationContext()")
+ Local<Context> CreationContext();
+ MaybeLocal<Context> GetCreationContext();
+
+  /** Same as above, but works for Persistents. */
+ V8_DEPRECATE_SOON(
+ "Use MaybeLocal<Context> GetCreationContext(const "
+ "PersistentBase<Object>& object)")
+ static Local<Context> CreationContext(const PersistentBase<Object>& object);
+ V8_INLINE static MaybeLocal<Context> GetCreationContext(
+ const PersistentBase<Object>& object) {
+ return object.val_->GetCreationContext();
+ }
+
+ /**
+ * Checks whether a callback is set by the
+ * ObjectTemplate::SetCallAsFunctionHandler method.
+ * When an Object is callable this method returns true.
+ */
+ bool IsCallable() const;
+
+ /**
+ * True if this object is a constructor.
+ */
+ bool IsConstructor() const;
+
+ /**
+ * True if this object can carry information relevant to the embedder in its
+ * embedder fields, false otherwise. This is generally true for objects
+ * constructed through function templates but also holds for other types where
+   * V8 automatically adds internal fields at compile time, such as
+ * v8::ArrayBuffer.
+ */
+ bool IsApiWrapper() const;
+
+ /**
+ * True if this object was created from an object template which was marked
+ * as undetectable. See v8::ObjectTemplate::MarkAsUndetectable for more
+ * information.
+ */
+ bool IsUndetectable() const;
+
+ /**
+ * Call an Object as a function if a callback is set by the
+ * ObjectTemplate::SetCallAsFunctionHandler method.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> CallAsFunction(Local<Context> context,
+ Local<Value> recv,
+ int argc,
+ Local<Value> argv[]);
+
+ /**
+ * Call an Object as a constructor if a callback is set by the
+ * ObjectTemplate::SetCallAsFunctionHandler method.
+ * Note: This method behaves like the Function::NewInstance method.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> CallAsConstructor(
+ Local<Context> context, int argc, Local<Value> argv[]);
+
+ /**
+   * Returns the isolate to which the Object belongs.
+ */
+ Isolate* GetIsolate();
+
+ /**
+ * If this object is a Set, Map, WeakSet or WeakMap, this returns a
+ * representation of the elements of this object as an array.
+ * If this object is a SetIterator or MapIterator, this returns all
+ * elements of the underlying collection, starting at the iterator's current
+ * position.
+ * For other types, this will return an empty MaybeLocal<Array> (without
+ * scheduling an exception).
+ */
+ MaybeLocal<Array> PreviewEntries(bool* is_key_value);
+
+ static Local<Object> New(Isolate* isolate);
+
+ /**
+ * Creates a JavaScript object with the given properties, and
+   * the given prototype_or_null (which can be any JavaScript
+ * value, and if it's null, the newly created object won't have
+ * a prototype at all). This is similar to Object.create().
+ * All properties will be created as enumerable, configurable
+ * and writable properties.
+ */
+ static Local<Object> New(Isolate* isolate, Local<Value> prototype_or_null,
+ Local<Name>* names, Local<Value>* values,
+ size_t length);
+
+ V8_INLINE static Object* Cast(Value* obj);
+
+ /**
+ * Support for TC39 "dynamic code brand checks" proposal.
+ *
+   * This API allows querying whether an object was constructed from a
+ * "code like" ObjectTemplate.
+ *
+ * See also: v8::ObjectTemplate::SetCodeLike
+ */
+ bool IsCodeLike(Isolate* isolate) const;
+
+ private:
+ Object();
+ static void CheckCast(Value* obj);
+ Local<Value> SlowGetInternalField(int index);
+ void* SlowGetAlignedPointerFromInternalField(int index);
+};
+
+// --- Implementation ---
+
+Local<Value> Object::GetInternalField(int index) {
+#ifndef V8_ENABLE_CHECKS
+ using A = internal::Address;
+ using I = internal::Internals;
+ A obj = *reinterpret_cast<A*>(this);
+ // Fast path: If the object is a plain JSObject, which is the common case, we
+ // know where to find the internal fields and can return the value directly.
+ int instance_type = I::GetInstanceType(obj);
+ if (v8::internal::CanHaveInternalField(instance_type)) {
+ int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index);
+ A value = I::ReadRawField<A>(obj, offset);
+#ifdef V8_COMPRESS_POINTERS
+ // We read the full pointer value and then decompress it in order to avoid
+    // dealing with potential endianness issues.
+ value = I::DecompressTaggedAnyField(obj, static_cast<uint32_t>(value));
+#endif
+ internal::Isolate* isolate =
+ internal::IsolateFromNeverReadOnlySpaceObject(obj);
+ A* result = HandleScope::CreateHandle(isolate, value);
+ return Local<Value>(reinterpret_cast<Value*>(result));
+ }
+#endif
+ return SlowGetInternalField(index);
+}
+
+void* Object::GetAlignedPointerFromInternalField(int index) {
+#ifndef V8_ENABLE_CHECKS
+ using A = internal::Address;
+ using I = internal::Internals;
+ A obj = *reinterpret_cast<A*>(this);
+ // Fast path: If the object is a plain JSObject, which is the common case, we
+ // know where to find the internal fields and can return the value directly.
+ auto instance_type = I::GetInstanceType(obj);
+ if (v8::internal::CanHaveInternalField(instance_type)) {
+ int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index);
+#ifdef V8_HEAP_SANDBOX
+ offset += I::kEmbedderDataSlotRawPayloadOffset;
+#endif
+ internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj);
+ A value = I::ReadExternalPointerField(
+ isolate, obj, offset, internal::kEmbedderDataSlotPayloadTag);
+ return reinterpret_cast<void*>(value);
+ }
+#endif
+ return SlowGetAlignedPointerFromInternalField(index);
+}
+
+Private* Private::Cast(Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return reinterpret_cast<Private*>(data);
+}
+
+Object* Object::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Object*>(value);
+}
+
+} // namespace v8
+
+#endif // INCLUDE_V8_OBJECT_H_
diff --git a/deps/v8/include/v8-persistent-handle.h b/deps/v8/include/v8-persistent-handle.h
new file mode 100644
index 0000000000..a6c21268d6
--- /dev/null
+++ b/deps/v8/include/v8-persistent-handle.h
@@ -0,0 +1,590 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_PERSISTENT_HANDLE_H_
+#define INCLUDE_V8_PERSISTENT_HANDLE_H_
+
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-weak-callback-info.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Isolate;
+template <class K, class V, class T>
+class PersistentValueMapBase;
+template <class V, class T>
+class PersistentValueVector;
+template <class T>
+class Global;
+template <class T>
+class PersistentBase;
+template <class K, class V, class T>
+class PersistentValueMap;
+class Value;
+
+namespace api_internal {
+V8_EXPORT Value* Eternalize(v8::Isolate* isolate, Value* handle);
+V8_EXPORT internal::Address* CopyGlobalReference(internal::Address* from);
+V8_EXPORT void DisposeGlobal(internal::Address* global_handle);
+V8_EXPORT void MakeWeak(internal::Address** location_addr);
+V8_EXPORT void* ClearWeak(internal::Address* location);
+V8_EXPORT void AnnotateStrongRetainer(internal::Address* location,
+ const char* label);
+V8_EXPORT internal::Address* GlobalizeReference(internal::Isolate* isolate,
+ internal::Address* handle);
+V8_EXPORT void MoveGlobalReference(internal::Address** from,
+ internal::Address** to);
+} // namespace api_internal
+
+/**
+ * Eternal handles are set-once handles that live for the lifetime of the
+ * isolate.
+ */
+template <class T>
+class Eternal {
+ public:
+ V8_INLINE Eternal() : val_(nullptr) {}
+ template <class S>
+ V8_INLINE Eternal(Isolate* isolate, Local<S> handle) : val_(nullptr) {
+ Set(isolate, handle);
+ }
+ // Can only be safely called if already set.
+ V8_INLINE Local<T> Get(Isolate* isolate) const {
+ // The eternal handle will never go away, so as with the roots, we don't
+ // even need to open a handle.
+ return Local<T>(val_);
+ }
+
+ V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
+
+ template <class S>
+ void Set(Isolate* isolate, Local<S> handle) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ val_ = reinterpret_cast<T*>(
+ api_internal::Eternalize(isolate, reinterpret_cast<Value*>(*handle)));
+ }
+
+ private:
+ T* val_;
+};
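+
+// A hedged usage sketch (`isolate` assumed): an Eternal is set once and can
+// then be read without opening a handle for it:
+//
+//   Eternal<String> tag;
+//   tag.Set(isolate, String::NewFromUtf8Literal(isolate, "tag"));
+//   Local<String> local_tag = tag.Get(isolate);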
+
+namespace api_internal {
+V8_EXPORT void MakeWeak(internal::Address* location, void* data,
+ WeakCallbackInfo<void>::Callback weak_callback,
+ WeakCallbackType type);
+} // namespace api_internal
+
+/**
+ * An object reference that is independent of any handle scope. Where
+ * a Local handle only lives as long as the HandleScope in which it was
+ * allocated, a PersistentBase handle remains valid until it is explicitly
+ * disposed using Reset().
+ *
+ * A persistent handle contains a reference to a storage cell within
+ * the V8 engine which holds an object value and which is updated by
+ * the garbage collector whenever the object is moved. A new storage
+ * cell can be created using the constructor or PersistentBase::Reset and
+ * existing handles can be disposed using PersistentBase::Reset.
+ */
+template <class T>
+class PersistentBase {
+ public:
+ /**
+   * If non-empty, destroy the underlying storage cell.
+ * IsEmpty() will return true after this call.
+ */
+ V8_INLINE void Reset();
+
+ /**
+   * If non-empty, destroy the underlying storage cell and create a new one
+   * with the contents of other if other is non-empty.
+ */
+ template <class S>
+ V8_INLINE void Reset(Isolate* isolate, const Local<S>& other);
+
+ /**
+   * If non-empty, destroy the underlying storage cell and create a new one
+   * with the contents of other if other is non-empty.
+ */
+ template <class S>
+ V8_INLINE void Reset(Isolate* isolate, const PersistentBase<S>& other);
+
+ V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
+  V8_INLINE void Empty() { val_ = nullptr; }
+
+ V8_INLINE Local<T> Get(Isolate* isolate) const {
+ return Local<T>::New(isolate, *this);
+ }
+
+ template <class S>
+ V8_INLINE bool operator==(const PersistentBase<S>& that) const {
+ internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
+ internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
+ if (a == nullptr) return b == nullptr;
+ if (b == nullptr) return false;
+ return *a == *b;
+ }
+
+ template <class S>
+ V8_INLINE bool operator==(const Local<S>& that) const {
+ internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
+ internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
+ if (a == nullptr) return b == nullptr;
+ if (b == nullptr) return false;
+ return *a == *b;
+ }
+
+ template <class S>
+ V8_INLINE bool operator!=(const PersistentBase<S>& that) const {
+ return !operator==(that);
+ }
+
+ template <class S>
+ V8_INLINE bool operator!=(const Local<S>& that) const {
+ return !operator==(that);
+ }
+
+ /**
+ * Install a finalization callback on this object.
+ * NOTE: There is no guarantee as to *when* or even *if* the callback is
+   * invoked. The invocation is performed solely on a best-effort basis.
+ * As always, GC-based finalization should *not* be relied upon for any
+ * critical form of resource management!
+ *
+ * The callback is supposed to reset the handle. No further V8 API may be
+ * called in this callback. In case additional work involving V8 needs to be
+ * done, a second callback can be scheduled using
+ * WeakCallbackInfo<void>::SetSecondPassCallback.
+ */
+ template <typename P>
+ V8_INLINE void SetWeak(P* parameter,
+ typename WeakCallbackInfo<P>::Callback callback,
+ WeakCallbackType type);
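+
+  // A hedged sketch (assumed embedder code; `State` is a hypothetical struct
+  // owning a Global<Object> member `handle`). The first-pass callback must
+  // reset the handle and may not call any other V8 API:
+  //
+  //   state.handle.SetWeak(
+  //       &state,
+  //       [](const WeakCallbackInfo<State>& info) {
+  //         info.GetParameter()->handle.Reset();
+  //       },
+  //       WeakCallbackType::kParameter);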
+
+ /**
+   * Turns this handle into a weak phantom handle without a finalization
+   * callback.
+ * The handle will be reset automatically when the garbage collector detects
+ * that the object is no longer reachable.
+ * A related function Isolate::NumberOfPhantomHandleResetsSinceLastCall
+ * returns how many phantom handles were reset by the garbage collector.
+ */
+ V8_INLINE void SetWeak();
+
+ template <typename P>
+ V8_INLINE P* ClearWeak();
+
+ // TODO(dcarney): remove this.
+ V8_INLINE void ClearWeak() { ClearWeak<void>(); }
+
+ /**
+ * Annotates the strong handle with the given label, which is then used by the
+ * heap snapshot generator as a name of the edge from the root to the handle.
+ * The function does not take ownership of the label and assumes that the
+ * label is valid as long as the handle is valid.
+ */
+ V8_INLINE void AnnotateStrongRetainer(const char* label);
+
+ /** Returns true if the handle's reference is weak. */
+ V8_INLINE bool IsWeak() const;
+
+ /**
+ * Assigns a wrapper class ID to the handle.
+ */
+ V8_INLINE void SetWrapperClassId(uint16_t class_id);
+
+ /**
+ * Returns the class ID previously assigned to this handle or 0 if no class ID
+ * was previously assigned.
+ */
+ V8_INLINE uint16_t WrapperClassId() const;
+
+ PersistentBase(const PersistentBase& other) = delete;
+ void operator=(const PersistentBase&) = delete;
+
+ private:
+ friend class Isolate;
+ friend class Utils;
+ template <class F>
+ friend class Local;
+ template <class F1, class F2>
+ friend class Persistent;
+ template <class F>
+ friend class Global;
+ template <class F>
+ friend class PersistentBase;
+ template <class F>
+ friend class ReturnValue;
+ template <class F1, class F2, class F3>
+ friend class PersistentValueMapBase;
+ template <class F1, class F2>
+ friend class PersistentValueVector;
+ friend class Object;
+
+ explicit V8_INLINE PersistentBase(T* val) : val_(val) {}
+ V8_INLINE static T* New(Isolate* isolate, T* that);
+
+ T* val_;
+};
+
+/**
+ * Default traits for Persistent. This class does not allow
+ * use of the copy constructor or assignment operator.
+ * At present kResetInDestructor is not set, but that will change in a future
+ * version.
+ */
+template <class T>
+class NonCopyablePersistentTraits {
+ public:
+ using NonCopyablePersistent = Persistent<T, NonCopyablePersistentTraits<T>>;
+ static const bool kResetInDestructor = false;
+ template <class S, class M>
+ V8_INLINE static void Copy(const Persistent<S, M>& source,
+ NonCopyablePersistent* dest) {
+ static_assert(sizeof(S) < 0,
+ "NonCopyablePersistentTraits::Copy is not instantiable");
+ }
+};
+
+/**
+ * Helper class traits to allow copying and assignment of Persistent.
+ * This will clone the contents of the storage cell, but not any of the
+ * flags, etc.
+ */
+template <class T>
+struct CopyablePersistentTraits {
+ using CopyablePersistent = Persistent<T, CopyablePersistentTraits<T>>;
+ static const bool kResetInDestructor = true;
+ template <class S, class M>
+ static V8_INLINE void Copy(const Persistent<S, M>& source,
+ CopyablePersistent* dest) {
+ // do nothing, just allow copy
+ }
+};
+
+/**
+ * A PersistentBase which allows copy and assignment.
+ *
+ * Copy, assignment and destructor behavior is controlled by the traits
+ * class M.
+ *
+ * Note: Persistent class hierarchy is subject to future changes.
+ */
+template <class T, class M>
+class Persistent : public PersistentBase<T> {
+ public:
+ /**
+ * A Persistent with no storage cell.
+ */
+ V8_INLINE Persistent() : PersistentBase<T>(nullptr) {}
+ /**
+ * Construct a Persistent from a Local.
+ * When the Local is non-empty, a new storage cell is created
+ * pointing to the same object, and no flags are set.
+ */
+ template <class S>
+ V8_INLINE Persistent(Isolate* isolate, Local<S> that)
+ : PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ }
+ /**
+ * Construct a Persistent from a Persistent.
+ * When the Persistent is non-empty, a new storage cell is created
+ * pointing to the same object, and no flags are set.
+ */
+ template <class S, class M2>
+ V8_INLINE Persistent(Isolate* isolate, const Persistent<S, M2>& that)
+ : PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ }
+ /**
+   * The copy constructors and assignment operator create a Persistent
+   * exactly as the Persistent constructor does, but additionally call the
+   * Copy function from the traits class, allowing flags to be set based on
+   * the copied Persistent.
+ */
+ V8_INLINE Persistent(const Persistent& that) : PersistentBase<T>(nullptr) {
+ Copy(that);
+ }
+ template <class S, class M2>
+  V8_INLINE Persistent(const Persistent<S, M2>& that)
+      : PersistentBase<T>(nullptr) {
+ Copy(that);
+ }
+ V8_INLINE Persistent& operator=(const Persistent& that) {
+ Copy(that);
+ return *this;
+ }
+ template <class S, class M2>
+ V8_INLINE Persistent& operator=(const Persistent<S, M2>& that) {
+ Copy(that);
+ return *this;
+ }
+ /**
+ * The destructor will dispose the Persistent based on the
+   * kResetInDestructor flag in the traits class. Since not disposing
+ * can result in a memory leak, it is recommended to always set this flag.
+ */
+ V8_INLINE ~Persistent() {
+ if (M::kResetInDestructor) this->Reset();
+ }
+
+ // TODO(dcarney): this is pretty useless, fix or remove
+ template <class S>
+ V8_INLINE static Persistent<T>& Cast(const Persistent<S>& that) {
+#ifdef V8_ENABLE_CHECKS
+ // If we're going to perform the type check then we have to check
+ // that the handle isn't empty before doing the checked cast.
+ if (!that.IsEmpty()) T::Cast(*that);
+#endif
+ return reinterpret_cast<Persistent<T>&>(const_cast<Persistent<S>&>(that));
+ }
+
+ // TODO(dcarney): this is pretty useless, fix or remove
+ template <class S>
+ V8_INLINE Persistent<S>& As() const {
+ return Persistent<S>::Cast(*this);
+ }
+
+ private:
+ friend class Isolate;
+ friend class Utils;
+ template <class F>
+ friend class Local;
+ template <class F1, class F2>
+ friend class Persistent;
+ template <class F>
+ friend class ReturnValue;
+
+ explicit V8_INLINE Persistent(T* that) : PersistentBase<T>(that) {}
+ V8_INLINE T* operator*() const { return this->val_; }
+ template <class S, class M2>
+ V8_INLINE void Copy(const Persistent<S, M2>& that);
+};
+
+/**
+ * A PersistentBase which has move semantics.
+ *
+ * Note: Persistent class hierarchy is subject to future changes.
+ */
+template <class T>
+class Global : public PersistentBase<T> {
+ public:
+ /**
+ * A Global with no storage cell.
+ */
+ V8_INLINE Global() : PersistentBase<T>(nullptr) {}
+
+ /**
+ * Construct a Global from a Local.
+ * When the Local is non-empty, a new storage cell is created
+ * pointing to the same object, and no flags are set.
+ */
+ template <class S>
+ V8_INLINE Global(Isolate* isolate, Local<S> that)
+ : PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ }
+
+ /**
+ * Construct a Global from a PersistentBase.
+ * When the Persistent is non-empty, a new storage cell is created
+ * pointing to the same object, and no flags are set.
+ */
+ template <class S>
+ V8_INLINE Global(Isolate* isolate, const PersistentBase<S>& that)
+ : PersistentBase<T>(PersistentBase<T>::New(isolate, that.val_)) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ }
+
+ /**
+ * Move constructor.
+ */
+ V8_INLINE Global(Global&& other);
+
+ V8_INLINE ~Global() { this->Reset(); }
+
+ /**
+ * Move via assignment.
+ */
+ template <class S>
+ V8_INLINE Global& operator=(Global<S>&& rhs);
+
+ /**
+   * Pass allows returning Globals (which are move-only) from functions, etc.
+ */
+ Global Pass() { return static_cast<Global&&>(*this); }
+
+ /*
+ * For compatibility with Chromium's base::Bind (base::Passed).
+ */
+ using MoveOnlyTypeForCPP03 = void;
+
+ Global(const Global&) = delete;
+ void operator=(const Global&) = delete;
+
+ private:
+ template <class F>
+ friend class ReturnValue;
+ V8_INLINE T* operator*() const { return this->val_; }
+};
+
+// UniquePersistent is an alias for Global for historical reasons.
+template <class T>
+using UniquePersistent = Global<T>;
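+
+// A hedged usage sketch (`isolate` and `local_obj` assumed); Global is
+// move-only, so ownership is transferred rather than copied:
+//
+//   Global<Object> g(isolate, local_obj);
+//   Global<Object> g2 = std::move(g);       // g is now empty
+//   Local<Object> reopened = g2.Get(isolate);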
+
+/**
+ * Interface for iterating through all the persistent handles in the heap.
+ */
+class V8_EXPORT PersistentHandleVisitor {
+ public:
+ virtual ~PersistentHandleVisitor() = default;
+ virtual void VisitPersistentHandle(Persistent<Value>* value,
+ uint16_t class_id) {}
+};
+
+template <class T>
+T* PersistentBase<T>::New(Isolate* isolate, T* that) {
+ if (that == nullptr) return nullptr;
+ internal::Address* p = reinterpret_cast<internal::Address*>(that);
+ return reinterpret_cast<T*>(api_internal::GlobalizeReference(
+ reinterpret_cast<internal::Isolate*>(isolate), p));
+}
+
+template <class T, class M>
+template <class S, class M2>
+void Persistent<T, M>::Copy(const Persistent<S, M2>& that) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ this->Reset();
+ if (that.IsEmpty()) return;
+ internal::Address* p = reinterpret_cast<internal::Address*>(that.val_);
+ this->val_ = reinterpret_cast<T*>(api_internal::CopyGlobalReference(p));
+ M::Copy(that, this);
+}
+
+template <class T>
+bool PersistentBase<T>::IsWeak() const {
+ using I = internal::Internals;
+ if (this->IsEmpty()) return false;
+ return I::GetNodeState(reinterpret_cast<internal::Address*>(this->val_)) ==
+ I::kNodeStateIsWeakValue;
+}
+
+template <class T>
+void PersistentBase<T>::Reset() {
+ if (this->IsEmpty()) return;
+ api_internal::DisposeGlobal(reinterpret_cast<internal::Address*>(this->val_));
+ val_ = nullptr;
+}
+
+/**
+ * If non-empty, destroy the underlying storage cell
+ * and create a new one with the contents of other if other is non empty
+ */
+template <class T>
+template <class S>
+void PersistentBase<T>::Reset(Isolate* isolate, const Local<S>& other) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ Reset();
+ if (other.IsEmpty()) return;
+ this->val_ = New(isolate, other.val_);
+}
+
+/**
+ * If non-empty, destroy the underlying storage cell
+ * and create a new one with the contents of other if other is non empty
+ */
+template <class T>
+template <class S>
+void PersistentBase<T>::Reset(Isolate* isolate,
+ const PersistentBase<S>& other) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ Reset();
+ if (other.IsEmpty()) return;
+ this->val_ = New(isolate, other.val_);
+}
+
+template <class T>
+template <typename P>
+V8_INLINE void PersistentBase<T>::SetWeak(
+ P* parameter, typename WeakCallbackInfo<P>::Callback callback,
+ WeakCallbackType type) {
+ using Callback = WeakCallbackInfo<void>::Callback;
+#if (__GNUC__ >= 8) && !defined(__clang__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-function-type"
+#endif
+ api_internal::MakeWeak(reinterpret_cast<internal::Address*>(this->val_),
+ parameter, reinterpret_cast<Callback>(callback), type);
+#if (__GNUC__ >= 8) && !defined(__clang__)
+#pragma GCC diagnostic pop
+#endif
+}
+
+template <class T>
+void PersistentBase<T>::SetWeak() {
+ api_internal::MakeWeak(reinterpret_cast<internal::Address**>(&this->val_));
+}
+
+template <class T>
+template <typename P>
+P* PersistentBase<T>::ClearWeak() {
+ return reinterpret_cast<P*>(api_internal::ClearWeak(
+ reinterpret_cast<internal::Address*>(this->val_)));
+}
+
+template <class T>
+void PersistentBase<T>::AnnotateStrongRetainer(const char* label) {
+ api_internal::AnnotateStrongRetainer(
+ reinterpret_cast<internal::Address*>(this->val_), label);
+}
+
+template <class T>
+void PersistentBase<T>::SetWrapperClassId(uint16_t class_id) {
+ using I = internal::Internals;
+ if (this->IsEmpty()) return;
+ internal::Address* obj = reinterpret_cast<internal::Address*>(this->val_);
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
+ *reinterpret_cast<uint16_t*>(addr) = class_id;
+}
+
+template <class T>
+uint16_t PersistentBase<T>::WrapperClassId() const {
+ using I = internal::Internals;
+ if (this->IsEmpty()) return 0;
+ internal::Address* obj = reinterpret_cast<internal::Address*>(this->val_);
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
+ return *reinterpret_cast<uint16_t*>(addr);
+}
+
+template <class T>
+Global<T>::Global(Global&& other) : PersistentBase<T>(other.val_) {
+ if (other.val_ != nullptr) {
+ api_internal::MoveGlobalReference(
+ reinterpret_cast<internal::Address**>(&other.val_),
+ reinterpret_cast<internal::Address**>(&this->val_));
+ other.val_ = nullptr;
+ }
+}
+
+template <class T>
+template <class S>
+Global<T>& Global<T>::operator=(Global<S>&& rhs) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ if (this != &rhs) {
+ this->Reset();
+ if (rhs.val_ != nullptr) {
+ this->val_ = rhs.val_;
+ api_internal::MoveGlobalReference(
+ reinterpret_cast<internal::Address**>(&rhs.val_),
+ reinterpret_cast<internal::Address**>(&this->val_));
+ rhs.val_ = nullptr;
+ }
+ }
+ return *this;
+}
+
+} // namespace v8
+
+#endif // INCLUDE_V8_PERSISTENT_HANDLE_H_
diff --git a/deps/v8/include/v8-platform.h b/deps/v8/include/v8-platform.h
index fc9a357feb..dee399fa77 100644
--- a/deps/v8/include/v8-platform.h
+++ b/deps/v8/include/v8-platform.h
@@ -430,11 +430,29 @@ class PageAllocator {
/**
* Frees memory in the given [address, address + size) range. address and size
* should be operating system page-aligned. The next write to this
- * memory area brings the memory transparently back.
+ * memory area brings the memory transparently back. This should be treated as
+ * a hint to the OS that the pages are no longer needed. It does not guarantee
+ * that the pages will be discarded immediately or at all.
*/
virtual bool DiscardSystemPages(void* address, size_t size) { return true; }
/**
+ * Decommits any wired memory pages in the given range, allowing the OS to
+ * reclaim them, and marks the region as inaccessible (kNoAccess). The address
+ * range stays reserved and can be accessed again later by changing its
+ * permissions. However, in that case the memory content is guaranteed to be
+ * zero-initialized again. The memory must have been previously allocated by a
+ * call to AllocatePages. Returns true on success, false otherwise.
+ */
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ // Implementing this API is required when the virtual memory cage is enabled.
+ virtual bool DecommitPages(void* address, size_t size) = 0;
+#else
+ // Otherwise, it is optional for now.
+ virtual bool DecommitPages(void* address, size_t size) { return false; }
+#endif
+
+ /**
* INTERNAL ONLY: This interface has not been stabilised and may change
* without notice from one release to another without being deprecated first.
*/
diff --git a/deps/v8/include/v8-primitive-object.h b/deps/v8/include/v8-primitive-object.h
new file mode 100644
index 0000000000..573932d078
--- /dev/null
+++ b/deps/v8/include/v8-primitive-object.h
@@ -0,0 +1,118 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_PRIMITIVE_OBJECT_H_
+#define INCLUDE_V8_PRIMITIVE_OBJECT_H_
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-object.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Isolate;
+
+/**
+ * A Number object (ECMA-262, 4.3.21).
+ */
+class V8_EXPORT NumberObject : public Object {
+ public:
+ static Local<Value> New(Isolate* isolate, double value);
+
+ double ValueOf() const;
+
+ V8_INLINE static NumberObject* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<NumberObject*>(value);
+ }
+
+ private:
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * A BigInt object (https://tc39.github.io/proposal-bigint)
+ */
+class V8_EXPORT BigIntObject : public Object {
+ public:
+ static Local<Value> New(Isolate* isolate, int64_t value);
+
+ Local<BigInt> ValueOf() const;
+
+ V8_INLINE static BigIntObject* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<BigIntObject*>(value);
+ }
+
+ private:
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * A Boolean object (ECMA-262, 4.3.15).
+ */
+class V8_EXPORT BooleanObject : public Object {
+ public:
+ static Local<Value> New(Isolate* isolate, bool value);
+
+ bool ValueOf() const;
+
+ V8_INLINE static BooleanObject* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<BooleanObject*>(value);
+ }
+
+ private:
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * A String object (ECMA-262, 4.3.18).
+ */
+class V8_EXPORT StringObject : public Object {
+ public:
+ static Local<Value> New(Isolate* isolate, Local<String> value);
+
+ Local<String> ValueOf() const;
+
+ V8_INLINE static StringObject* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<StringObject*>(value);
+ }
+
+ private:
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * A Symbol object (ECMA-262 edition 6).
+ */
+class V8_EXPORT SymbolObject : public Object {
+ public:
+ static Local<Value> New(Isolate* isolate, Local<Symbol> value);
+
+ Local<Symbol> ValueOf() const;
+
+ V8_INLINE static SymbolObject* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<SymbolObject*>(value);
+ }
+
+ private:
+ static void CheckCast(Value* obj);
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_PRIMITIVE_OBJECT_H_
diff --git a/deps/v8/include/v8-primitive.h b/deps/v8/include/v8-primitive.h
new file mode 100644
index 0000000000..59d959da05
--- /dev/null
+++ b/deps/v8/include/v8-primitive.h
@@ -0,0 +1,858 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_PRIMITIVE_H_
+#define INCLUDE_V8_PRIMITIVE_H_
+
+#include "v8-data.h" // NOLINT(build/include_directory)
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-value.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Context;
+class Isolate;
+class String;
+
+namespace internal {
+class ExternalString;
+class ScopedExternalStringLock;
+} // namespace internal
+
+/**
+ * The superclass of primitive values. See ECMA-262 4.3.2.
+ */
+class V8_EXPORT Primitive : public Value {};
+
+/**
+ * A primitive boolean value (ECMA-262, 4.3.14). Either the true
+ * or false value.
+ */
+class V8_EXPORT Boolean : public Primitive {
+ public:
+ bool Value() const;
+ V8_INLINE static Boolean* Cast(v8::Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return static_cast<Boolean*>(data);
+ }
+
+ V8_INLINE static Local<Boolean> New(Isolate* isolate, bool value);
+
+ private:
+ static void CheckCast(v8::Data* that);
+};
+
+/**
+ * An array to hold Primitive values. This is used by the embedder to
+ * pass host-defined options to the ScriptOptions during compilation.
+ *
+ * This is passed back to the embedder as part of
+ * HostImportModuleDynamicallyCallback for module loading.
+ */
+class V8_EXPORT PrimitiveArray {
+ public:
+ static Local<PrimitiveArray> New(Isolate* isolate, int length);
+ int Length() const;
+ void Set(Isolate* isolate, int index, Local<Primitive> item);
+ Local<Primitive> Get(Isolate* isolate, int index);
+};
+
+/**
+ * A superclass for symbols and strings.
+ */
+class V8_EXPORT Name : public Primitive {
+ public:
+ /**
+ * Returns the identity hash for this object. The current implementation
+ * uses an inline property on the object to store the identity hash.
+ *
+ * The return value will never be 0. Also, it is not guaranteed to be
+ * unique.
+ */
+ int GetIdentityHash();
+
+ V8_INLINE static Name* Cast(Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return static_cast<Name*>(data);
+ }
+
+ private:
+ static void CheckCast(Data* that);
+};
+
+/**
+ * A flag describing different modes of string creation.
+ *
+ * Aside from performance implications there are no differences between the two
+ * creation modes.
+ */
+enum class NewStringType {
+ /**
+ * Create a new string, always allocating new storage memory.
+ */
+ kNormal,
+
+ /**
+ * Acts as a hint that the string should be created in the
+ * old generation heap space and be deduplicated if an identical string
+ * already exists.
+ */
+ kInternalized
+};
+
+/**
+ * A JavaScript string value (ECMA-262, 4.3.17).
+ */
+class V8_EXPORT String : public Name {
+ public:
+ static constexpr int kMaxLength =
+ internal::kApiSystemPointerSize == 4 ? (1 << 28) - 16 : (1 << 29) - 24;
+
+ enum Encoding {
+ UNKNOWN_ENCODING = 0x1,
+ TWO_BYTE_ENCODING = 0x0,
+ ONE_BYTE_ENCODING = 0x8
+ };
+ /**
+ * Returns the number of characters (UTF-16 code units) in this string.
+ */
+ int Length() const;
+
+ /**
+ * Returns the number of bytes in the UTF-8 encoded
+ * representation of this string.
+ */
+ int Utf8Length(Isolate* isolate) const;
+
+ /**
+   * Returns whether this string is known to contain only one-byte data,
+ * i.e. ISO-8859-1 code points.
+ * Does not read the string.
+ * False negatives are possible.
+ */
+ bool IsOneByte() const;
+
+ /**
+   * Returns whether this string contains only one-byte data,
+ * i.e. ISO-8859-1 code points.
+ * Will read the entire string in some cases.
+ */
+ bool ContainsOnlyOneByte() const;
+
+ /**
+ * Write the contents of the string to an external buffer.
+ * If no arguments are given, expects the buffer to be large
+ * enough to hold the entire string and NULL terminator. Copies
+ * the contents of the string and the NULL terminator into the
+ * buffer.
+ *
+ * WriteUtf8 will not write partial UTF-8 sequences, preferring to stop
+ * before the end of the buffer.
+ *
+ * Copies up to length characters into the output buffer.
+ * Only null-terminates if there is enough space in the buffer.
+ *
+ * \param buffer The buffer into which the string will be copied.
+ * \param start The starting position within the string at which
+ * copying begins.
+ * \param length The number of characters to copy from the string. For
+ * WriteUtf8 the number of bytes in the buffer.
+ * \param nchars_ref The number of characters written, can be NULL.
+ * \param options Various options that might affect performance of this or
+ * subsequent operations.
+ * \return The number of characters copied to the buffer excluding the null
+ * terminator. For WriteUtf8: The number of bytes copied to the buffer
+ * including the null terminator (if written).
+ */
+ enum WriteOptions {
+ NO_OPTIONS = 0,
+ HINT_MANY_WRITES_EXPECTED = 1,
+ NO_NULL_TERMINATION = 2,
+ PRESERVE_ONE_BYTE_NULL = 4,
+ // Used by WriteUtf8 to replace orphan surrogate code units with the
+    // Unicode replacement character. Needs to be set to guarantee valid UTF-8
+ // output.
+ REPLACE_INVALID_UTF8 = 8
+ };
+
+ // 16-bit character codes.
+ int Write(Isolate* isolate, uint16_t* buffer, int start = 0, int length = -1,
+ int options = NO_OPTIONS) const;
+ // One byte characters.
+ int WriteOneByte(Isolate* isolate, uint8_t* buffer, int start = 0,
+ int length = -1, int options = NO_OPTIONS) const;
+ // UTF-8 encoded characters.
+ int WriteUtf8(Isolate* isolate, char* buffer, int length = -1,
+ int* nchars_ref = nullptr, int options = NO_OPTIONS) const;
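+
+  // A hedged sketch (`str` and `isolate` assumed): copy into a fixed buffer
+  // as guaranteed-valid UTF-8, replacing orphan surrogates:
+  //
+  //   char buf[128];
+  //   int nchars = 0;
+  //   int nbytes = str->WriteUtf8(isolate, buf, sizeof(buf), &nchars,
+  //                               String::REPLACE_INVALID_UTF8);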
+
+ /**
+   * A zero-length string.
+ */
+ V8_INLINE static Local<String> Empty(Isolate* isolate);
+
+ /**
+ * Returns true if the string is external.
+ */
+ bool IsExternal() const;
+
+ /**
+ * Returns true if the string is both external and two-byte.
+ */
+ bool IsExternalTwoByte() const;
+
+ /**
+ * Returns true if the string is both external and one-byte.
+ */
+ bool IsExternalOneByte() const;
+
+ class V8_EXPORT ExternalStringResourceBase {
+ public:
+ virtual ~ExternalStringResourceBase() = default;
+
+ /**
+ * If a string is cacheable, the value returned by
+ * ExternalStringResource::data() may be cached, otherwise it is not
+ * expected to be stable beyond the current top-level task.
+ */
+ virtual bool IsCacheable() const { return true; }
+
+ // Disallow copying and assigning.
+ ExternalStringResourceBase(const ExternalStringResourceBase&) = delete;
+ void operator=(const ExternalStringResourceBase&) = delete;
+
+ protected:
+ ExternalStringResourceBase() = default;
+
+ /**
+ * Internally V8 will call this Dispose method when the external string
+ * resource is no longer needed. The default implementation will use the
+ * delete operator. This method can be overridden in subclasses to
+ * control how allocated external string resources are disposed.
+ */
+ virtual void Dispose() { delete this; }
+
+ /**
+ * For a non-cacheable string, the value returned by
+ * |ExternalStringResource::data()| has to be stable between |Lock()| and
+     * |Unlock()|, that is, the string must behave as if |IsCacheable()| returned
+ * true.
+ *
+ * These two functions must be thread-safe, and can be called from anywhere.
+ * They also must handle lock depth, in the sense that each can be called
+ * several times, from different threads, and unlocking should only happen
+ * when the balance of Lock() and Unlock() calls is 0.
+ */
+ virtual void Lock() const {}
+
+ /**
+ * Unlocks the string.
+ */
+ virtual void Unlock() const {}
+
+ private:
+ friend class internal::ExternalString;
+ friend class v8::String;
+ friend class internal::ScopedExternalStringLock;
+ };
+
+ /**
+ * An ExternalStringResource is a wrapper around a two-byte string
+ * buffer that resides outside V8's heap. Implement an
+ * ExternalStringResource to manage the life cycle of the underlying
+ * buffer. Note that the string data must be immutable.
+ */
+ class V8_EXPORT ExternalStringResource : public ExternalStringResourceBase {
+ public:
+ /**
+ * Override the destructor to manage the life cycle of the underlying
+ * buffer.
+ */
+ ~ExternalStringResource() override = default;
+
+ /**
+ * The string data from the underlying buffer. If the resource is cacheable
+ * then data() must return the same value for all invocations.
+ */
+ virtual const uint16_t* data() const = 0;
+
+ /**
+ * The length of the string. That is, the number of two-byte characters.
+ */
+ virtual size_t length() const = 0;
+
+ /**
+ * Returns the cached data from the underlying buffer. This method can be
+ * called only for cacheable resources (i.e. IsCacheable() == true) and only
+ * after UpdateDataCache() was called.
+ */
+ const uint16_t* cached_data() const {
+ CheckCachedDataInvariants();
+ return cached_data_;
+ }
+
+ /**
+ * Update {cached_data_} with the data from the underlying buffer. This can
+ * be called only for cacheable resources.
+ */
+ void UpdateDataCache();
+
+ protected:
+ ExternalStringResource() = default;
+
+ private:
+ void CheckCachedDataInvariants() const;
+
+ const uint16_t* cached_data_ = nullptr;
+ };
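+
+  // A minimal resource sketch (hypothetical embedder type; the backing buffer
+  // must stay alive and immutable until V8 calls Dispose()):
+  //
+  //   class MyResource : public v8::String::ExternalStringResource {
+  //    public:
+  //     MyResource(const uint16_t* data, size_t length)
+  //         : data_(data), length_(length) {}
+  //     const uint16_t* data() const override { return data_; }
+  //     size_t length() const override { return length_; }
+  //
+  //    private:
+  //     const uint16_t* data_;
+  //     size_t length_;
+  //   };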
+
+ /**
+   * An ExternalOneByteStringResource is a wrapper around a one-byte
+ * string buffer that resides outside V8's heap. Implement an
+ * ExternalOneByteStringResource to manage the life cycle of the
+ * underlying buffer. Note that the string data must be immutable
+ * and that the data must be Latin-1 and not UTF-8, which would require
+   * special treatment internally in the engine and would not allow efficient
+   * indexing. Use String::New or convert to 16-bit data for non-Latin1.
+ */
+ class V8_EXPORT ExternalOneByteStringResource
+ : public ExternalStringResourceBase {
+ public:
+ /**
+ * Override the destructor to manage the life cycle of the underlying
+ * buffer.
+ */
+ ~ExternalOneByteStringResource() override = default;
+
+ /**
+ * The string data from the underlying buffer. If the resource is cacheable
+ * then data() must return the same value for all invocations.
+ */
+ virtual const char* data() const = 0;
+
+ /** The number of Latin-1 characters in the string.*/
+ virtual size_t length() const = 0;
+
+ /**
+ * Returns the cached data from the underlying buffer. If the resource is
+     * uncacheable or UpdateDataCache() has not been called before, the
+     * behaviour is undefined.
+ */
+ const char* cached_data() const {
+ CheckCachedDataInvariants();
+ return cached_data_;
+ }
+
+ /**
+ * Update {cached_data_} with the data from the underlying buffer. This can
+ * be called only for cacheable resources.
+ */
+ void UpdateDataCache();
+
+ protected:
+ ExternalOneByteStringResource() = default;
+
+ private:
+ void CheckCachedDataInvariants() const;
+
+ const char* cached_data_ = nullptr;
+ };
+
+ /**
+ * If the string is an external string, return the ExternalStringResourceBase
+ * regardless of the encoding, otherwise return NULL. The encoding of the
+ * string is returned in encoding_out.
+ */
+ V8_INLINE ExternalStringResourceBase* GetExternalStringResourceBase(
+ Encoding* encoding_out) const;
+
+ /**
+ * Get the ExternalStringResource for an external string. Returns
+ * NULL if IsExternal() doesn't return true.
+ */
+ V8_INLINE ExternalStringResource* GetExternalStringResource() const;
+
+ /**
+ * Get the ExternalOneByteStringResource for an external one-byte string.
+ * Returns NULL if IsExternalOneByte() doesn't return true.
+ */
+ const ExternalOneByteStringResource* GetExternalOneByteStringResource() const;
+
+ V8_INLINE static String* Cast(v8::Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return static_cast<String*>(data);
+ }
+
+ /**
+ * Allocates a new string from a UTF-8 literal. This is equivalent to calling
+ * String::NewFromUtf8(isolate, "...").ToLocalChecked(), but without the check
+ * overhead.
+ *
+ * When called on a string literal containing '\0', the inferred length is the
+ * length of the input array minus 1 (for the final '\0') and not the value
+ * returned by strlen.
+ **/
+ template <int N>
+ static V8_WARN_UNUSED_RESULT Local<String> NewFromUtf8Literal(
+ Isolate* isolate, const char (&literal)[N],
+ NewStringType type = NewStringType::kNormal) {
+ static_assert(N <= kMaxLength, "String is too long");
+ return NewFromUtf8Literal(isolate, literal, type, N - 1);
+ }
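+
+ // A usage sketch (illustrative only; `isolate` and `runtime_buffer` are
+ // assumed to exist). Literals skip the MaybeLocal check that runtime
+ // data requires:
+ //
+ //   v8::Local<v8::String> tag =
+ //       v8::String::NewFromUtf8Literal(isolate, "tag");    // infallible
+ //   v8::MaybeLocal<v8::String> name =
+ //       v8::String::NewFromUtf8(isolate, runtime_buffer);  // may be empty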
+
+ /** Allocates a new string from UTF-8 data. Only returns an empty value when
+ * length > kMaxLength. **/
+ static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewFromUtf8(
+ Isolate* isolate, const char* data,
+ NewStringType type = NewStringType::kNormal, int length = -1);
+
+ /** Allocates a new string from Latin-1 data. Only returns an empty value
+ * when length > kMaxLength. **/
+ static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewFromOneByte(
+ Isolate* isolate, const uint8_t* data,
+ NewStringType type = NewStringType::kNormal, int length = -1);
+
+ /** Allocates a new string from UTF-16 data. Only returns an empty value when
+ * length > kMaxLength. **/
+ static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewFromTwoByte(
+ Isolate* isolate, const uint16_t* data,
+ NewStringType type = NewStringType::kNormal, int length = -1);
+
+ /**
+ * Creates a new string by concatenating the left and the right strings
+ * passed in as parameters.
+ */
+ static Local<String> Concat(Isolate* isolate, Local<String> left,
+ Local<String> right);
+
+ /**
+ * Creates a new external string using the data defined in the given
+ * resource. When the external string is no longer live on V8's heap the
+ * resource will be disposed by calling its Dispose method. The caller of
+ * this function should not otherwise delete or modify the resource. Neither
+ * should the underlying buffer be deallocated or modified except through the
+ * destructor of the external string resource.
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewExternalTwoByte(
+ Isolate* isolate, ExternalStringResource* resource);
+
+ /**
+ * Associate an external string resource with this string by transforming it
+ * in place so that existing references to this string in the JavaScript heap
+ * will use the external string resource. The external string resource's
+ * character contents need to be equivalent to this string.
+ * Returns true if the string has been changed to be an external string.
+ * The string is not modified if the operation fails. See NewExternal for
+ * information on the lifetime of the resource.
+ */
+ bool MakeExternal(ExternalStringResource* resource);
+
+ /**
+ * Creates a new external string using the one-byte data defined in the given
+ * resource. When the external string is no longer live on V8's heap the
+ * resource will be disposed by calling its Dispose method. The caller of
+ * this function should not otherwise delete or modify the resource. Neither
+ * should the underlying buffer be deallocated or modified except through the
+ * destructor of the external string resource.
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewExternalOneByte(
+ Isolate* isolate, ExternalOneByteStringResource* resource);
+
+ /**
+ * Associate an external string resource with this string by transforming it
+ * in place so that existing references to this string in the JavaScript heap
+ * will use the external string resource. The external string resource's
+ * character contents need to be equivalent to this string.
+ * Returns true if the string has been changed to be an external string.
+ * The string is not modified if the operation fails. See NewExternal for
+ * information on the lifetime of the resource.
+ */
+ bool MakeExternal(ExternalOneByteStringResource* resource);
+
+ /**
+ * Returns true if this string can be made external.
+ */
+ bool CanMakeExternal() const;
+
+ /**
+ * Returns true if the strings' values are equal. Same as JS ==/===.
+ */
+ bool StringEquals(Local<String> str) const;
+
+ /**
+ * Converts an object to a UTF-8-encoded character array. Useful if
+ * you want to print the object. If conversion to a string fails
+ * (e.g. due to an exception in the toString() method of the object)
+ * then the length() method returns 0 and the * operator returns
+ * NULL.
+ */
+ class V8_EXPORT Utf8Value {
+ public:
+ Utf8Value(Isolate* isolate, Local<v8::Value> obj);
+ ~Utf8Value();
+ char* operator*() { return str_; }
+ const char* operator*() const { return str_; }
+ int length() const { return length_; }
+
+ // Disallow copying and assigning.
+ Utf8Value(const Utf8Value&) = delete;
+ void operator=(const Utf8Value&) = delete;
+
+ private:
+ char* str_;
+ int length_;
+ };
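+
+ // A usage sketch (illustrative only; `isolate` and `value` are assumed
+ // to exist). The NULL check covers the conversion-failure case described
+ // above:
+ //
+ //   v8::String::Utf8Value utf8(isolate, value);
+ //   if (*utf8) printf("%.*s\n", utf8.length(), *utf8);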
+
+ /**
+ * Converts an object to a two-byte (UTF-16-encoded) string.
+ * If conversion to a string fails (e.g. due to an exception in the toString()
+ * method of the object) then the length() method returns 0 and the * operator
+ * returns NULL.
+ */
+ class V8_EXPORT Value {
+ public:
+ Value(Isolate* isolate, Local<v8::Value> obj);
+ ~Value();
+ uint16_t* operator*() { return str_; }
+ const uint16_t* operator*() const { return str_; }
+ int length() const { return length_; }
+
+ // Disallow copying and assigning.
+ Value(const Value&) = delete;
+ void operator=(const Value&) = delete;
+
+ private:
+ uint16_t* str_;
+ int length_;
+ };
+
+ private:
+ void VerifyExternalStringResourceBase(ExternalStringResourceBase* v,
+ Encoding encoding) const;
+ void VerifyExternalStringResource(ExternalStringResource* val) const;
+ ExternalStringResource* GetExternalStringResourceSlow() const;
+ ExternalStringResourceBase* GetExternalStringResourceBaseSlow(
+ String::Encoding* encoding_out) const;
+
+ static Local<v8::String> NewFromUtf8Literal(Isolate* isolate,
+ const char* literal,
+ NewStringType type, int length);
+
+ static void CheckCast(v8::Data* that);
+};
+
+// Zero-length string specialization (templated string size includes
+// terminator).
+template <>
+inline V8_WARN_UNUSED_RESULT Local<String> String::NewFromUtf8Literal(
+ Isolate* isolate, const char (&literal)[1], NewStringType type) {
+ return String::Empty(isolate);
+}
+
+/**
+ * Interface for iterating through all external resources in the heap.
+ */
+class V8_EXPORT ExternalResourceVisitor {
+ public:
+ virtual ~ExternalResourceVisitor() = default;
+ virtual void VisitExternalString(Local<String> string) {}
+};
+
+/**
+ * A JavaScript symbol (ECMA-262 edition 6)
+ */
+class V8_EXPORT Symbol : public Name {
+ public:
+ /**
+ * Returns the description string of the symbol, or undefined if none.
+ */
+ V8_DEPRECATE_SOON("Use Symbol::Description(isolate)")
+ Local<Value> Description() const;
+ Local<Value> Description(Isolate* isolate) const;
+
+ /**
+ * Create a symbol. If description is not empty, it will be used as the
+ * description.
+ */
+ static Local<Symbol> New(Isolate* isolate,
+ Local<String> description = Local<String>());
+
+ /**
+ * Access global symbol registry.
+ * Note that symbols created this way are never collected, so
+ * they should only be used for statically fixed properties.
+ * Also, there is only one global name space for the descriptions used as
+ * keys.
+ * To minimize the potential for clashes, use qualified names as keys.
+ */
+ static Local<Symbol> For(Isolate* isolate, Local<String> description);
+
+ /**
+ * Retrieve a global symbol. Similar to |For|, but using a separate
+ * registry that is not accessible by (and cannot clash with) JavaScript code.
+ */
+ static Local<Symbol> ForApi(Isolate* isolate, Local<String> description);
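+
+ // A usage sketch (illustrative only; the key string is hypothetical).
+ // Qualified keys keep the shared registry collision-free:
+ //
+ //   v8::Local<v8::Symbol> sym = v8::Symbol::For(
+ //       isolate, v8::String::NewFromUtf8Literal(isolate, "embedder.slot"));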
+
+ // Well-known symbols
+ static Local<Symbol> GetAsyncIterator(Isolate* isolate);
+ static Local<Symbol> GetHasInstance(Isolate* isolate);
+ static Local<Symbol> GetIsConcatSpreadable(Isolate* isolate);
+ static Local<Symbol> GetIterator(Isolate* isolate);
+ static Local<Symbol> GetMatch(Isolate* isolate);
+ static Local<Symbol> GetReplace(Isolate* isolate);
+ static Local<Symbol> GetSearch(Isolate* isolate);
+ static Local<Symbol> GetSplit(Isolate* isolate);
+ static Local<Symbol> GetToPrimitive(Isolate* isolate);
+ static Local<Symbol> GetToStringTag(Isolate* isolate);
+ static Local<Symbol> GetUnscopables(Isolate* isolate);
+
+ V8_INLINE static Symbol* Cast(Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return static_cast<Symbol*>(data);
+ }
+
+ private:
+ Symbol();
+ static void CheckCast(Data* that);
+};
+
+/**
+ * A JavaScript number value (ECMA-262, 4.3.20)
+ */
+class V8_EXPORT Number : public Primitive {
+ public:
+ double Value() const;
+ static Local<Number> New(Isolate* isolate, double value);
+ V8_INLINE static Number* Cast(v8::Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return static_cast<Number*>(data);
+ }
+
+ private:
+ Number();
+ static void CheckCast(v8::Data* that);
+};
+
+/**
+ * A JavaScript value representing a signed integer.
+ */
+class V8_EXPORT Integer : public Number {
+ public:
+ static Local<Integer> New(Isolate* isolate, int32_t value);
+ static Local<Integer> NewFromUnsigned(Isolate* isolate, uint32_t value);
+ int64_t Value() const;
+ V8_INLINE static Integer* Cast(v8::Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return static_cast<Integer*>(data);
+ }
+
+ private:
+ Integer();
+ static void CheckCast(v8::Data* that);
+};
+
+/**
+ * A JavaScript value representing a 32-bit signed integer.
+ */
+class V8_EXPORT Int32 : public Integer {
+ public:
+ int32_t Value() const;
+ V8_INLINE static Int32* Cast(v8::Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return static_cast<Int32*>(data);
+ }
+
+ private:
+ Int32();
+ static void CheckCast(v8::Data* that);
+};
+
+/**
+ * A JavaScript value representing a 32-bit unsigned integer.
+ */
+class V8_EXPORT Uint32 : public Integer {
+ public:
+ uint32_t Value() const;
+ V8_INLINE static Uint32* Cast(v8::Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return static_cast<Uint32*>(data);
+ }
+
+ private:
+ Uint32();
+ static void CheckCast(v8::Data* that);
+};
+
+/**
+ * A JavaScript BigInt value (https://tc39.github.io/proposal-bigint)
+ */
+class V8_EXPORT BigInt : public Primitive {
+ public:
+ static Local<BigInt> New(Isolate* isolate, int64_t value);
+ static Local<BigInt> NewFromUnsigned(Isolate* isolate, uint64_t value);
+ /**
+ * Creates a new BigInt object using a specified sign bit and a
+ * specified list of digits/words.
+ * The resulting number is calculated as:
+ *
+ * (-1)^sign_bit * (words[0] * (2^64)^0 + words[1] * (2^64)^1 + ...)
+ */
+ static MaybeLocal<BigInt> NewFromWords(Local<Context> context, int sign_bit,
+ int word_count, const uint64_t* words);
+
+ /**
+ * Returns the value of this BigInt as an unsigned 64-bit integer.
+ * If `lossless` is provided, it is set to `true` if the conversion was
+ * lossless and to `false` if the value was truncated or wrapped around. In
+ * particular, it is set to `false` if this BigInt is negative.
+ */
+ uint64_t Uint64Value(bool* lossless = nullptr) const;
+
+ /**
+ * Returns the value of this BigInt as a signed 64-bit integer.
+ * If `lossless` is provided, it is set to `true` if the conversion was
+ * lossless and to `false` if the value was truncated.
+ */
+ int64_t Int64Value(bool* lossless = nullptr) const;
+
+ /**
+ * Returns the number of 64-bit words needed to store the result of
+ * ToWordsArray().
+ */
+ int WordCount() const;
+
+ /**
+ * Writes the contents of this BigInt to a specified memory location.
+ * `sign_bit` must be provided and will be set to 1 if this BigInt is
+ * negative.
+ * `*word_count` has to be initialized to the length of the `words` array.
+ * Upon return, it will be set to the actual number of words that would
+ * be needed to store this BigInt (i.e. the return value of `WordCount()`).
+ */
+ void ToWordsArray(int* sign_bit, int* word_count, uint64_t* words) const;
+
+ V8_INLINE static BigInt* Cast(v8::Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return static_cast<BigInt*>(data);
+ }
+
+ private:
+ BigInt();
+ static void CheckCast(v8::Data* that);
+};
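+
+// A round-trip sketch for the word-array API above (illustrative only;
+// `bigint` and `context` are assumed to exist):
+//
+//   int sign_bit = 0;
+//   int word_count = bigint->WordCount();
+//   std::vector<uint64_t> words(word_count);
+//   bigint->ToWordsArray(&sign_bit, &word_count, words.data());
+//   v8::MaybeLocal<v8::BigInt> copy =
+//       v8::BigInt::NewFromWords(context, sign_bit, word_count, words.data());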
+
+Local<String> String::Empty(Isolate* isolate) {
+ using S = internal::Address;
+ using I = internal::Internals;
+ I::CheckInitialized(isolate);
+ S* slot = I::GetRoot(isolate, I::kEmptyStringRootIndex);
+ return Local<String>(reinterpret_cast<String*>(slot));
+}
+
+String::ExternalStringResource* String::GetExternalStringResource() const {
+ using A = internal::Address;
+ using I = internal::Internals;
+ A obj = *reinterpret_cast<const A*>(this);
+
+ ExternalStringResource* result;
+ if (I::IsExternalTwoByteString(I::GetInstanceType(obj))) {
+ internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj);
+ A value =
+ I::ReadExternalPointerField(isolate, obj, I::kStringResourceOffset,
+ internal::kExternalStringResourceTag);
+ result = reinterpret_cast<String::ExternalStringResource*>(value);
+ } else {
+ result = GetExternalStringResourceSlow();
+ }
+#ifdef V8_ENABLE_CHECKS
+ VerifyExternalStringResource(result);
+#endif
+ return result;
+}
+
+String::ExternalStringResourceBase* String::GetExternalStringResourceBase(
+ String::Encoding* encoding_out) const {
+ using A = internal::Address;
+ using I = internal::Internals;
+ A obj = *reinterpret_cast<const A*>(this);
+ int type = I::GetInstanceType(obj) & I::kFullStringRepresentationMask;
+ *encoding_out = static_cast<Encoding>(type & I::kStringEncodingMask);
+ ExternalStringResourceBase* resource;
+ if (type == I::kExternalOneByteRepresentationTag ||
+ type == I::kExternalTwoByteRepresentationTag) {
+ internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj);
+ A value =
+ I::ReadExternalPointerField(isolate, obj, I::kStringResourceOffset,
+ internal::kExternalStringResourceTag);
+ resource = reinterpret_cast<ExternalStringResourceBase*>(value);
+ } else {
+ resource = GetExternalStringResourceBaseSlow(encoding_out);
+ }
+#ifdef V8_ENABLE_CHECKS
+ VerifyExternalStringResourceBase(resource, *encoding_out);
+#endif
+ return resource;
+}
+
+// --- Statics ---
+
+V8_INLINE Local<Primitive> Undefined(Isolate* isolate) {
+ using S = internal::Address;
+ using I = internal::Internals;
+ I::CheckInitialized(isolate);
+ S* slot = I::GetRoot(isolate, I::kUndefinedValueRootIndex);
+ return Local<Primitive>(reinterpret_cast<Primitive*>(slot));
+}
+
+V8_INLINE Local<Primitive> Null(Isolate* isolate) {
+ using S = internal::Address;
+ using I = internal::Internals;
+ I::CheckInitialized(isolate);
+ S* slot = I::GetRoot(isolate, I::kNullValueRootIndex);
+ return Local<Primitive>(reinterpret_cast<Primitive*>(slot));
+}
+
+V8_INLINE Local<Boolean> True(Isolate* isolate) {
+ using S = internal::Address;
+ using I = internal::Internals;
+ I::CheckInitialized(isolate);
+ S* slot = I::GetRoot(isolate, I::kTrueValueRootIndex);
+ return Local<Boolean>(reinterpret_cast<Boolean*>(slot));
+}
+
+V8_INLINE Local<Boolean> False(Isolate* isolate) {
+ using S = internal::Address;
+ using I = internal::Internals;
+ I::CheckInitialized(isolate);
+ S* slot = I::GetRoot(isolate, I::kFalseValueRootIndex);
+ return Local<Boolean>(reinterpret_cast<Boolean*>(slot));
+}
+
+Local<Boolean> Boolean::New(Isolate* isolate, bool value) {
+ return value ? True(isolate) : False(isolate);
+}
+
+} // namespace v8
+
+#endif // INCLUDE_V8_PRIMITIVE_H_
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index 9a40cfcf30..f2354cac38 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -11,7 +11,9 @@
#include <unordered_set>
#include <vector>
-#include "v8.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-message.h" // NOLINT(build/include_directory)
+#include "v8-persistent-handle.h" // NOLINT(build/include_directory)
/**
* Profiler support for the V8 JavaScript engine.
@@ -20,6 +22,7 @@ namespace v8 {
class HeapGraphNode;
struct HeapStatsUpdate;
+class Object;
using NativeObject = void*;
using SnapshotObjectId = uint32_t;
diff --git a/deps/v8/include/v8-promise.h b/deps/v8/include/v8-promise.h
new file mode 100644
index 0000000000..9da8e4b4e8
--- /dev/null
+++ b/deps/v8/include/v8-promise.h
@@ -0,0 +1,174 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_PROMISE_H_
+#define INCLUDE_V8_PROMISE_H_
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-object.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Context;
+
+#ifndef V8_PROMISE_INTERNAL_FIELD_COUNT
+// The number of required internal fields can be defined by the embedder.
+#define V8_PROMISE_INTERNAL_FIELD_COUNT 0
+#endif
+
+/**
+ * An instance of the built-in Promise constructor (ES6 draft).
+ */
+class V8_EXPORT Promise : public Object {
+ public:
+ /**
+ * State of the promise. Each value corresponds to one of the possible values
+ * of the [[PromiseState]] field.
+ */
+ enum PromiseState { kPending, kFulfilled, kRejected };
+
+ class V8_EXPORT Resolver : public Object {
+ public:
+ /**
+ * Create a new resolver, along with an associated promise in pending state.
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<Resolver> New(
+ Local<Context> context);
+
+ /**
+ * Extract the associated promise.
+ */
+ Local<Promise> GetPromise();
+
+ /**
+ * Resolve/reject the associated promise with a given value.
+ * Ignored if the promise is no longer pending.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> Resolve(Local<Context> context,
+ Local<Value> value);
+
+ V8_WARN_UNUSED_RESULT Maybe<bool> Reject(Local<Context> context,
+ Local<Value> value);
+
+ V8_INLINE static Resolver* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Promise::Resolver*>(value);
+ }
+
+ private:
+ Resolver();
+ static void CheckCast(Value* obj);
+ };
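+
+ // A usage sketch (illustrative only; `context` and `value` are assumed
+ // to exist): create a pending promise, hand it out, settle it later.
+ //
+ //   v8::Local<v8::Promise::Resolver> resolver =
+ //       v8::Promise::Resolver::New(context).ToLocalChecked();
+ //   v8::Local<v8::Promise> promise = resolver->GetPromise();
+ //   // ... later:
+ //   resolver->Resolve(context, value).Check();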
+
+ /**
+ * Register a resolution/rejection handler with a promise.
+ * The handler is given the respective resolution/rejection value as
+ * an argument. If the promise is already resolved/rejected, the handler is
+ * invoked at the end of the turn.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Catch(Local<Context> context,
+ Local<Function> handler);
+
+ V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Then(Local<Context> context,
+ Local<Function> handler);
+
+ V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Then(Local<Context> context,
+ Local<Function> on_fulfilled,
+ Local<Function> on_rejected);
+
+ /**
+ * Returns true if the promise has at least one derived promise, and
+ * therefore resolve/reject handlers (including default handler).
+ */
+ bool HasHandler() const;
+
+ /**
+ * Returns the content of the [[PromiseResult]] field. The Promise must not
+ * be pending.
+ */
+ Local<Value> Result();
+
+ /**
+ * Returns the value of the [[PromiseState]] field.
+ */
+ PromiseState State();
+
+ /**
+ * Marks this promise as handled to avoid reporting unhandled rejections.
+ */
+ void MarkAsHandled();
+
+ /**
+ * Marks this promise as silent to prevent pausing the debugger when the
+ * promise is rejected.
+ */
+ void MarkAsSilent();
+
+ V8_INLINE static Promise* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Promise*>(value);
+ }
+
+ static const int kEmbedderFieldCount = V8_PROMISE_INTERNAL_FIELD_COUNT;
+
+ private:
+ Promise();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * PromiseHook with type kInit is called when a new promise is
+ * created. When a new promise is created as part of the chain in the
+ * case of Promise.then or in the intermediate promises created by
+ * Promise.{race, all}/AsyncFunctionAwait, we pass the parent promise
+ * otherwise we pass undefined.
+ *
+ * PromiseHook with type kResolve is called at the beginning of
+ * resolve or reject function defined by CreateResolvingFunctions.
+ *
+ * PromiseHook with type kBefore is called at the beginning of the
+ * PromiseReactionJob.
+ *
+ * PromiseHook with type kAfter is called right at the end of the
+ * PromiseReactionJob.
+ */
+enum class PromiseHookType { kInit, kResolve, kBefore, kAfter };
+
+using PromiseHook = void (*)(PromiseHookType type, Local<Promise> promise,
+ Local<Value> parent);
+
+// --- Promise Reject Callback ---
+enum PromiseRejectEvent {
+ kPromiseRejectWithNoHandler = 0,
+ kPromiseHandlerAddedAfterReject = 1,
+ kPromiseRejectAfterResolved = 2,
+ kPromiseResolveAfterResolved = 3,
+};
+
+class PromiseRejectMessage {
+ public:
+ PromiseRejectMessage(Local<Promise> promise, PromiseRejectEvent event,
+ Local<Value> value)
+ : promise_(promise), event_(event), value_(value) {}
+
+ V8_INLINE Local<Promise> GetPromise() const { return promise_; }
+ V8_INLINE PromiseRejectEvent GetEvent() const { return event_; }
+ V8_INLINE Local<Value> GetValue() const { return value_; }
+
+ private:
+ Local<Promise> promise_;
+ PromiseRejectEvent event_;
+ Local<Value> value_;
+};
+
+using PromiseRejectCallback = void (*)(PromiseRejectMessage message);
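+
+// A sketch of a reject callback an embedder might register through
+// v8::Isolate::SetPromiseRejectCallback (illustrative only):
+//
+//   void OnPromiseReject(v8::PromiseRejectMessage message) {
+//     if (message.GetEvent() == v8::kPromiseRejectWithNoHandler) {
+//       // Remember message.GetPromise(); report it as unhandled unless a
+//       // kPromiseHandlerAddedAfterReject event arrives for it later.
+//     }
+//   }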
+
+} // namespace v8
+
+#endif // INCLUDE_V8_PROMISE_H_
diff --git a/deps/v8/include/v8-proxy.h b/deps/v8/include/v8-proxy.h
new file mode 100644
index 0000000000..a08db8805c
--- /dev/null
+++ b/deps/v8/include/v8-proxy.h
@@ -0,0 +1,50 @@
+
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_PROXY_H_
+#define INCLUDE_V8_PROXY_H_
+
+#include "v8-context.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-object.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Context;
+
+/**
+ * An instance of the built-in Proxy constructor (ECMA-262, 6th Edition,
+ * 26.2.1).
+ */
+class V8_EXPORT Proxy : public Object {
+ public:
+ Local<Value> GetTarget();
+ Local<Value> GetHandler();
+ bool IsRevoked() const;
+ void Revoke();
+
+ /**
+ * Creates a new Proxy for the target object.
+ */
+ static MaybeLocal<Proxy> New(Local<Context> context,
+ Local<Object> local_target,
+ Local<Object> local_handler);
+
+ V8_INLINE static Proxy* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Proxy*>(value);
+ }
+
+ private:
+ Proxy();
+ static void CheckCast(Value* obj);
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_PROXY_H_
diff --git a/deps/v8/include/v8-regexp.h b/deps/v8/include/v8-regexp.h
new file mode 100644
index 0000000000..3791bc0368
--- /dev/null
+++ b/deps/v8/include/v8-regexp.h
@@ -0,0 +1,105 @@
+
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_REGEXP_H_
+#define INCLUDE_V8_REGEXP_H_
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-object.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Context;
+
+/**
+ * An instance of the built-in RegExp constructor (ECMA-262, 15.10).
+ */
+class V8_EXPORT RegExp : public Object {
+ public:
+ /**
+ * Regular expression flag bits. They can be or'ed to enable a set
+ * of flags.
+ * The kLinear value ('l') is experimental and can only be used with
+ * --enable-experimental-regexp-engine. RegExps with kLinear flag are
+ * guaranteed to be executed in asymptotically linear time with respect to
+ * the length of the subject string.
+ */
+ enum Flags {
+ kNone = 0,
+ kGlobal = 1 << 0,
+ kIgnoreCase = 1 << 1,
+ kMultiline = 1 << 2,
+ kSticky = 1 << 3,
+ kUnicode = 1 << 4,
+ kDotAll = 1 << 5,
+ kLinear = 1 << 6,
+ kHasIndices = 1 << 7,
+ };
+
+ static constexpr int kFlagCount = 8;
+
+ /**
+ * Creates a regular expression from the given pattern string and
+ * the flags bit field. May throw a JavaScript exception as
+ * described in ECMA-262, 15.10.4.1.
+ *
+ * For example,
+ * RegExp::New(v8::String::New("foo"),
+ * static_cast<RegExp::Flags>(kGlobal | kMultiline))
+ * is equivalent to evaluating "/foo/gm".
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<RegExp> New(Local<Context> context,
+ Local<String> pattern,
+ Flags flags);
+
+ /**
+ * Like New, but additionally specifies a backtrack limit. If the number of
+ * backtracks done in one Exec call hits the limit, a match failure is
+ * immediately returned.
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<RegExp> NewWithBacktrackLimit(
+ Local<Context> context, Local<String> pattern, Flags flags,
+ uint32_t backtrack_limit);
+
+ /**
+ * Executes the current RegExp instance on the given subject string.
+ * Equivalent to RegExp.prototype.exec as described in
+ *
+ * https://tc39.es/ecma262/#sec-regexp.prototype.exec
+ *
+ * On success, an Array containing the matched strings is returned. On
+ * failure, returns Null.
+ *
+ * Note: modifies global context state, accessible e.g. through RegExp.input.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Object> Exec(Local<Context> context,
+ Local<String> subject);
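+
+ // A usage sketch (illustrative only; `context`, `pattern` and `subject`
+ // are assumed to exist):
+ //
+ //   v8::Local<v8::RegExp> re =
+ //       v8::RegExp::New(context, pattern, v8::RegExp::kGlobal)
+ //           .ToLocalChecked();
+ //   v8::Local<v8::Object> match;
+ //   if (re->Exec(context, subject).ToLocal(&match) && !match->IsNull()) {
+ //     // `match` behaves like the JS array returned by RegExp#exec.
+ //   }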
+
+ /**
+ * Returns the value of the source property: a string representing
+ * the regular expression.
+ */
+ Local<String> GetSource() const;
+
+ /**
+ * Returns the flags bit field.
+ */
+ Flags GetFlags() const;
+
+ V8_INLINE static RegExp* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<RegExp*>(value);
+ }
+
+ private:
+ static void CheckCast(Value* obj);
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_REGEXP_H_
diff --git a/deps/v8/include/v8-script.h b/deps/v8/include/v8-script.h
new file mode 100644
index 0000000000..d17089932c
--- /dev/null
+++ b/deps/v8/include/v8-script.h
@@ -0,0 +1,771 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_SCRIPT_H_
+#define INCLUDE_V8_SCRIPT_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <vector>
+
+#include "v8-data.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-maybe.h" // NOLINT(build/include_directory)
+#include "v8-message.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Function;
+class Object;
+class PrimitiveArray;
+class Script;
+
+namespace internal {
+class BackgroundDeserializeTask;
+struct ScriptStreamingData;
+} // namespace internal
+
+/**
+ * A container type that holds relevant metadata for module loading.
+ *
+ * This is passed back to the embedder as part of
+ * HostImportModuleDynamicallyCallback for module loading.
+ */
+class V8_EXPORT ScriptOrModule {
+ public:
+ /**
+ * The name that was passed by the embedder as ResourceName to the
+ * ScriptOrigin. This can be either a v8::String or v8::Undefined.
+ */
+ Local<Value> GetResourceName();
+
+ /**
+ * The options that were passed by the embedder as HostDefinedOptions to
+ * the ScriptOrigin.
+ */
+ Local<PrimitiveArray> GetHostDefinedOptions();
+};
+
+/**
+ * A compiled JavaScript script, not yet tied to a Context.
+ */
+class V8_EXPORT UnboundScript {
+ public:
+ /**
+ * Binds the script to the currently entered context.
+ */
+ Local<Script> BindToCurrentContext();
+
+ int GetId() const;
+ Local<Value> GetScriptName();
+
+ /**
+ * Data read from magic sourceURL comments.
+ */
+ Local<Value> GetSourceURL();
+ /**
+ * Data read from magic sourceMappingURL comments.
+ */
+ Local<Value> GetSourceMappingURL();
+
+ /**
+ * Returns the zero-based line number of the code_pos location in the
+ * script. -1 will be returned if no information is available.
+ */
+ int GetLineNumber(int code_pos);
+
+ static const int kNoScriptId = 0;
+};
+
+/**
+ * A compiled JavaScript module, not yet tied to a Context.
+ */
+class V8_EXPORT UnboundModuleScript : public Data {
+ // Only used as a container for code caching.
+};
+
+/**
+ * A location in JavaScript source.
+ */
+class V8_EXPORT Location {
+ public:
+ int GetLineNumber() { return line_number_; }
+ int GetColumnNumber() { return column_number_; }
+
+ Location(int line_number, int column_number)
+ : line_number_(line_number), column_number_(column_number) {}
+
+ private:
+ int line_number_;
+ int column_number_;
+};
+
+class V8_EXPORT ModuleRequest : public Data {
+ public:
+ /**
+ * Returns the module specifier for this ModuleRequest.
+ */
+ Local<String> GetSpecifier() const;
+
+ /**
+ * Returns the source code offset of this module request.
+ * Use Module::SourceOffsetToLocation to convert this to line/column numbers.
+ */
+ int GetSourceOffset() const;
+
+ /**
+ * Contains the import assertions for this request in the form:
+ * [key1, value1, source_offset1, key2, value2, source_offset2, ...].
+ * The keys and values are of type v8::String, and the source offsets are of
+ * type Int32. Use Module::SourceOffsetToLocation to convert the source
+ * offsets to Locations with line/column numbers.
+ *
+ * All assertions present in the module request will be supplied in this
+ * list, regardless of whether they are supported by the host. Per
+ * https://tc39.es/proposal-import-assertions/#sec-hostgetsupportedimportassertions,
+ * hosts are expected to ignore assertions that they do not support (as
+ * opposed to, for example, triggering an error if an unsupported assertion is
+ * present).
+ */
+ Local<FixedArray> GetImportAssertions() const;
+
+ V8_INLINE static ModuleRequest* Cast(Data* data);
+
+ private:
+ static void CheckCast(Data* obj);
+};
+
+/**
+ * A compiled JavaScript module.
+ */
+class V8_EXPORT Module : public Data {
+ public:
+ /**
+ * The different states a module can be in.
+ *
+ * This corresponds to the states used in ECMAScript except that "evaluated"
+ * is split into kEvaluated and kErrored, indicating success and failure,
+ * respectively.
+ */
+ enum Status {
+ kUninstantiated,
+ kInstantiating,
+ kInstantiated,
+ kEvaluating,
+ kEvaluated,
+ kErrored
+ };
+
+ /**
+ * Returns the module's current status.
+ */
+ Status GetStatus() const;
+
+ /**
+ * For a module in kErrored status, this returns the corresponding exception.
+ */
+ Local<Value> GetException() const;
+
+ /**
+ * Returns the number of modules requested by this module.
+ */
+ V8_DEPRECATE_SOON("Use Module::GetModuleRequests() and FixedArray::Length().")
+ int GetModuleRequestsLength() const;
+
+ /**
+ * Returns the ith module specifier in this module.
+ * i must be < GetModuleRequestsLength() and >= 0.
+ */
+ V8_DEPRECATE_SOON(
+ "Use Module::GetModuleRequests() and ModuleRequest::GetSpecifier().")
+ Local<String> GetModuleRequest(int i) const;
+
+ /**
+ * Returns the source location (line number and column number) of the ith
+ * module specifier's first occurrence in this module.
+ */
+ V8_DEPRECATE_SOON(
+ "Use Module::GetModuleRequests(), ModuleRequest::GetSourceOffset(), and "
+ "Module::SourceOffsetToLocation().")
+ Location GetModuleRequestLocation(int i) const;
+
+ /**
+ * Returns the ModuleRequests for this module.
+ */
+ Local<FixedArray> GetModuleRequests() const;
+
+ /**
+ * For the given source text offset in this module, returns the corresponding
+ * Location with line and column numbers.
+ */
+ Location SourceOffsetToLocation(int offset) const;
+
+ /**
+ * Returns the identity hash for this object.
+ */
+ int GetIdentityHash() const;
+
+ using ResolveCallback V8_DEPRECATE_SOON("Use ResolveModuleCallback") =
+ MaybeLocal<Module> (*)(Local<Context> context, Local<String> specifier,
+ Local<Module> referrer);
+ using ResolveModuleCallback = MaybeLocal<Module> (*)(
+ Local<Context> context, Local<String> specifier,
+ Local<FixedArray> import_assertions, Local<Module> referrer);
+
+ /**
+ * Instantiates the module and its dependencies.
+ *
+ * Returns an empty Maybe<bool> if an exception occurred during
+ * instantiation. (In the case where the callback throws an exception, that
+ * exception is propagated.)
+ */
+ V8_DEPRECATE_SOON(
+ "Use the version of InstantiateModule that takes a ResolveModuleCallback "
+ "parameter")
+ V8_WARN_UNUSED_RESULT Maybe<bool> InstantiateModule(Local<Context> context,
+ ResolveCallback callback);
+ V8_WARN_UNUSED_RESULT Maybe<bool> InstantiateModule(
+ Local<Context> context, ResolveModuleCallback callback);
+
+ /**
+ * Evaluates the module and its dependencies.
+ *
+ * If status is kInstantiated, run the module's code and return a Promise
+ * object. On success, set status to kEvaluated and resolve the Promise with
+ * the completion value; on failure, set status to kErrored and reject the
+ * Promise with the error.
+ *
+ * If IsGraphAsync() is false, the returned Promise is settled.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> Evaluate(Local<Context> context);
+
+ /**
+ * Returns the namespace object of this module.
+ *
+ * The module's status must be at least kInstantiated.
+ */
+ Local<Value> GetModuleNamespace();
+
+ /**
+ * Returns the corresponding context-unbound module script.
+ *
+ * The module must be unevaluated, i.e. its status must not be kEvaluating,
+ * kEvaluated or kErrored.
+ */
+ Local<UnboundModuleScript> GetUnboundModuleScript();
+
+ /**
+ * Returns the underlying script's id.
+ *
+ * The module must be a SourceTextModule and must not have a kErrored status.
+ */
+ int ScriptId() const;
+
+ /**
+ * Returns whether this module or any of its requested modules is async,
+ * i.e. contains top-level await.
+ *
+ * The module's status must be at least kInstantiated.
+ */
+ bool IsGraphAsync() const;
+
+ /**
+ * Returns whether the module is a SourceTextModule.
+ */
+ bool IsSourceTextModule() const;
+
+ /**
+ * Returns whether the module is a SyntheticModule.
+ */
+ bool IsSyntheticModule() const;
+
+ /**
+ * Callback defined in the embedder. This is responsible for setting
+ * the module's exported values with calls to SetSyntheticModuleExport().
+ * The callback must return a resolved Promise to indicate success (where no
+ * exception was thrown) and return an empty MaybeLocal to indicate failure
+ * (where an exception was thrown).
+ */
+ using SyntheticModuleEvaluationSteps =
+ MaybeLocal<Value> (*)(Local<Context> context, Local<Module> module);
+
+ /**
+ * Creates a new SyntheticModule with the specified export names, where
+ * evaluation_steps will be executed upon module evaluation.
+ * export_names must not contain duplicates.
+ * module_name is used solely for logging/debugging and doesn't affect module
+ * behavior.
+ */
+ static Local<Module> CreateSyntheticModule(
+ Isolate* isolate, Local<String> module_name,
+ const std::vector<Local<String>>& export_names,
+ SyntheticModuleEvaluationSteps evaluation_steps);
+
+ /**
+ * Set this module's exported value for the name export_name to the specified
+ * export_value. This method must be called only on Modules created via
+ * CreateSyntheticModule. An error will be thrown if export_name is not one
+ * of the export_names that were passed in that CreateSyntheticModule call.
+ * Returns Just(true) on success, Nothing<bool>() if an error was thrown.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> SetSyntheticModuleExport(
+ Isolate* isolate, Local<String> export_name, Local<Value> export_value);
+
+ V8_INLINE static Module* Cast(Data* data);
+
+ private:
+ static void CheckCast(Data* obj);
+};
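+
+// A sketch of the typical flow (illustrative only; `ResolveModuleCb` is a
+// hypothetical ResolveModuleCallback, and `origin` is assumed to be a
+// ScriptOrigin constructed with is_module set to true):
+//
+//   v8::ScriptCompiler::Source src(source_string, origin);
+//   v8::Local<v8::Module> module =
+//       v8::ScriptCompiler::CompileModule(isolate, &src).ToLocalChecked();
+//   if (module->InstantiateModule(context, ResolveModuleCb)
+//           .FromMaybe(false)) {
+//     v8::Local<v8::Value> promise =
+//         module->Evaluate(context).ToLocalChecked();
+//   }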
+
+/**
+ * A compiled JavaScript script, tied to a Context which was active when the
+ * script was compiled.
+ */
+class V8_EXPORT Script {
+ public:
+ /**
+ * A shorthand for ScriptCompiler::Compile().
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<Script> Compile(
+ Local<Context> context, Local<String> source,
+ ScriptOrigin* origin = nullptr);
+
+ /**
+ * Runs the script returning the resulting value. It will be run in the
+ * context in which it was created (ScriptCompiler::CompileBound or
+ * UnboundScript::BindToCurrentContext()).
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> Run(Local<Context> context);
+
+ /**
+ * Returns the corresponding context-unbound script.
+ */
+ Local<UnboundScript> GetUnboundScript();
+};
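+
+// A compile-and-run sketch (illustrative only; `context` is assumed to be
+// entered and `source_string` to exist):
+//
+//   v8::Local<v8::Script> script =
+//       v8::Script::Compile(context, source_string).ToLocalChecked();
+//   v8::Local<v8::Value> result;
+//   if (script->Run(context).ToLocal(&result)) {
+//     // use `result`; an empty MaybeLocal means an exception was thrown.
+//   }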
+
+enum class ScriptType { kClassic, kModule };
+
+/**
+ * For compiling scripts.
+ */
+class V8_EXPORT ScriptCompiler {
+ public:
+ class ConsumeCodeCacheTask;
+
+ /**
+ * Compilation data that the embedder can cache and pass back to speed up
+ * future compilations. The data is produced if the CompilerOptions passed to
+ * the compilation functions in ScriptCompiler contains produce_data_to_cache
+ * = true. The data to cache can then be retrieved from
+ * UnboundScript.
+ */
+ struct V8_EXPORT CachedData {
+ enum BufferPolicy { BufferNotOwned, BufferOwned };
+
+ CachedData()
+ : data(nullptr),
+ length(0),
+ rejected(false),
+ buffer_policy(BufferNotOwned) {}
+
+ // If buffer_policy is BufferNotOwned, the caller keeps the ownership of
+ // data and guarantees that it stays alive until the CachedData object is
+ // destroyed. If the policy is BufferOwned, the given data will be deleted
+ // (with delete[]) when the CachedData object is destroyed.
+ CachedData(const uint8_t* data, int length,
+ BufferPolicy buffer_policy = BufferNotOwned);
+ ~CachedData();
+ // TODO(marja): Async compilation; add constructors which take a callback
+ // which will be called when V8 no longer needs the data.
+ const uint8_t* data;
+ int length;
+ bool rejected;
+ BufferPolicy buffer_policy;
+
+ // Prevent copying.
+ CachedData(const CachedData&) = delete;
+ CachedData& operator=(const CachedData&) = delete;
+ };
+
+ /**
+ * Source code which can then be compiled to an UnboundScript or Script.
+ */
+ class Source {
+ public:
+ // Source takes ownership of both CachedData and CodeCacheConsumeTask.
+ V8_INLINE Source(Local<String> source_string, const ScriptOrigin& origin,
+ CachedData* cached_data = nullptr,
+ ConsumeCodeCacheTask* consume_cache_task = nullptr);
+ // Source takes ownership of both CachedData and CodeCacheConsumeTask.
+ V8_INLINE explicit Source(
+ Local<String> source_string, CachedData* cached_data = nullptr,
+ ConsumeCodeCacheTask* consume_cache_task = nullptr);
+ V8_INLINE ~Source() = default;
+
+ // Ownership of the CachedData or its buffers is *not* transferred to the
+ // caller. The CachedData object is alive as long as the Source object is
+ // alive.
+ V8_INLINE const CachedData* GetCachedData() const;
+
+ V8_INLINE const ScriptOriginOptions& GetResourceOptions() const;
+
+ private:
+ friend class ScriptCompiler;
+
+ Local<String> source_string;
+
+ // Origin information
+ Local<Value> resource_name;
+ int resource_line_offset;
+ int resource_column_offset;
+ ScriptOriginOptions resource_options;
+ Local<Value> source_map_url;
+ Local<PrimitiveArray> host_defined_options;
+
+ // Holds cached data from a previous compilation (if a kConsume*Cache flag
+ // is set), or newly generated cache data (if a kProduce*Cache flag is
+ // set) after a compile method has been called.
+ std::unique_ptr<CachedData> cached_data;
+ std::unique_ptr<ConsumeCodeCacheTask> consume_cache_task;
+ };
+
+ /**
+ * For streaming incomplete script data to V8. The embedder should implement a
+ * subclass of this class.
+ */
+ class V8_EXPORT ExternalSourceStream {
+ public:
+ virtual ~ExternalSourceStream() = default;
+
+ /**
+ * V8 calls this to request the next chunk of data from the embedder. This
+ * function will be called on a background thread, so it's OK to block and
+ * wait for the data, if the embedder doesn't have data yet. Returns the
+ * length of the data returned. When the data ends, GetMoreData should
+ * return 0. Caller takes ownership of the data.
+ *
+ * When streaming UTF-8 data, V8 handles multi-byte characters split between
+ * two data chunks, but doesn't handle multi-byte characters split between
+ * more than two data chunks. The embedder can avoid this problem by always
+ * returning at least 2 bytes of data.
+ *
+ * When streaming UTF-16 data, V8 does not handle characters split between
+ * two data chunks. The embedder has to make sure that chunks have an even
+ * length.
+ *
+ * If the embedder wants to cancel the streaming, they should make the next
+ * GetMoreData call return 0. V8 will interpret it as end of data (and most
+ * probably, parsing will fail). The streaming task will return as soon as
+ * V8 has parsed the data it received so far.
+ */
+ virtual size_t GetMoreData(const uint8_t** src) = 0;
+
+ /**
+ * V8 calls this method to set a 'bookmark' at the current position in
+ * the source stream, for the purpose of (maybe) later calling
+ * ResetToBookmark. If ResetToBookmark is called later, then subsequent
+ * calls to GetMoreData should return the same data as they did when
+ * SetBookmark was called earlier.
+ *
+ * The embedder may return 'false' to indicate it cannot provide this
+ * functionality.
+ */
+ virtual bool SetBookmark();
+
+ /**
+ * V8 calls this to return to a previously set bookmark.
+ */
+ virtual void ResetToBookmark();
+ };
+
+ /**
+ * Source code which can be streamed into V8 in pieces. It will be parsed
+ * while streaming and compiled after parsing has completed. StreamedSource
+ * must be kept alive while the streaming task is run (see ScriptStreamingTask
+ * below).
+ */
+ class V8_EXPORT StreamedSource {
+ public:
+ enum Encoding { ONE_BYTE, TWO_BYTE, UTF8, WINDOWS_1252 };
+
+ StreamedSource(std::unique_ptr<ExternalSourceStream> source_stream,
+ Encoding encoding);
+ ~StreamedSource();
+
+ internal::ScriptStreamingData* impl() const { return impl_.get(); }
+
+ // Prevent copying.
+ StreamedSource(const StreamedSource&) = delete;
+ StreamedSource& operator=(const StreamedSource&) = delete;
+
+ private:
+ std::unique_ptr<internal::ScriptStreamingData> impl_;
+ };
+
+ /**
+ * A streaming task which the embedder must run on a background thread to
+ * stream scripts into V8. Returned by ScriptCompiler::StartStreaming.
+ */
+ class V8_EXPORT ScriptStreamingTask final {
+ public:
+ void Run();
+
+ private:
+ friend class ScriptCompiler;
+
+ explicit ScriptStreamingTask(internal::ScriptStreamingData* data)
+ : data_(data) {}
+
+ internal::ScriptStreamingData* data_;
+ };
+
+ /**
+ * A task which the embedder must run on a background thread to
+ * consume a V8 code cache. Returned by
+ * ScriptCompiler::StartConsumingCodeCache.
+ */
+ class V8_EXPORT ConsumeCodeCacheTask final {
+ public:
+ ~ConsumeCodeCacheTask();
+
+ void Run();
+
+ private:
+ friend class ScriptCompiler;
+
+ explicit ConsumeCodeCacheTask(
+ std::unique_ptr<internal::BackgroundDeserializeTask> impl);
+
+ std::unique_ptr<internal::BackgroundDeserializeTask> impl_;
+ };
+
+ enum CompileOptions {
+ kNoCompileOptions = 0,
+ kConsumeCodeCache,
+ kEagerCompile
+ };
+
+ /**
+ * The reason for which we are not requesting or providing a code cache.
+ */
+ enum NoCacheReason {
+ kNoCacheNoReason = 0,
+ kNoCacheBecauseCachingDisabled,
+ kNoCacheBecauseNoResource,
+ kNoCacheBecauseInlineScript,
+ kNoCacheBecauseModule,
+ kNoCacheBecauseStreamingSource,
+ kNoCacheBecauseInspector,
+ kNoCacheBecauseScriptTooSmall,
+ kNoCacheBecauseCacheTooCold,
+ kNoCacheBecauseV8Extension,
+ kNoCacheBecauseExtensionModule,
+ kNoCacheBecausePacScript,
+ kNoCacheBecauseInDocumentWrite,
+ kNoCacheBecauseResourceWithNoCacheHandler,
+ kNoCacheBecauseDeferredProduceCodeCache
+ };
+
+ /**
+ * Compiles the specified script (context-independent).
+ * Cached data as part of the source object can be optionally produced to be
+ * consumed later to speed up compilation of identical source scripts.
+ *
+ * Note that when producing cached data, the source object's cached data
+ * field must be NULL. When consuming cached data, the cached data must have
+ * been produced by the same version of V8, and the embedder needs to ensure
+ * that the cached data is the correct one for the given script.
+ *
+ * \param source Script source code.
+ * \return Compiled script object (context independent; for running it must be
+ * bound to a context).
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<UnboundScript> CompileUnboundScript(
+ Isolate* isolate, Source* source,
+ CompileOptions options = kNoCompileOptions,
+ NoCacheReason no_cache_reason = kNoCacheNoReason);
+
+ /**
+ * Compiles the specified script (bound to current context).
+ *
+ * \param source Script source code.
+ * \param pre_data Pre-parsing data, as obtained by ScriptData::PreCompile()
+ * using pre_data speeds compilation if it's done multiple times.
+ * Owned by caller, no references are kept when this function returns.
+ * \return Compiled script object, bound to the context that was active
+ * when this function was called. When run it will always use this
+ * context.
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<Script> Compile(
+ Local<Context> context, Source* source,
+ CompileOptions options = kNoCompileOptions,
+ NoCacheReason no_cache_reason = kNoCacheNoReason);
+
+ /**
+ * Returns a task which streams script data into V8, or NULL if the script
+ * cannot be streamed. The user is responsible for running the task on a
+ * background thread and deleting it. When run, the task starts parsing the
+ * script, and it will request data from the StreamedSource as needed. When
+ * ScriptStreamingTask::Run exits, all data has been streamed and the script
+ * can be compiled (see Compile below).
+ *
+ * This API allows streaming to start with as little data as possible; the
+ * remaining data (for example, the ScriptOrigin) is passed to Compile.
+ */
+ static ScriptStreamingTask* StartStreaming(
+ Isolate* isolate, StreamedSource* source,
+ ScriptType type = ScriptType::kClassic);
+
+ static ConsumeCodeCacheTask* StartConsumingCodeCache(
+ Isolate* isolate, std::unique_ptr<CachedData> source);
+
+ /**
+ * Compiles a streamed script (bound to current context).
+ *
+ * This can only be called after the streaming has finished
+ * (ScriptStreamingTask has been run). V8 doesn't construct the source string
+ * during streaming, so the embedder needs to pass the full source here.
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<Script> Compile(
+ Local<Context> context, StreamedSource* source,
+ Local<String> full_source_string, const ScriptOrigin& origin);
+
+ /**
+ * Return a version tag for CachedData for the current V8 version & flags.
+ *
+ * This value is meant only for determining whether a previously generated
+ * CachedData instance is still valid; the tag has no other meaning.
+ *
+ * Background: The data carried by CachedData may depend on the exact
+ * V8 version number or current compiler flags. This means that when
+ * persisting CachedData, the embedder must take care to not pass in
+ * data from another V8 version, or the same version with different
+ * features enabled.
+ *
+ * The easiest way to do so is to clear the embedder's cache on any
+ * such change.
+ *
+ * Alternatively, this tag can be stored alongside the cached data and
+ * compared when it is being used.
+ */
+ static uint32_t CachedDataVersionTag();
+
+ /**
+ * Compile an ES module, returning a Module that encapsulates
+ * the compiled code.
+ *
+ * Corresponds to the ParseModule abstract operation in the
+ * ECMAScript specification.
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<Module> CompileModule(
+ Isolate* isolate, Source* source,
+ CompileOptions options = kNoCompileOptions,
+ NoCacheReason no_cache_reason = kNoCacheNoReason);
+
+ /**
+ * Compiles a streamed module script.
+ *
+ * This can only be called after the streaming has finished
+ * (ScriptStreamingTask has been run). V8 doesn't construct the source string
+ * during streaming, so the embedder needs to pass the full source here.
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<Module> CompileModule(
+ Local<Context> context, StreamedSource* v8_source,
+ Local<String> full_source_string, const ScriptOrigin& origin);
+
+ /**
+ * Compile a function for a given context. This is equivalent to running
+ *
+ * with (obj) {
+ * return function(args) { ... }
+ * }
+ *
+ * It is possible to specify multiple context extensions (obj in the above
+ * example).
+ */
+ static V8_WARN_UNUSED_RESULT MaybeLocal<Function> CompileFunctionInContext(
+ Local<Context> context, Source* source, size_t arguments_count,
+ Local<String> arguments[], size_t context_extension_count,
+ Local<Object> context_extensions[],
+ CompileOptions options = kNoCompileOptions,
+ NoCacheReason no_cache_reason = kNoCacheNoReason,
+ Local<ScriptOrModule>* script_or_module_out = nullptr);
+
+ /**
+ * Creates and returns code cache for the specified unbound_script.
+ * This will return nullptr if the script cannot be serialized. The
+ * CachedData returned by this function should be owned by the caller.
+ */
+ static CachedData* CreateCodeCache(Local<UnboundScript> unbound_script);
+
+ /**
+ * Creates and returns code cache for the specified unbound_module_script.
+ * This will return nullptr if the script cannot be serialized. The
+ * CachedData returned by this function should be owned by the caller.
+ */
+ static CachedData* CreateCodeCache(
+ Local<UnboundModuleScript> unbound_module_script);
+
+ /**
+ * Creates and returns code cache for the specified function that was
+ * previously produced by CompileFunctionInContext.
+ * This will return nullptr if the script cannot be serialized. The
+ * CachedData returned by this function should be owned by the caller.
+ */
+ static CachedData* CreateCodeCacheForFunction(Local<Function> function);
+
+ private:
+ static V8_WARN_UNUSED_RESULT MaybeLocal<UnboundScript> CompileUnboundInternal(
+ Isolate* isolate, Source* source, CompileOptions options,
+ NoCacheReason no_cache_reason);
+};
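+
+// A produce-then-consume code cache sketch for the API above (illustrative
+// only; `source_string`, `origin`, `isolate` and `context` are assumed to
+// exist). The second compile reports via `rejected` whether the cache was
+// accepted.
+//
+//   v8::ScriptCompiler::Source produce(source_string, origin);
+//   v8::Local<v8::UnboundScript> unbound =
+//       v8::ScriptCompiler::CompileUnboundScript(isolate, &produce)
+//           .ToLocalChecked();
+//   std::unique_ptr<v8::ScriptCompiler::CachedData> cache(
+//       v8::ScriptCompiler::CreateCodeCache(unbound));
+//
+//   // The Source takes ownership of the consuming CachedData wrapper; the
+//   // wrapper does not own the buffer, so `cache` must outlive `consume`.
+//   v8::ScriptCompiler::Source consume(
+//       source_string, origin,
+//       new v8::ScriptCompiler::CachedData(cache->data, cache->length));
+//   v8::ScriptCompiler::Compile(context, &consume,
+//                               v8::ScriptCompiler::kConsumeCodeCache)
+//       .ToLocalChecked();
+//   bool cache_was_used = !consume.GetCachedData()->rejected;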
+
+ScriptCompiler::Source::Source(Local<String> string, const ScriptOrigin& origin,
+ CachedData* data,
+ ConsumeCodeCacheTask* consume_cache_task)
+ : source_string(string),
+ resource_name(origin.ResourceName()),
+ resource_line_offset(origin.LineOffset()),
+ resource_column_offset(origin.ColumnOffset()),
+ resource_options(origin.Options()),
+ source_map_url(origin.SourceMapUrl()),
+ host_defined_options(origin.HostDefinedOptions()),
+ cached_data(data),
+ consume_cache_task(consume_cache_task) {}
+
+ScriptCompiler::Source::Source(Local<String> string, CachedData* data,
+ ConsumeCodeCacheTask* consume_cache_task)
+ : source_string(string),
+ cached_data(data),
+ consume_cache_task(consume_cache_task) {}
+
+const ScriptCompiler::CachedData* ScriptCompiler::Source::GetCachedData()
+ const {
+ return cached_data.get();
+}
+
+const ScriptOriginOptions& ScriptCompiler::Source::GetResourceOptions() const {
+ return resource_options;
+}
+
+ModuleRequest* ModuleRequest::Cast(Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return reinterpret_cast<ModuleRequest*>(data);
+}
+
+Module* Module::Cast(Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return reinterpret_cast<Module*>(data);
+}
+
+} // namespace v8
+
+#endif // INCLUDE_V8_SCRIPT_H_
diff --git a/deps/v8/include/v8-snapshot.h b/deps/v8/include/v8-snapshot.h
new file mode 100644
index 0000000000..ed02598c36
--- /dev/null
+++ b/deps/v8/include/v8-snapshot.h
@@ -0,0 +1,198 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_SNAPSHOT_H_
+#define INCLUDE_V8_SNAPSHOT_H_
+
+#include <vector>
+
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Object;
+
+class V8_EXPORT StartupData {
+ public:
+ /**
+ * Whether the created data can be rehashed and the hash seed can be
+ * recomputed when deserialized.
+ * Only valid for StartupData returned by SnapshotCreator::CreateBlob().
+ */
+ bool CanBeRehashed() const;
+ /**
+ * Allows embedders to verify whether the data is valid for the current
+ * V8 instance.
+ */
+ bool IsValid() const;
+
+ const char* data;
+ int raw_size;
+};
+
+/**
+ * Callback and supporting data used in SnapshotCreator to implement embedder
+ * logic to serialize internal fields.
+ * Internal fields that directly reference V8 objects are serialized without
+ * calling this callback. Internal fields that contain aligned pointers are
+ * serialized by this callback if it returns a non-zero result; otherwise the
+ * field is serialized verbatim.
+ */
+struct SerializeInternalFieldsCallback {
+ using CallbackFunction = StartupData (*)(Local<Object> holder, int index,
+ void* data);
+ SerializeInternalFieldsCallback(CallbackFunction function = nullptr,
+ void* data_arg = nullptr)
+ : callback(function), data(data_arg) {}
+ CallbackFunction callback;
+ void* data;
+};
+// Note that these fields are called "internal fields" in the API and called
+// "embedder fields" within V8.
+using SerializeEmbedderFieldsCallback = SerializeInternalFieldsCallback;
+
+/**
+ * Callback and supporting data used to implement embedder logic to deserialize
+ * internal fields.
+ */
+struct DeserializeInternalFieldsCallback {
+ using CallbackFunction = void (*)(Local<Object> holder, int index,
+ StartupData payload, void* data);
+ DeserializeInternalFieldsCallback(CallbackFunction function = nullptr,
+ void* data_arg = nullptr)
+ : callback(function), data(data_arg) {}
+ void (*callback)(Local<Object> holder, int index, StartupData payload,
+ void* data);
+ void* data;
+};
+
+using DeserializeEmbedderFieldsCallback = DeserializeInternalFieldsCallback;
+
+/**
+ * Helper class to create a snapshot data blob.
+ *
+ * The Isolate used by a SnapshotCreator is owned by it, and will be entered
+ * and exited by the constructor and destructor, respectively; the destructor
+ * will also destroy the Isolate. Experimental language features, including
+ * those available by default, are not available while creating a snapshot.
+ */
+class V8_EXPORT SnapshotCreator {
+ public:
+ enum class FunctionCodeHandling { kClear, kKeep };
+
+ /**
+ * Initialize and enter an isolate, and set it up for serialization.
+ * The isolate is either created from scratch or from an existing snapshot.
+ * The caller keeps ownership of the argument snapshot.
+ * \param existing_blob existing snapshot from which to create this one.
+ * \param external_references a null-terminated array of external references
+ * that must be equivalent to CreateParams::external_references.
+ */
+ SnapshotCreator(Isolate* isolate,
+ const intptr_t* external_references = nullptr,
+ StartupData* existing_blob = nullptr);
+
+ /**
+ * Create and enter an isolate, and set it up for serialization.
+ * The isolate is either created from scratch or from an existing snapshot.
+ * The caller keeps ownership of the argument snapshot.
+ * \param existing_blob existing snapshot from which to create this one.
+ * \param external_references a null-terminated array of external references
+ * that must be equivalent to CreateParams::external_references.
+ */
+ SnapshotCreator(const intptr_t* external_references = nullptr,
+ StartupData* existing_blob = nullptr);
+
+ /**
+ * Destroy the snapshot creator, and exit and dispose of the Isolate
+ * associated with it.
+ */
+ ~SnapshotCreator();
+
+ /**
+ * \returns the isolate prepared by the snapshot creator.
+ */
+ Isolate* GetIsolate();
+
+ /**
+ * Set the default context to be included in the snapshot blob.
+ * The snapshot will not contain the global proxy; the embedder is expected
+ * to provide one, or a global object template from which to create one,
+ * upon deserialization.
+ *
+ * \param callback optional callback to serialize internal fields.
+ */
+ void SetDefaultContext(Local<Context> context,
+ SerializeInternalFieldsCallback callback =
+ SerializeInternalFieldsCallback());
+
+ /**
+ * Add additional context to be included in the snapshot blob.
+ * The snapshot will include the global proxy.
+ *
+ * \param callback optional callback to serialize internal fields.
+ *
+ * \returns the index of the context in the snapshot blob.
+ */
+ size_t AddContext(Local<Context> context,
+ SerializeInternalFieldsCallback callback =
+ SerializeInternalFieldsCallback());
+
+ /**
+   * Attach arbitrary v8::Data to the context snapshot, which can be retrieved
+ * via Context::GetDataFromSnapshotOnce after deserialization. This data does
+ * not survive when a new snapshot is created from an existing snapshot.
+ * \returns the index for retrieval.
+ */
+ template <class T>
+ V8_INLINE size_t AddData(Local<Context> context, Local<T> object);
+
+ /**
+   * Attach arbitrary v8::Data to the isolate snapshot, which can be retrieved
+ * via Isolate::GetDataFromSnapshotOnce after deserialization. This data does
+ * not survive when a new snapshot is created from an existing snapshot.
+ * \returns the index for retrieval.
+ */
+ template <class T>
+ V8_INLINE size_t AddData(Local<T> object);
+
+ /**
+   * Creates a snapshot data blob.
+ * This must not be called from within a handle scope.
+ * \param function_code_handling whether to include compiled function code
+ * in the snapshot.
+ * \returns { nullptr, 0 } on failure, and a startup snapshot on success. The
+ * caller acquires ownership of the data array in the return value.
+ */
+ StartupData CreateBlob(FunctionCodeHandling function_code_handling);
+
+ // Disallow copying and assigning.
+ SnapshotCreator(const SnapshotCreator&) = delete;
+ void operator=(const SnapshotCreator&) = delete;
+
+ private:
+ size_t AddData(Local<Context> context, internal::Address object);
+ size_t AddData(internal::Address object);
+
+ void* data_;
+};
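+
+// An end-to-end usage sketch (illustrative; error handling omitted).
+// CreateBlob must be called outside any handle scope, hence the inner scope:
+//
+// \code
+//   v8::SnapshotCreator creator;
+//   v8::Isolate* isolate = creator.GetIsolate();
+//   {
+//     v8::HandleScope scope(isolate);
+//     v8::Local<v8::Context> context = v8::Context::New(isolate);
+//     // ... run warm-up scripts in |context| ...
+//     creator.SetDefaultContext(context);
+//   }
+//   v8::StartupData blob =
+//       creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
+//   // The caller owns blob.data and must delete[] it when done.
+// \endcode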
+
+template <class T>
+size_t SnapshotCreator::AddData(Local<Context> context, Local<T> object) {
+ T* object_ptr = *object;
+ internal::Address* p = reinterpret_cast<internal::Address*>(object_ptr);
+ return AddData(context, *p);
+}
+
+template <class T>
+size_t SnapshotCreator::AddData(Local<T> object) {
+ T* object_ptr = *object;
+ internal::Address* p = reinterpret_cast<internal::Address*>(object_ptr);
+ return AddData(*p);
+}
+
+} // namespace v8
+
+#endif // INCLUDE_V8_SNAPSHOT_H_
diff --git a/deps/v8/include/v8-statistics.h b/deps/v8/include/v8-statistics.h
new file mode 100644
index 0000000000..7f69e5d65e
--- /dev/null
+++ b/deps/v8/include/v8-statistics.h
@@ -0,0 +1,215 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_STATISTICS_H_
+#define INCLUDE_V8_STATISTICS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <memory>
+#include <utility>
+#include <vector>
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-promise.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Context;
+class Isolate;
+
+namespace internal {
+class ReadOnlyHeap;
+} // namespace internal
+
+/**
+ * Controls how the default MeasureMemoryDelegate reports the result of
+ * the memory measurement to JS. With kSummary only the total size is reported.
+ * With kDetailed the result includes the size of each native context.
+ */
+enum class MeasureMemoryMode { kSummary, kDetailed };
+
+/**
+ * Controls how promptly a memory measurement request is executed.
+ * By default the measurement is folded with the next scheduled GC which may
+ * happen after a while and is forced after some timeout.
+ * The kEager mode starts incremental GC right away and is useful for testing.
+ * The kLazy mode does not force GC.
+ */
+enum class MeasureMemoryExecution { kDefault, kEager, kLazy };
+
+/**
+ * The delegate is used in Isolate::MeasureMemory API.
+ *
+ * It specifies the contexts that need to be measured and gets called when
+ * the measurement is completed to report the results.
+ */
+class V8_EXPORT MeasureMemoryDelegate {
+ public:
+ virtual ~MeasureMemoryDelegate() = default;
+
+ /**
+ * Returns true if the size of the given context needs to be measured.
+ */
+ virtual bool ShouldMeasure(Local<Context> context) = 0;
+
+ /**
+ * This function is called when memory measurement finishes.
+ *
+ * \param context_sizes_in_bytes a vector of (context, size) pairs that
+ * includes each context for which ShouldMeasure returned true and that
+ * was not garbage collected while the memory measurement was in progress.
+ *
+ * \param unattributed_size_in_bytes total size of objects that were not
+ * attributed to any context (i.e. are likely shared objects).
+ */
+ virtual void MeasurementComplete(
+ const std::vector<std::pair<Local<Context>, size_t>>&
+ context_sizes_in_bytes,
+ size_t unattributed_size_in_bytes) = 0;
+
+ /**
+ * Returns a default delegate that resolves the given promise when
+ * the memory measurement completes.
+ *
+ * \param isolate the current isolate
+ * \param context the current context
+ * \param promise_resolver the promise resolver that is given the
+ * result of the memory measurement.
+ * \param mode the detail level of the result.
+ */
+ static std::unique_ptr<MeasureMemoryDelegate> Default(
+ Isolate* isolate, Local<Context> context,
+ Local<Promise::Resolver> promise_resolver, MeasureMemoryMode mode);
+};
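+
+// A minimal delegate sketch (illustrative; reporting via printf is an
+// arbitrary choice):
+//
+// \code
+//   class LoggingDelegate final : public v8::MeasureMemoryDelegate {
+//    public:
+//     bool ShouldMeasure(v8::Local<v8::Context> context) override {
+//       return true;  // Measure every context.
+//     }
+//     void MeasurementComplete(
+//         const std::vector<std::pair<v8::Local<v8::Context>, size_t>>&
+//             context_sizes_in_bytes,
+//         size_t unattributed_size_in_bytes) override {
+//       for (const auto& entry : context_sizes_in_bytes)
+//         printf("context: %zu bytes\n", entry.second);
+//       printf("unattributed: %zu bytes\n", unattributed_size_in_bytes);
+//     }
+//   };
+//
+//   isolate->MeasureMemory(std::make_unique<LoggingDelegate>(),
+//                          v8::MeasureMemoryExecution::kDefault);
+// \endcode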
+
+/**
+ * Collection of shared per-process V8 memory information.
+ *
+ * Instances of this class can be passed to
+ * v8::V8::GetSharedMemoryStatistics to get shared memory statistics from V8.
+ */
+class V8_EXPORT SharedMemoryStatistics {
+ public:
+ SharedMemoryStatistics();
+ size_t read_only_space_size() { return read_only_space_size_; }
+ size_t read_only_space_used_size() { return read_only_space_used_size_; }
+ size_t read_only_space_physical_size() {
+ return read_only_space_physical_size_;
+ }
+
+ private:
+ size_t read_only_space_size_;
+ size_t read_only_space_used_size_;
+ size_t read_only_space_physical_size_;
+
+ friend class V8;
+ friend class internal::ReadOnlyHeap;
+};
+
+/**
+ * Collection of V8 heap information.
+ *
+ * Instances of this class can be passed to v8::Isolate::GetHeapStatistics to
+ * get heap statistics from V8.
+ */
+class V8_EXPORT HeapStatistics {
+ public:
+ HeapStatistics();
+ size_t total_heap_size() { return total_heap_size_; }
+ size_t total_heap_size_executable() { return total_heap_size_executable_; }
+ size_t total_physical_size() { return total_physical_size_; }
+ size_t total_available_size() { return total_available_size_; }
+ size_t total_global_handles_size() { return total_global_handles_size_; }
+ size_t used_global_handles_size() { return used_global_handles_size_; }
+ size_t used_heap_size() { return used_heap_size_; }
+ size_t heap_size_limit() { return heap_size_limit_; }
+ size_t malloced_memory() { return malloced_memory_; }
+ size_t external_memory() { return external_memory_; }
+ size_t peak_malloced_memory() { return peak_malloced_memory_; }
+ size_t number_of_native_contexts() { return number_of_native_contexts_; }
+ size_t number_of_detached_contexts() { return number_of_detached_contexts_; }
+
+ /**
+   * Returns a 0/1 boolean, which signifies whether V8 overwrites heap garbage
+   * with a bit pattern.
+ */
+ size_t does_zap_garbage() { return does_zap_garbage_; }
+
+ private:
+ size_t total_heap_size_;
+ size_t total_heap_size_executable_;
+ size_t total_physical_size_;
+ size_t total_available_size_;
+ size_t used_heap_size_;
+ size_t heap_size_limit_;
+ size_t malloced_memory_;
+ size_t external_memory_;
+ size_t peak_malloced_memory_;
+ bool does_zap_garbage_;
+ size_t number_of_native_contexts_;
+ size_t number_of_detached_contexts_;
+ size_t total_global_handles_size_;
+ size_t used_global_handles_size_;
+
+ friend class V8;
+ friend class Isolate;
+};
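+
+// Typical use (illustrative):
+//
+// \code
+//   v8::HeapStatistics stats;
+//   isolate->GetHeapStatistics(&stats);
+//   printf("used %zu of %zu bytes\n", stats.used_heap_size(),
+//          stats.heap_size_limit());
+// \endcode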
+
+class V8_EXPORT HeapSpaceStatistics {
+ public:
+ HeapSpaceStatistics();
+ const char* space_name() { return space_name_; }
+ size_t space_size() { return space_size_; }
+ size_t space_used_size() { return space_used_size_; }
+ size_t space_available_size() { return space_available_size_; }
+ size_t physical_space_size() { return physical_space_size_; }
+
+ private:
+ const char* space_name_;
+ size_t space_size_;
+ size_t space_used_size_;
+ size_t space_available_size_;
+ size_t physical_space_size_;
+
+ friend class Isolate;
+};
+
+class V8_EXPORT HeapObjectStatistics {
+ public:
+ HeapObjectStatistics();
+ const char* object_type() { return object_type_; }
+ const char* object_sub_type() { return object_sub_type_; }
+ size_t object_count() { return object_count_; }
+ size_t object_size() { return object_size_; }
+
+ private:
+ const char* object_type_;
+ const char* object_sub_type_;
+ size_t object_count_;
+ size_t object_size_;
+
+ friend class Isolate;
+};
+
+class V8_EXPORT HeapCodeStatistics {
+ public:
+ HeapCodeStatistics();
+ size_t code_and_metadata_size() { return code_and_metadata_size_; }
+ size_t bytecode_and_metadata_size() { return bytecode_and_metadata_size_; }
+ size_t external_script_source_size() { return external_script_source_size_; }
+
+ private:
+ size_t code_and_metadata_size_;
+ size_t bytecode_and_metadata_size_;
+ size_t external_script_source_size_;
+
+ friend class Isolate;
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_STATISTICS_H_
diff --git a/deps/v8/include/v8-template.h b/deps/v8/include/v8-template.h
new file mode 100644
index 0000000000..b05639cfc1
--- /dev/null
+++ b/deps/v8/include/v8-template.h
@@ -0,0 +1,1051 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_TEMPLATE_H_
+#define INCLUDE_V8_TEMPLATE_H_
+
+#include "v8-data.h" // NOLINT(build/include_directory)
+#include "v8-function-callback.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-memory-span.h" // NOLINT(build/include_directory)
+#include "v8-object.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class AccessorSignature;
+class CFunction;
+class FunctionTemplate;
+class ObjectTemplate;
+class Signature;
+
+// --- Templates ---
+
+#define V8_INTRINSICS_LIST(F) \
+ F(ArrayProto_entries, array_entries_iterator) \
+ F(ArrayProto_forEach, array_for_each_iterator) \
+ F(ArrayProto_keys, array_keys_iterator) \
+ F(ArrayProto_values, array_values_iterator) \
+ F(AsyncIteratorPrototype, initial_async_iterator_prototype) \
+ F(ErrorPrototype, initial_error_prototype) \
+ F(IteratorPrototype, initial_iterator_prototype) \
+ F(ObjProto_valueOf, object_value_of_function)
+
+enum Intrinsic {
+#define V8_DECL_INTRINSIC(name, iname) k##name,
+ V8_INTRINSICS_LIST(V8_DECL_INTRINSIC)
+#undef V8_DECL_INTRINSIC
+};
+
+/**
+ * The superclass of object and function templates.
+ */
+class V8_EXPORT Template : public Data {
+ public:
+ /**
+ * Adds a property to each instance created by this template.
+ *
+ * The property must be defined either as a primitive value, or a template.
+ */
+ void Set(Local<Name> name, Local<Data> value,
+ PropertyAttribute attributes = None);
+ void SetPrivate(Local<Private> name, Local<Data> value,
+ PropertyAttribute attributes = None);
+ V8_INLINE void Set(Isolate* isolate, const char* name, Local<Data> value,
+ PropertyAttribute attributes = None);
+
+ void SetAccessorProperty(
+ Local<Name> name,
+ Local<FunctionTemplate> getter = Local<FunctionTemplate>(),
+ Local<FunctionTemplate> setter = Local<FunctionTemplate>(),
+ PropertyAttribute attribute = None, AccessControl settings = DEFAULT);
+
+ /**
+ * Whenever the property with the given name is accessed on objects
+ * created from this Template the getter and setter callbacks
+ * are called instead of getting and setting the property directly
+ * on the JavaScript object.
+ *
+ * \param name The name of the property for which an accessor is added.
+ * \param getter The callback to invoke when getting the property.
+ * \param setter The callback to invoke when setting the property.
+ * \param data A piece of data that will be passed to the getter and setter
+ * callbacks whenever they are invoked.
+ * \param settings Access control settings for the accessor. This is a bit
+   * field consisting of one or more of
+ * DEFAULT = 0, ALL_CAN_READ = 1, or ALL_CAN_WRITE = 2.
+ * The default is to not allow cross-context access.
+ * ALL_CAN_READ means that all cross-context reads are allowed.
+ * ALL_CAN_WRITE means that all cross-context writes are allowed.
+ * The combination ALL_CAN_READ | ALL_CAN_WRITE can be used to allow all
+ * cross-context access.
+ * \param attribute The attributes of the property for which an accessor
+ * is added.
+ * \param signature The signature describes valid receivers for the accessor
+ * and is used to perform implicit instance checks against them. If the
+ * receiver is incompatible (i.e. is not an instance of the constructor as
+ * defined by FunctionTemplate::HasInstance()), an implicit TypeError is
+ * thrown and no callback is invoked.
+ */
+ void SetNativeDataProperty(
+ Local<String> name, AccessorGetterCallback getter,
+ AccessorSetterCallback setter = nullptr,
+ Local<Value> data = Local<Value>(), PropertyAttribute attribute = None,
+ Local<AccessorSignature> signature = Local<AccessorSignature>(),
+ AccessControl settings = DEFAULT,
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
+ void SetNativeDataProperty(
+ Local<Name> name, AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter = nullptr,
+ Local<Value> data = Local<Value>(), PropertyAttribute attribute = None,
+ Local<AccessorSignature> signature = Local<AccessorSignature>(),
+ AccessControl settings = DEFAULT,
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
+
+ /**
+ * Like SetNativeDataProperty, but V8 will replace the native data property
+ * with a real data property on first access.
+ */
+ void SetLazyDataProperty(
+ Local<Name> name, AccessorNameGetterCallback getter,
+ Local<Value> data = Local<Value>(), PropertyAttribute attribute = None,
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
+
+ /**
+ * During template instantiation, sets the value with the intrinsic property
+ * from the correct context.
+ */
+ void SetIntrinsicDataProperty(Local<Name> name, Intrinsic intrinsic,
+ PropertyAttribute attribute = None);
+
+ private:
+ Template();
+
+ friend class ObjectTemplate;
+ friend class FunctionTemplate;
+};
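+
+// A small sketch of populating a template (illustrative; VersionGetter is a
+// hypothetical AccessorNameGetterCallback):
+//
+// \code
+//   v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
+//   templ->Set(isolate, "answer", v8::Number::New(isolate, 42));
+//   // Computed by VersionGetter on first access, then replaced by a plain
+//   // data property.
+//   templ->SetLazyDataProperty(
+//       v8::String::NewFromUtf8Literal(isolate, "version"), VersionGetter);
+// \endcode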
+
+// TODO(dcarney): Replace GenericNamedPropertyFooCallback with just
+// NamedPropertyFooCallback.
+
+/**
+ * Interceptor for get requests on an object.
+ *
+ * Use `info.GetReturnValue().Set()` to set the return value of the
+ * intercepted get request.
+ *
+ * \param property The name of the property for which the request was
+ * intercepted.
+ * \param info Information about the intercepted request, such as
+ * isolate, receiver, return value, or whether running in `'use strict'` mode.
+ * See `PropertyCallbackInfo`.
+ *
+ * \code
+ * void GetterCallback(
+ * Local<Name> name,
+ * const v8::PropertyCallbackInfo<v8::Value>& info) {
+ * info.GetReturnValue().Set(v8_num(42));
+ * }
+ *
+ * v8::Local<v8::FunctionTemplate> templ =
+ * v8::FunctionTemplate::New(isolate);
+ * templ->InstanceTemplate()->SetHandler(
+ * v8::NamedPropertyHandlerConfiguration(GetterCallback));
+ * LocalContext env;
+ * env->Global()
+ * ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
+ * .ToLocalChecked()
+ * ->NewInstance(env.local())
+ * .ToLocalChecked())
+ * .FromJust();
+ * v8::Local<v8::Value> result = CompileRun("obj.a = 17; obj.a");
+ * CHECK(v8_num(42)->Equals(env.local(), result).FromJust());
+ * \endcode
+ *
+ * See also `ObjectTemplate::SetHandler`.
+ */
+using GenericNamedPropertyGetterCallback =
+ void (*)(Local<Name> property, const PropertyCallbackInfo<Value>& info);
+
+/**
+ * Interceptor for set requests on an object.
+ *
+ * Use `info.GetReturnValue()` to indicate whether the request was intercepted
+ * or not. If the setter successfully intercepts the request, i.e., if the
+ * request should not be further executed, call
+ * `info.GetReturnValue().Set(value)`. If the setter
+ * did not intercept the request, i.e., if the request should be handled as
+ * if no interceptor is present, do not call `Set()`.
+ *
+ * \param property The name of the property for which the request was
+ * intercepted.
+ * \param value The value which the property will have if the request
+ * is not intercepted.
+ * \param info Information about the intercepted request, such as
+ * isolate, receiver, return value, or whether running in `'use strict'` mode.
+ * See `PropertyCallbackInfo`.
+ *
+ * See also
+ * `ObjectTemplate::SetHandler.`
+ */
+using GenericNamedPropertySetterCallback =
+ void (*)(Local<Name> property, Local<Value> value,
+ const PropertyCallbackInfo<Value>& info);
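+
+// A setter sketch that swallows writes to a single property (illustrative):
+//
+// \code
+//   void SetterCallback(v8::Local<v8::Name> property,
+//                       v8::Local<v8::Value> value,
+//                       const v8::PropertyCallbackInfo<v8::Value>& info) {
+//     // Intercept writes to "x"; all other writes fall through as if no
+//     // interceptor were present.
+//     if (property->StrictEquals(
+//             v8::String::NewFromUtf8Literal(info.GetIsolate(), "x"))) {
+//       info.GetReturnValue().Set(value);
+//     }
+//   }
+// \endcode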
+
+/**
+ * Intercepts all requests that query the attributes of the
+ * property, e.g., getOwnPropertyDescriptor(), propertyIsEnumerable(), and
+ * defineProperty().
+ *
+ * Use `info.GetReturnValue().Set(value)` to set the property attributes. The
+ * value is an integer encoding a `v8::PropertyAttribute`.
+ *
+ * \param property The name of the property for which the request was
+ * intercepted.
+ * \param info Information about the intercepted request, such as
+ * isolate, receiver, return value, or whether running in `'use strict'` mode.
+ * See `PropertyCallbackInfo`.
+ *
+ * \note Some functions query the property attributes internally, even though
+ * they do not return the attributes. For example, `hasOwnProperty()` can
+ * trigger this interceptor depending on the state of the object.
+ *
+ * See also
+ * `ObjectTemplate::SetHandler.`
+ */
+using GenericNamedPropertyQueryCallback =
+ void (*)(Local<Name> property, const PropertyCallbackInfo<Integer>& info);
+
+/**
+ * Interceptor for delete requests on an object.
+ *
+ * Use `info.GetReturnValue()` to indicate whether the request was intercepted
+ * or not. If the deleter successfully intercepts the request, i.e., if the
+ * request should not be further executed, call
+ * `info.GetReturnValue().Set(value)` with a boolean `value`. The `value` is
+ * used as the return value of `delete`.
+ *
+ * \param property The name of the property for which the request was
+ * intercepted.
+ * \param info Information about the intercepted request, such as
+ * isolate, receiver, return value, or whether running in `'use strict'` mode.
+ * See `PropertyCallbackInfo`.
+ *
+ * \note If you need to mimic the behavior of `delete`, i.e., throw in strict
+ * mode instead of returning false, use `info.ShouldThrowOnError()` to determine
+ * if you are in strict mode.
+ *
+ * See also `ObjectTemplate::SetHandler.`
+ */
+using GenericNamedPropertyDeleterCallback =
+ void (*)(Local<Name> property, const PropertyCallbackInfo<Boolean>& info);
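+
+// A deleter sketch that refuses deletion and honors strict mode
+// (illustrative):
+//
+// \code
+//   void DeleterCallback(v8::Local<v8::Name> property,
+//                        const v8::PropertyCallbackInfo<v8::Boolean>& info) {
+//     if (info.ShouldThrowOnError()) {
+//       // Mimic strict-mode `delete`: throw instead of returning false.
+//       v8::Isolate* isolate = info.GetIsolate();
+//       isolate->ThrowException(v8::Exception::TypeError(
+//           v8::String::NewFromUtf8Literal(isolate, "cannot delete")));
+//     }
+//     info.GetReturnValue().Set(false);  // Intercepted; deletion refused.
+//   }
+// \endcode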
+
+/**
+ * Returns an array containing the names of the properties the named
+ * property getter intercepts.
+ *
+ * Note: The values in the array must be of type v8::Name.
+ */
+using GenericNamedPropertyEnumeratorCallback =
+ void (*)(const PropertyCallbackInfo<Array>& info);
+
+/**
+ * Interceptor for defineProperty requests on an object.
+ *
+ * Use `info.GetReturnValue()` to indicate whether the request was intercepted
+ * or not. If the definer successfully intercepts the request, i.e., if the
+ * request should not be further executed, call
+ * `info.GetReturnValue().Set(value)`. If the definer
+ * did not intercept the request, i.e., if the request should be handled as
+ * if no interceptor is present, do not call `Set()`.
+ *
+ * \param property The name of the property for which the request was
+ * intercepted.
+ * \param desc The property descriptor which is used to define the
+ * property if the request is not intercepted.
+ * \param info Information about the intercepted request, such as
+ * isolate, receiver, return value, or whether running in `'use strict'` mode.
+ * See `PropertyCallbackInfo`.
+ *
+ * See also `ObjectTemplate::SetHandler`.
+ */
+using GenericNamedPropertyDefinerCallback =
+ void (*)(Local<Name> property, const PropertyDescriptor& desc,
+ const PropertyCallbackInfo<Value>& info);
+
+/**
+ * Interceptor for getOwnPropertyDescriptor requests on an object.
+ *
+ * Use `info.GetReturnValue().Set()` to set the return value of the
+ * intercepted request. The return value must be an object that
+ * can be converted to a PropertyDescriptor, e.g., a `v8::Value` returned from
+ * `v8::Object::GetOwnPropertyDescriptor`.
+ *
+ * \param property The name of the property for which the request was
+ * intercepted.
+ * \param info Information about the intercepted request, such as
+ * isolate, receiver, return value, or whether running in `'use strict'` mode.
+ * See `PropertyCallbackInfo`.
+ *
+ * \note If GetOwnPropertyDescriptor is intercepted, it will
+ * always return true, i.e., indicate that the property was found.
+ *
+ * See also `ObjectTemplate::SetHandler`.
+ */
+using GenericNamedPropertyDescriptorCallback =
+ void (*)(Local<Name> property, const PropertyCallbackInfo<Value>& info);
+
+/**
+ * See `v8::GenericNamedPropertyGetterCallback`.
+ */
+using IndexedPropertyGetterCallback =
+ void (*)(uint32_t index, const PropertyCallbackInfo<Value>& info);
+
+/**
+ * See `v8::GenericNamedPropertySetterCallback`.
+ */
+using IndexedPropertySetterCallback =
+ void (*)(uint32_t index, Local<Value> value,
+ const PropertyCallbackInfo<Value>& info);
+
+/**
+ * See `v8::GenericNamedPropertyQueryCallback`.
+ */
+using IndexedPropertyQueryCallback =
+ void (*)(uint32_t index, const PropertyCallbackInfo<Integer>& info);
+
+/**
+ * See `v8::GenericNamedPropertyDeleterCallback`.
+ */
+using IndexedPropertyDeleterCallback =
+ void (*)(uint32_t index, const PropertyCallbackInfo<Boolean>& info);
+
+/**
+ * Returns an array containing the indices of the properties the indexed
+ * property getter intercepts.
+ *
+ * Note: The values in the array must be uint32_t.
+ */
+using IndexedPropertyEnumeratorCallback =
+ void (*)(const PropertyCallbackInfo<Array>& info);
+
+/**
+ * See `v8::GenericNamedPropertyDefinerCallback`.
+ */
+using IndexedPropertyDefinerCallback =
+ void (*)(uint32_t index, const PropertyDescriptor& desc,
+ const PropertyCallbackInfo<Value>& info);
+
+/**
+ * See `v8::GenericNamedPropertyDescriptorCallback`.
+ */
+using IndexedPropertyDescriptorCallback =
+ void (*)(uint32_t index, const PropertyCallbackInfo<Value>& info);
+
+/**
+ * Returns true if the given context should be allowed to access the given
+ * object.
+ */
+using AccessCheckCallback = bool (*)(Local<Context> accessing_context,
+ Local<Object> accessed_object,
+ Local<Value> data);
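+
+// A callback sketch that permits access only between contexts sharing a
+// security token (illustrative):
+//
+// \code
+//   bool AccessCheck(v8::Local<v8::Context> accessing_context,
+//                    v8::Local<v8::Object> accessed_object,
+//                    v8::Local<v8::Value> data) {
+//     v8::Local<v8::Context> accessed_context =
+//         accessed_object->GetCreationContext().ToLocalChecked();
+//     return accessing_context->GetSecurityToken()->StrictEquals(
+//         accessed_context->GetSecurityToken());
+//   }
+// \endcode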
+
+enum class ConstructorBehavior { kThrow, kAllow };
+
+/**
+ * A FunctionTemplate is used to create functions at runtime. There
+ * can only be one function created from a FunctionTemplate in a
+ * context. The lifetime of the created function is equal to the
+ * lifetime of the context. So if the embedder needs to create
+ * temporary functions that can be collected, using Scripts is
+ * preferred.
+ *
+ * Any modification of a FunctionTemplate after first instantiation will trigger
+ * a crash.
+ *
+ * A FunctionTemplate can have properties; these properties are added to the
+ * function object when it is created.
+ *
+ * A FunctionTemplate has a corresponding instance template which is
+ * used to create object instances when the function is used as a
+ * constructor. Properties added to the instance template are added to
+ * each object instance.
+ *
+ * A FunctionTemplate can have a prototype template. The prototype template
+ * is used to create the prototype object of the function.
+ *
+ * The following example shows how to use a FunctionTemplate:
+ *
+ * \code
+ * v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
+ * t->Set(isolate, "func_property", v8::Number::New(isolate, 1));
+ *
+ * v8::Local<v8::Template> proto_t = t->PrototypeTemplate();
+ * proto_t->Set(isolate,
+ * "proto_method",
+ * v8::FunctionTemplate::New(isolate, InvokeCallback));
+ * proto_t->Set(isolate, "proto_const", v8::Number::New(isolate, 2));
+ *
+ * v8::Local<v8::ObjectTemplate> instance_t = t->InstanceTemplate();
+ * instance_t->SetAccessor(
+ *     String::NewFromUtf8Literal(isolate, "instance_accessor"),
+ *     InstanceAccessorCallback);
+ * instance_t->SetHandler(
+ * NamedPropertyHandlerConfiguration(PropertyHandlerCallback));
+ * instance_t->Set(String::NewFromUtf8Literal(isolate, "instance_property"),
+ * Number::New(isolate, 3));
+ *
+ * v8::Local<v8::Function> function = t->GetFunction();
+ * v8::Local<v8::Object> instance = function->NewInstance();
+ * \endcode
+ *
+ * Let's use "function" as the JS variable name of the function object
+ * and "instance" for the instance object created above. The function
+ * and the instance will have the following properties:
+ *
+ * \code
+ * func_property in function == true;
+ * function.func_property == 1;
+ *
+ * function.prototype.proto_method() invokes 'InvokeCallback'
+ * function.prototype.proto_const == 2;
+ *
+ * instance instanceof function == true;
+ * instance.instance_accessor calls 'InstanceAccessorCallback'
+ * instance.instance_property == 3;
+ * \endcode
+ *
+ * A FunctionTemplate can inherit from another one by calling the
+ * FunctionTemplate::Inherit method. The following graph illustrates
+ * the semantics of inheritance:
+ *
+ * \code
+ * FunctionTemplate Parent -> Parent() . prototype -> { }
+ * ^ ^
+ * | Inherit(Parent) | .__proto__
+ * | |
+ * FunctionTemplate Child -> Child() . prototype -> { }
+ * \endcode
+ *
+ * A FunctionTemplate 'Child' inherits from 'Parent'; the prototype
+ * object of the Child() function has __proto__ pointing to the
+ * Parent() function's prototype object. An instance of the Child
+ * function has all properties on Parent's instance templates.
+ *
+ * Let Parent be the FunctionTemplate initialized in the previous
+ * section and create a Child FunctionTemplate by:
+ *
+ * \code
+ * Local<FunctionTemplate> parent = t;
+ * Local<FunctionTemplate> child = FunctionTemplate::New();
+ * child->Inherit(parent);
+ *
+ * Local<Function> child_function = child->GetFunction();
+ * Local<Object> child_instance = child_function->NewInstance();
+ * \endcode
+ *
+ * The Child function and Child instance will have the following
+ * properties:
+ *
+ * \code
+ * child_function.prototype.__proto__ == function.prototype;
+ * child_instance.instance_accessor calls 'InstanceAccessorCallback'
+ * child_instance.instance_property == 3;
+ * \endcode
+ *
+ * The additional 'c_function' parameter refers to a fast API call, which
+ * must not trigger GC or JavaScript execution, or call into V8 in other
+ * ways. For more information on how to define them, see
+ * include/v8-fast-api-calls.h. Please note that this feature is still
+ * experimental.
+ */
+class V8_EXPORT FunctionTemplate : public Template {
+ public:
+ /** Creates a function template.*/
+ static Local<FunctionTemplate> New(
+ Isolate* isolate, FunctionCallback callback = nullptr,
+ Local<Value> data = Local<Value>(),
+ Local<Signature> signature = Local<Signature>(), int length = 0,
+ ConstructorBehavior behavior = ConstructorBehavior::kAllow,
+ SideEffectType side_effect_type = SideEffectType::kHasSideEffect,
+ const CFunction* c_function = nullptr, uint16_t instance_type = 0,
+ uint16_t allowed_receiver_instance_type_range_start = 0,
+ uint16_t allowed_receiver_instance_type_range_end = 0);
+
+ /** Creates a function template for multiple overloaded fast API calls.*/
+ static Local<FunctionTemplate> NewWithCFunctionOverloads(
+ Isolate* isolate, FunctionCallback callback = nullptr,
+ Local<Value> data = Local<Value>(),
+ Local<Signature> signature = Local<Signature>(), int length = 0,
+ ConstructorBehavior behavior = ConstructorBehavior::kAllow,
+ SideEffectType side_effect_type = SideEffectType::kHasSideEffect,
+ const MemorySpan<const CFunction>& c_function_overloads = {});
+
+ /**
+ * Creates a function template backed/cached by a private property.
+ */
+ static Local<FunctionTemplate> NewWithCache(
+ Isolate* isolate, FunctionCallback callback,
+ Local<Private> cache_property, Local<Value> data = Local<Value>(),
+ Local<Signature> signature = Local<Signature>(), int length = 0,
+ SideEffectType side_effect_type = SideEffectType::kHasSideEffect);
+
+ /** Returns the unique function instance in the current execution context.*/
+ V8_WARN_UNUSED_RESULT MaybeLocal<Function> GetFunction(
+ Local<Context> context);
+
+ /**
+ * Similar to Context::NewRemoteContext, this creates an instance that
+ * isn't backed by an actual object.
+ *
+ * The InstanceTemplate of this FunctionTemplate must have access checks with
+ * handlers installed.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewRemoteInstance();
+
+ /**
+ * Set the call-handler callback for a FunctionTemplate. This
+ * callback is called whenever the function created from this
+ * FunctionTemplate is called. The 'c_function' represents a fast
+ * API call, see the comment above the class declaration.
+ */
+ void SetCallHandler(
+ FunctionCallback callback, Local<Value> data = Local<Value>(),
+ SideEffectType side_effect_type = SideEffectType::kHasSideEffect,
+ const MemorySpan<const CFunction>& c_function_overloads = {});
+
+ /** Set the predefined length property for the FunctionTemplate. */
+ void SetLength(int length);
+
+ /** Get the InstanceTemplate. */
+ Local<ObjectTemplate> InstanceTemplate();
+
+ /**
+ * Causes the function template to inherit from a parent function template.
+ * This means the function's prototype.__proto__ is set to the parent
+ * function's prototype.
+ **/
+ void Inherit(Local<FunctionTemplate> parent);
+
+ /**
+ * A PrototypeTemplate is the template used to create the prototype object
+ * of the function created by this template.
+ */
+ Local<ObjectTemplate> PrototypeTemplate();
+
+ /**
+ * A PrototypeProviderTemplate is another function template whose prototype
+ * property is used for this template. This is mutually exclusive with setting
+ * a prototype template indirectly by calling PrototypeTemplate() or using
+ * Inherit().
+ **/
+ void SetPrototypeProviderTemplate(Local<FunctionTemplate> prototype_provider);
+
+ /**
+ * Set the class name of the FunctionTemplate. This is used for
+ * printing objects created with the function created from the
+ * FunctionTemplate as its constructor.
+ */
+ void SetClassName(Local<String> name);
+
+ /**
+ * When set to true, no access check will be performed on the receiver of a
+ * function call. Currently defaults to true, but this is subject to change.
+ */
+ void SetAcceptAnyReceiver(bool value);
+
+ /**
+ * Sets the ReadOnly flag in the attributes of the 'prototype' property
+ * of functions created from this FunctionTemplate to true.
+ */
+ void ReadOnlyPrototype();
+
+ /**
+ * Removes the prototype property from functions created from this
+ * FunctionTemplate.
+ */
+ void RemovePrototype();
+
+ /**
+ * Returns true if the given object is an instance of this function
+ * template.
+ */
+ bool HasInstance(Local<Value> object);
+
+ /**
+ * Returns true if the given value is an API object that was constructed by an
+ * instance of this function template (without checking for inheriting
+ * function templates).
+ *
+ * This is an experimental feature and may still change significantly.
+ */
+ bool IsLeafTemplateForApiObject(v8::Local<v8::Value> value) const;
+
+ V8_INLINE static FunctionTemplate* Cast(Data* data);
+
+ private:
+ FunctionTemplate();
+
+ static void CheckCast(Data* that);
+ friend class Context;
+ friend class ObjectTemplate;
+};
+
+/**
+ * Configuration flags for v8::NamedPropertyHandlerConfiguration or
+ * v8::IndexedPropertyHandlerConfiguration.
+ */
+enum class PropertyHandlerFlags {
+ /**
+ * None.
+ */
+ kNone = 0,
+
+ /**
+ * See ALL_CAN_READ above.
+ */
+ kAllCanRead = 1,
+
+  /**
+   * Will not call into interceptor for properties on the receiver or
+   * prototype chain, i.e., only call into interceptor for properties that do
+   * not exist. Currently only valid for named interceptors.
+   */
+ kNonMasking = 1 << 1,
+
+ /**
+ * Will not call into interceptor for symbol lookup. Only meaningful for
+ * named interceptors.
+ */
+ kOnlyInterceptStrings = 1 << 2,
+
+ /**
+ * The getter, query, enumerator callbacks do not produce side effects.
+ */
+ kHasNoSideEffect = 1 << 3,
+};
+
+struct NamedPropertyHandlerConfiguration {
+ NamedPropertyHandlerConfiguration(
+ GenericNamedPropertyGetterCallback getter,
+ GenericNamedPropertySetterCallback setter,
+ GenericNamedPropertyQueryCallback query,
+ GenericNamedPropertyDeleterCallback deleter,
+ GenericNamedPropertyEnumeratorCallback enumerator,
+ GenericNamedPropertyDefinerCallback definer,
+ GenericNamedPropertyDescriptorCallback descriptor,
+ Local<Value> data = Local<Value>(),
+ PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
+ : getter(getter),
+ setter(setter),
+ query(query),
+ deleter(deleter),
+ enumerator(enumerator),
+ definer(definer),
+ descriptor(descriptor),
+ data(data),
+ flags(flags) {}
+
+ NamedPropertyHandlerConfiguration(
+ /** Note: getter is required */
+ GenericNamedPropertyGetterCallback getter = nullptr,
+ GenericNamedPropertySetterCallback setter = nullptr,
+ GenericNamedPropertyQueryCallback query = nullptr,
+ GenericNamedPropertyDeleterCallback deleter = nullptr,
+ GenericNamedPropertyEnumeratorCallback enumerator = nullptr,
+ Local<Value> data = Local<Value>(),
+ PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
+ : getter(getter),
+ setter(setter),
+ query(query),
+ deleter(deleter),
+ enumerator(enumerator),
+ definer(nullptr),
+ descriptor(nullptr),
+ data(data),
+ flags(flags) {}
+
+ NamedPropertyHandlerConfiguration(
+ GenericNamedPropertyGetterCallback getter,
+ GenericNamedPropertySetterCallback setter,
+ GenericNamedPropertyDescriptorCallback descriptor,
+ GenericNamedPropertyDeleterCallback deleter,
+ GenericNamedPropertyEnumeratorCallback enumerator,
+ GenericNamedPropertyDefinerCallback definer,
+ Local<Value> data = Local<Value>(),
+ PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
+ : getter(getter),
+ setter(setter),
+ query(nullptr),
+ deleter(deleter),
+ enumerator(enumerator),
+ definer(definer),
+ descriptor(descriptor),
+ data(data),
+ flags(flags) {}
+
+ GenericNamedPropertyGetterCallback getter;
+ GenericNamedPropertySetterCallback setter;
+ GenericNamedPropertyQueryCallback query;
+ GenericNamedPropertyDeleterCallback deleter;
+ GenericNamedPropertyEnumeratorCallback enumerator;
+ GenericNamedPropertyDefinerCallback definer;
+ GenericNamedPropertyDescriptorCallback descriptor;
+ Local<Value> data;
+ PropertyHandlerFlags flags;
+};
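+
+// Wiring a configuration into an ObjectTemplate (illustrative; GetterCallback
+// is the interceptor shown further above):
+//
+// \code
+//   v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
+//   templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
+//       GetterCallback, nullptr, nullptr, nullptr, nullptr,
+//       v8::Local<v8::Value>(),
+//       v8::PropertyHandlerFlags::kOnlyInterceptStrings));
+// \endcode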
+
+struct IndexedPropertyHandlerConfiguration {
+ IndexedPropertyHandlerConfiguration(
+ IndexedPropertyGetterCallback getter,
+ IndexedPropertySetterCallback setter, IndexedPropertyQueryCallback query,
+ IndexedPropertyDeleterCallback deleter,
+ IndexedPropertyEnumeratorCallback enumerator,
+ IndexedPropertyDefinerCallback definer,
+ IndexedPropertyDescriptorCallback descriptor,
+ Local<Value> data = Local<Value>(),
+ PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
+ : getter(getter),
+ setter(setter),
+ query(query),
+ deleter(deleter),
+ enumerator(enumerator),
+ definer(definer),
+ descriptor(descriptor),
+ data(data),
+ flags(flags) {}
+
+ IndexedPropertyHandlerConfiguration(
+ /** Note: getter is required */
+ IndexedPropertyGetterCallback getter = nullptr,
+ IndexedPropertySetterCallback setter = nullptr,
+ IndexedPropertyQueryCallback query = nullptr,
+ IndexedPropertyDeleterCallback deleter = nullptr,
+ IndexedPropertyEnumeratorCallback enumerator = nullptr,
+ Local<Value> data = Local<Value>(),
+ PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
+ : getter(getter),
+ setter(setter),
+ query(query),
+ deleter(deleter),
+ enumerator(enumerator),
+ definer(nullptr),
+ descriptor(nullptr),
+ data(data),
+ flags(flags) {}
+
+ IndexedPropertyHandlerConfiguration(
+ IndexedPropertyGetterCallback getter,
+ IndexedPropertySetterCallback setter,
+ IndexedPropertyDescriptorCallback descriptor,
+ IndexedPropertyDeleterCallback deleter,
+ IndexedPropertyEnumeratorCallback enumerator,
+ IndexedPropertyDefinerCallback definer,
+ Local<Value> data = Local<Value>(),
+ PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
+ : getter(getter),
+ setter(setter),
+ query(nullptr),
+ deleter(deleter),
+ enumerator(enumerator),
+ definer(definer),
+ descriptor(descriptor),
+ data(data),
+ flags(flags) {}
+
+ IndexedPropertyGetterCallback getter;
+ IndexedPropertySetterCallback setter;
+ IndexedPropertyQueryCallback query;
+ IndexedPropertyDeleterCallback deleter;
+ IndexedPropertyEnumeratorCallback enumerator;
+ IndexedPropertyDefinerCallback definer;
+ IndexedPropertyDescriptorCallback descriptor;
+ Local<Value> data;
+ PropertyHandlerFlags flags;
+};
+
+/**
+ * An ObjectTemplate is used to create objects at runtime.
+ *
+ * Properties added to an ObjectTemplate are added to each object
+ * created from the ObjectTemplate.
+ */
+class V8_EXPORT ObjectTemplate : public Template {
+ public:
+ /** Creates an ObjectTemplate. */
+ static Local<ObjectTemplate> New(
+ Isolate* isolate,
+ Local<FunctionTemplate> constructor = Local<FunctionTemplate>());
+
+ /** Creates a new instance of this template.*/
+ V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstance(Local<Context> context);
+
+ /**
+ * Sets an accessor on the object template.
+ *
+ * Whenever the property with the given name is accessed on objects
+ * created from this ObjectTemplate the getter and setter callbacks
+ * are called instead of getting and setting the property directly
+ * on the JavaScript object.
+ *
+ * \param name The name of the property for which an accessor is added.
+ * \param getter The callback to invoke when getting the property.
+ * \param setter The callback to invoke when setting the property.
+ * \param data A piece of data that will be passed to the getter and setter
+ * callbacks whenever they are invoked.
+ * \param settings Access control settings for the accessor. This is a bit
+   * field consisting of one or more of
+ * DEFAULT = 0, ALL_CAN_READ = 1, or ALL_CAN_WRITE = 2.
+ * The default is to not allow cross-context access.
+ * ALL_CAN_READ means that all cross-context reads are allowed.
+ * ALL_CAN_WRITE means that all cross-context writes are allowed.
+ * The combination ALL_CAN_READ | ALL_CAN_WRITE can be used to allow all
+ * cross-context access.
+ * \param attribute The attributes of the property for which an accessor
+ * is added.
+ * \param signature The signature describes valid receivers for the accessor
+ * and is used to perform implicit instance checks against them. If the
+ * receiver is incompatible (i.e. is not an instance of the constructor as
+ * defined by FunctionTemplate::HasInstance()), an implicit TypeError is
+ * thrown and no callback is invoked.
+ */
+ void SetAccessor(
+ Local<String> name, AccessorGetterCallback getter,
+ AccessorSetterCallback setter = nullptr,
+ Local<Value> data = Local<Value>(), AccessControl settings = DEFAULT,
+ PropertyAttribute attribute = None,
+ Local<AccessorSignature> signature = Local<AccessorSignature>(),
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
+ void SetAccessor(
+ Local<Name> name, AccessorNameGetterCallback getter,
+ AccessorNameSetterCallback setter = nullptr,
+ Local<Value> data = Local<Value>(), AccessControl settings = DEFAULT,
+ PropertyAttribute attribute = None,
+ Local<AccessorSignature> signature = Local<AccessorSignature>(),
+ SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
+ SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
+
+ /**
+ * Sets a named property handler on the object template.
+ *
+ * Whenever a property whose name is a string or a symbol is accessed on
+ * objects created from this object template, the provided callback is
+ * invoked instead of accessing the property directly on the JavaScript
+ * object.
+ *
+ * @param configuration The NamedPropertyHandlerConfiguration that defines the
+ * callbacks to invoke when accessing a property.
+ */
+ void SetHandler(const NamedPropertyHandlerConfiguration& configuration);
+
+ /**
+ * Sets an indexed property handler on the object template.
+ *
+ * Whenever an indexed property is accessed on objects created from
+ * this object template, the provided callback is invoked instead of
+ * accessing the property directly on the JavaScript object.
+ *
+ * \param getter The callback to invoke when getting a property.
+ * \param setter The callback to invoke when setting a property.
+ * \param query The callback to invoke to check if an object has a property.
+ * \param deleter The callback to invoke when deleting a property.
+ * \param enumerator The callback to invoke to enumerate all the indexed
+ * properties of an object.
+ * \param data A piece of data that will be passed to the callbacks
+ * whenever they are invoked.
+ */
+ // TODO(dcarney): deprecate
+ void SetIndexedPropertyHandler(
+ IndexedPropertyGetterCallback getter,
+ IndexedPropertySetterCallback setter = nullptr,
+ IndexedPropertyQueryCallback query = nullptr,
+ IndexedPropertyDeleterCallback deleter = nullptr,
+ IndexedPropertyEnumeratorCallback enumerator = nullptr,
+ Local<Value> data = Local<Value>()) {
+ SetHandler(IndexedPropertyHandlerConfiguration(getter, setter, query,
+ deleter, enumerator, data));
+ }
+
+ /**
+ * Sets an indexed property handler on the object template.
+ *
+ * Whenever an indexed property is accessed on objects created from
+ * this object template, the provided callback is invoked instead of
+ * accessing the property directly on the JavaScript object.
+ *
+ * @param configuration The IndexedPropertyHandlerConfiguration that defines
+ * the callbacks to invoke when accessing a property.
+ */
+ void SetHandler(const IndexedPropertyHandlerConfiguration& configuration);
+
+ /**
+ * Sets the callback to be used when calling instances created from
+ * this template as a function. If no callback is set, instances
+ * behave like normal JavaScript objects that cannot be called as a
+ * function.
+ */
+ void SetCallAsFunctionHandler(FunctionCallback callback,
+ Local<Value> data = Local<Value>());
+
+ /**
+ * Mark object instances of the template as undetectable.
+ *
+ * In many ways, undetectable objects behave as though they are not
+ * there. They behave like 'undefined' in conditionals and when
+ * printed. However, properties can be accessed and called as on
+ * normal objects.
+ */
+ void MarkAsUndetectable();
+
+ /**
+ * Sets access check callback on the object template and enables access
+ * checks.
+ *
+ * When accessing properties on instances of this object template,
+ * the access check callback will be called to determine whether or
+ * not to allow cross-context access to the properties.
+ */
+ void SetAccessCheckCallback(AccessCheckCallback callback,
+ Local<Value> data = Local<Value>());
+
+ /**
+ * Like SetAccessCheckCallback but invokes an interceptor on failed access
+ * checks instead of looking up all-can-read properties. You can only use
+ * either this method or SetAccessCheckCallback, but not both at the same
+ * time.
+ */
+ void SetAccessCheckCallbackAndHandler(
+ AccessCheckCallback callback,
+ const NamedPropertyHandlerConfiguration& named_handler,
+ const IndexedPropertyHandlerConfiguration& indexed_handler,
+ Local<Value> data = Local<Value>());
+
+ /**
+ * Gets the number of internal fields for objects generated from
+ * this template.
+ */
+ int InternalFieldCount() const;
+
+ /**
+ * Sets the number of internal fields for objects generated from
+ * this template.
+ */
+ void SetInternalFieldCount(int value);
+
+ /**
+ * Returns true if the object will be an immutable prototype exotic object.
+ */
+ bool IsImmutableProto() const;
+
+ /**
+ * Makes the ObjectTemplate for an immutable prototype exotic object, with an
+ * immutable __proto__.
+ */
+ void SetImmutableProto();
+
+ /**
+ * Support for TC39 "dynamic code brand checks" proposal.
+ *
+   * This API allows marking (and querying) objects as "code like", which
+   * causes them to be treated like Strings in the context of eval and the
+   * Function constructor.
+ *
+ * Reference: https://github.com/tc39/proposal-dynamic-code-brand-checks
+ */
+ void SetCodeLike();
+ bool IsCodeLike() const;
+
+ V8_INLINE static ObjectTemplate* Cast(Data* data);
+
+ private:
+ ObjectTemplate();
+ static Local<ObjectTemplate> New(internal::Isolate* isolate,
+ Local<FunctionTemplate> constructor);
+ static void CheckCast(Data* that);
+ friend class FunctionTemplate;
+};
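+
+// A sketch combining internal fields with instantiation (illustrative;
+// |state| is a hypothetical embedder-owned object):
+//
+// \code
+//   v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
+//   templ->SetInternalFieldCount(1);
+//   v8::Local<v8::Object> obj = templ->NewInstance(context).ToLocalChecked();
+//   obj->SetAlignedPointerInInternalField(0, state);
+// \endcode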
+
+/**
+ * A Signature specifies which receiver is valid for a function.
+ *
+ * A receiver matches a given signature if the receiver (or any of its
+ * hidden prototypes) was created from the signature's FunctionTemplate, or
+ * from a FunctionTemplate that inherits directly or indirectly from the
+ * signature's FunctionTemplate.
+ */
+class V8_EXPORT Signature : public Data {
+ public:
+ static Local<Signature> New(
+ Isolate* isolate,
+ Local<FunctionTemplate> receiver = Local<FunctionTemplate>());
+
+ V8_INLINE static Signature* Cast(Data* data);
+
+ private:
+ Signature();
+
+ static void CheckCast(Data* that);
+};
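+
+// Restricting a method's receivers with a signature (illustrative sketch;
+// MethodCallback is a hypothetical FunctionCallback):
+//
+// \code
+//   v8::Local<v8::FunctionTemplate> klass =
+//       v8::FunctionTemplate::New(isolate);
+//   v8::Local<v8::Signature> sig = v8::Signature::New(isolate, klass);
+//   // The method only runs when the receiver was created from |klass| (or a
+//   // template inheriting from it); otherwise a TypeError is thrown.
+//   klass->PrototypeTemplate()->Set(
+//       v8::String::NewFromUtf8Literal(isolate, "method"),
+//       v8::FunctionTemplate::New(isolate, MethodCallback,
+//                                 v8::Local<v8::Value>(), sig));
+// \endcode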
+
+/**
+ * An AccessorSignature specifies which receivers are valid parameters
+ * to an accessor callback.
+ */
+class V8_EXPORT AccessorSignature : public Data {
+ public:
+ static Local<AccessorSignature> New(
+ Isolate* isolate,
+ Local<FunctionTemplate> receiver = Local<FunctionTemplate>());
+
+ V8_INLINE static AccessorSignature* Cast(Data* data);
+
+ private:
+ AccessorSignature();
+
+ static void CheckCast(Data* that);
+};
+
+// --- Implementation ---
+
+void Template::Set(Isolate* isolate, const char* name, Local<Data> value,
+ PropertyAttribute attributes) {
+ Set(String::NewFromUtf8(isolate, name, NewStringType::kInternalized)
+ .ToLocalChecked(),
+ value, attributes);
+}
+
+FunctionTemplate* FunctionTemplate::Cast(Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return reinterpret_cast<FunctionTemplate*>(data);
+}
+
+ObjectTemplate* ObjectTemplate::Cast(Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return reinterpret_cast<ObjectTemplate*>(data);
+}
+
+Signature* Signature::Cast(Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return reinterpret_cast<Signature*>(data);
+}
+
+AccessorSignature* AccessorSignature::Cast(Data* data) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(data);
+#endif
+ return reinterpret_cast<AccessorSignature*>(data);
+}
+
+} // namespace v8
+
+#endif // INCLUDE_V8_TEMPLATE_H_
diff --git a/deps/v8/include/v8-traced-handle.h b/deps/v8/include/v8-traced-handle.h
new file mode 100644
index 0000000000..15c9693ecb
--- /dev/null
+++ b/deps/v8/include/v8-traced-handle.h
@@ -0,0 +1,605 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_TRACED_HANDLE_H_
+#define INCLUDE_V8_TRACED_HANDLE_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+
+#include <atomic>
+#include <memory>
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-weak-callback-info.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Value;
+
+namespace internal {
+class BasicTracedReferenceExtractor;
+} // namespace internal
+
+namespace api_internal {
+V8_EXPORT internal::Address* GlobalizeTracedReference(
+ internal::Isolate* isolate, internal::Address* handle,
+ internal::Address* slot, bool has_destructor);
+V8_EXPORT void MoveTracedGlobalReference(internal::Address** from,
+ internal::Address** to);
+V8_EXPORT void CopyTracedGlobalReference(const internal::Address* const* from,
+ internal::Address** to);
+V8_EXPORT void DisposeTracedGlobal(internal::Address* global_handle);
+V8_EXPORT void SetFinalizationCallbackTraced(
+ internal::Address* location, void* parameter,
+ WeakCallbackInfo<void>::Callback callback);
+} // namespace api_internal
+
+/**
+ * Deprecated. Use |TracedReference<T>| instead.
+ */
+template <typename T>
+struct TracedGlobalTrait {};
+
+class TracedReferenceBase {
+ public:
+ /**
+   * Returns true if the reference is empty, i.e., has not been assigned an
+   * object.
+ */
+ bool IsEmpty() const { return val_ == nullptr; }
+
+ /**
+ * If non-empty, destroy the underlying storage cell. |IsEmpty| will return
+ * true after this call.
+ */
+ V8_INLINE void Reset();
+
+ /**
+ * Construct a Local<Value> from this handle.
+ */
+ V8_INLINE v8::Local<v8::Value> Get(v8::Isolate* isolate) const {
+ if (IsEmpty()) return Local<Value>();
+ return Local<Value>::New(isolate, reinterpret_cast<Value*>(val_));
+ }
+
+ /**
+ * Returns true if this TracedReference is empty, i.e., has not been
+ * assigned an object. This version of IsEmpty is thread-safe.
+ */
+ bool IsEmptyThreadSafe() const {
+ return this->GetSlotThreadSafe() == nullptr;
+ }
+
+ /**
+ * Assigns a wrapper class ID to the handle.
+ */
+ V8_INLINE void SetWrapperClassId(uint16_t class_id);
+
+ /**
+ * Returns the class ID previously assigned to this handle or 0 if no class ID
+ * was previously assigned.
+ */
+ V8_INLINE uint16_t WrapperClassId() const;
+
+ protected:
+ /**
+ * Update this reference in a thread-safe way.
+ */
+ void SetSlotThreadSafe(void* new_val) {
+ reinterpret_cast<std::atomic<void*>*>(&val_)->store(
+ new_val, std::memory_order_relaxed);
+ }
+
+ /**
+   * Get this reference in a thread-safe way.
+ */
+ const void* GetSlotThreadSafe() const {
+ return reinterpret_cast<std::atomic<const void*> const*>(&val_)->load(
+ std::memory_order_relaxed);
+ }
+
+ V8_EXPORT void CheckValue() const;
+
+ // val_ points to a GlobalHandles node.
+ internal::Address* val_ = nullptr;
+
+ friend class internal::BasicTracedReferenceExtractor;
+ template <typename F>
+ friend class Local;
+ template <typename U>
+ friend bool operator==(const TracedReferenceBase&, const Local<U>&);
+ friend bool operator==(const TracedReferenceBase&,
+ const TracedReferenceBase&);
+};
+
+/**
+ * A traced handle with copy and move semantics. The handle is to be used
+ * together with |v8::EmbedderHeapTracer| or as part of GarbageCollected objects
+ * (see v8-cppgc.h) and specifies edges from C++ objects to JavaScript.
+ *
+ * The exact semantics are:
+ * - Tracing garbage collections use |v8::EmbedderHeapTracer| or cppgc.
+ * - Non-tracing garbage collections consult
+ *   |v8::EmbedderRootsHandler::IsRoot()| to determine whether the handle
+ *   should be treated as a root.
+ *
+ * Note that the base class cannot be instantiated itself. Choose from
+ * - TracedGlobal
+ * - TracedReference
+ */
+template <typename T>
+class BasicTracedReference : public TracedReferenceBase {
+ public:
+ /**
+ * Construct a Local<T> from this handle.
+ */
+ Local<T> Get(Isolate* isolate) const { return Local<T>::New(isolate, *this); }
+
+ template <class S>
+ V8_INLINE BasicTracedReference<S>& As() const {
+ return reinterpret_cast<BasicTracedReference<S>&>(
+ const_cast<BasicTracedReference<T>&>(*this));
+ }
+
+ T* operator->() const {
+#ifdef V8_ENABLE_CHECKS
+ CheckValue();
+#endif // V8_ENABLE_CHECKS
+ return reinterpret_cast<T*>(val_);
+ }
+ T* operator*() const {
+#ifdef V8_ENABLE_CHECKS
+ CheckValue();
+#endif // V8_ENABLE_CHECKS
+ return reinterpret_cast<T*>(val_);
+ }
+
+ private:
+ enum DestructionMode { kWithDestructor, kWithoutDestructor };
+
+ /**
+ * An empty BasicTracedReference without storage cell.
+ */
+ BasicTracedReference() = default;
+
+ V8_INLINE static internal::Address* New(Isolate* isolate, T* that, void* slot,
+ DestructionMode destruction_mode);
+
+ friend class EmbedderHeapTracer;
+ template <typename F>
+ friend class Local;
+ friend class Object;
+ template <typename F>
+ friend class TracedGlobal;
+ template <typename F>
+ friend class TracedReference;
+ template <typename F>
+ friend class BasicTracedReference;
+ template <typename F>
+ friend class ReturnValue;
+};
+
+/**
+ * A traced handle with a destructor that clears the handle. For more details
+ * see BasicTracedReference.
+ */
+template <typename T>
+class TracedGlobal : public BasicTracedReference<T> {
+ public:
+ using BasicTracedReference<T>::Reset;
+
+ /**
+   * Destructor resetting the handle.
+ */
+ ~TracedGlobal() { this->Reset(); }
+
+ /**
+ * An empty TracedGlobal without storage cell.
+ */
+ TracedGlobal() : BasicTracedReference<T>() {}
+
+ /**
+ * Construct a TracedGlobal from a Local.
+ *
+ * When the Local is non-empty, a new storage cell is created
+ * pointing to the same object.
+ */
+ template <class S>
+ TracedGlobal(Isolate* isolate, Local<S> that) : BasicTracedReference<T>() {
+ this->val_ = this->New(isolate, that.val_, &this->val_,
+ BasicTracedReference<T>::kWithDestructor);
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ }
+
+ /**
+ * Move constructor initializing TracedGlobal from an existing one.
+ */
+ V8_INLINE TracedGlobal(TracedGlobal&& other) {
+ // Forward to operator=.
+ *this = std::move(other);
+ }
+
+ /**
+ * Move constructor initializing TracedGlobal from an existing one.
+ */
+ template <typename S>
+ V8_INLINE TracedGlobal(TracedGlobal<S>&& other) {
+ // Forward to operator=.
+ *this = std::move(other);
+ }
+
+ /**
+ * Copy constructor initializing TracedGlobal from an existing one.
+ */
+ V8_INLINE TracedGlobal(const TracedGlobal& other) {
+ // Forward to operator=;
+ *this = other;
+ }
+
+ /**
+ * Copy constructor initializing TracedGlobal from an existing one.
+ */
+ template <typename S>
+ V8_INLINE TracedGlobal(const TracedGlobal<S>& other) {
+ // Forward to operator=;
+ *this = other;
+ }
+
+ /**
+ * Move assignment operator initializing TracedGlobal from an existing one.
+ */
+ V8_INLINE TracedGlobal& operator=(TracedGlobal&& rhs);
+
+ /**
+ * Move assignment operator initializing TracedGlobal from an existing one.
+ */
+ template <class S>
+ V8_INLINE TracedGlobal& operator=(TracedGlobal<S>&& rhs);
+
+ /**
+ * Copy assignment operator initializing TracedGlobal from an existing one.
+ *
+ * Note: Prohibited when |other| has a finalization callback set through
+ * |SetFinalizationCallback|.
+ */
+ V8_INLINE TracedGlobal& operator=(const TracedGlobal& rhs);
+
+ /**
+ * Copy assignment operator initializing TracedGlobal from an existing one.
+ *
+ * Note: Prohibited when |other| has a finalization callback set through
+ * |SetFinalizationCallback|.
+ */
+ template <class S>
+ V8_INLINE TracedGlobal& operator=(const TracedGlobal<S>& rhs);
+
+ /**
+   * If non-empty, destroy the underlying storage cell and create a new one
+   * with the contents of |other| if |other| is non-empty.
+ */
+ template <class S>
+ V8_INLINE void Reset(Isolate* isolate, const Local<S>& other);
+
+ template <class S>
+ V8_INLINE TracedGlobal<S>& As() const {
+ return reinterpret_cast<TracedGlobal<S>&>(
+ const_cast<TracedGlobal<T>&>(*this));
+ }
+
+ /**
+ * Adds a finalization callback to the handle. The type of this callback is
+ * similar to WeakCallbackType::kInternalFields, i.e., it will pass the
+ * parameter and the first two internal fields of the object.
+ *
+ * The callback is then supposed to reset the handle in the callback. No
+ * further V8 API may be called in this callback. In case additional work
+ * involving V8 needs to be done, a second callback can be scheduled using
+ * WeakCallbackInfo<void>::SetSecondPassCallback.
+ */
+ V8_INLINE void SetFinalizationCallback(
+ void* parameter, WeakCallbackInfo<void>::Callback callback);
+};
+
+/**
+ * A traced handle without a destructor that clears the handle. The embedder
+ * needs to ensure that the handle is not accessed once the V8 object has been
+ * reclaimed. This can happen when the handle is not passed through the
+ * EmbedderHeapTracer. For more details see BasicTracedReference.
+ *
+ * The reference assumes the embedder has precise knowledge about references at
+ * all times. In case V8 needs to separately handle on-stack references, the
+ * embedder is required to set the stack start through
+ * |EmbedderHeapTracer::SetStackStart|.
+ */
+template <typename T>
+class TracedReference : public BasicTracedReference<T> {
+ public:
+ using BasicTracedReference<T>::Reset;
+
+ /**
+ * An empty TracedReference without storage cell.
+ */
+ TracedReference() : BasicTracedReference<T>() {}
+
+ /**
+ * Construct a TracedReference from a Local.
+ *
+ * When the Local is non-empty, a new storage cell is created
+ * pointing to the same object.
+ */
+ template <class S>
+ TracedReference(Isolate* isolate, Local<S> that) : BasicTracedReference<T>() {
+ this->val_ = this->New(isolate, that.val_, &this->val_,
+ BasicTracedReference<T>::kWithoutDestructor);
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ }
+
+ /**
+ * Move constructor initializing TracedReference from an
+ * existing one.
+ */
+ V8_INLINE TracedReference(TracedReference&& other) {
+ // Forward to operator=.
+ *this = std::move(other);
+ }
+
+ /**
+ * Move constructor initializing TracedReference from an
+ * existing one.
+ */
+ template <typename S>
+ V8_INLINE TracedReference(TracedReference<S>&& other) {
+ // Forward to operator=.
+ *this = std::move(other);
+ }
+
+ /**
+ * Copy constructor initializing TracedReference from an
+ * existing one.
+ */
+ V8_INLINE TracedReference(const TracedReference& other) {
+    // Forward to operator=.
+ *this = other;
+ }
+
+ /**
+ * Copy constructor initializing TracedReference from an
+ * existing one.
+ */
+ template <typename S>
+ V8_INLINE TracedReference(const TracedReference<S>& other) {
+    // Forward to operator=.
+ *this = other;
+ }
+
+ /**
+   * Move assignment operator initializing TracedReference from an existing one.
+ */
+ V8_INLINE TracedReference& operator=(TracedReference&& rhs);
+
+ /**
+   * Move assignment operator initializing TracedReference from an existing one.
+ */
+ template <class S>
+ V8_INLINE TracedReference& operator=(TracedReference<S>&& rhs);
+
+ /**
+   * Copy assignment operator initializing TracedReference from an existing one.
+ */
+ V8_INLINE TracedReference& operator=(const TracedReference& rhs);
+
+ /**
+   * Copy assignment operator initializing TracedReference from an existing one.
+ */
+ template <class S>
+ V8_INLINE TracedReference& operator=(const TracedReference<S>& rhs);
+
+ /**
+   * If non-empty, destroys the underlying storage cell. A new storage cell is
+   * created with the contents of |other| if |other| is non-empty.
+ */
+ template <class S>
+ V8_INLINE void Reset(Isolate* isolate, const Local<S>& other);
+
+ template <class S>
+ V8_INLINE TracedReference<S>& As() const {
+ return reinterpret_cast<TracedReference<S>&>(
+ const_cast<TracedReference<T>&>(*this));
+ }
+};
+
+// --- Implementation ---
+template <class T>
+internal::Address* BasicTracedReference<T>::New(
+ Isolate* isolate, T* that, void* slot, DestructionMode destruction_mode) {
+ if (that == nullptr) return nullptr;
+ internal::Address* p = reinterpret_cast<internal::Address*>(that);
+ return api_internal::GlobalizeTracedReference(
+ reinterpret_cast<internal::Isolate*>(isolate), p,
+ reinterpret_cast<internal::Address*>(slot),
+ destruction_mode == kWithDestructor);
+}
+
+void TracedReferenceBase::Reset() {
+ if (IsEmpty()) return;
+ api_internal::DisposeTracedGlobal(reinterpret_cast<internal::Address*>(val_));
+ SetSlotThreadSafe(nullptr);
+}
+
+V8_INLINE bool operator==(const TracedReferenceBase& lhs,
+ const TracedReferenceBase& rhs) {
+ v8::internal::Address* a = reinterpret_cast<v8::internal::Address*>(lhs.val_);
+ v8::internal::Address* b = reinterpret_cast<v8::internal::Address*>(rhs.val_);
+ if (a == nullptr) return b == nullptr;
+ if (b == nullptr) return false;
+ return *a == *b;
+}
+
+template <typename U>
+V8_INLINE bool operator==(const TracedReferenceBase& lhs,
+ const v8::Local<U>& rhs) {
+ v8::internal::Address* a = reinterpret_cast<v8::internal::Address*>(lhs.val_);
+ v8::internal::Address* b = reinterpret_cast<v8::internal::Address*>(*rhs);
+ if (a == nullptr) return b == nullptr;
+ if (b == nullptr) return false;
+ return *a == *b;
+}
+
+template <typename U>
+V8_INLINE bool operator==(const v8::Local<U>& lhs,
+ const TracedReferenceBase& rhs) {
+ return rhs == lhs;
+}
+
+V8_INLINE bool operator!=(const TracedReferenceBase& lhs,
+ const TracedReferenceBase& rhs) {
+ return !(lhs == rhs);
+}
+
+template <typename U>
+V8_INLINE bool operator!=(const TracedReferenceBase& lhs,
+ const v8::Local<U>& rhs) {
+ return !(lhs == rhs);
+}
+
+template <typename U>
+V8_INLINE bool operator!=(const v8::Local<U>& lhs,
+ const TracedReferenceBase& rhs) {
+ return !(rhs == lhs);
+}
+
+template <class T>
+template <class S>
+void TracedGlobal<T>::Reset(Isolate* isolate, const Local<S>& other) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ Reset();
+ if (other.IsEmpty()) return;
+ this->val_ = this->New(isolate, other.val_, &this->val_,
+ BasicTracedReference<T>::kWithDestructor);
+}
+
+template <class T>
+template <class S>
+TracedGlobal<T>& TracedGlobal<T>::operator=(TracedGlobal<S>&& rhs) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ *this = std::move(rhs.template As<T>());
+ return *this;
+}
+
+template <class T>
+template <class S>
+TracedGlobal<T>& TracedGlobal<T>::operator=(const TracedGlobal<S>& rhs) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ *this = rhs.template As<T>();
+ return *this;
+}
+
+template <class T>
+TracedGlobal<T>& TracedGlobal<T>::operator=(TracedGlobal&& rhs) {
+ if (this != &rhs) {
+ api_internal::MoveTracedGlobalReference(
+ reinterpret_cast<internal::Address**>(&rhs.val_),
+ reinterpret_cast<internal::Address**>(&this->val_));
+ }
+ return *this;
+}
+
+template <class T>
+TracedGlobal<T>& TracedGlobal<T>::operator=(const TracedGlobal& rhs) {
+ if (this != &rhs) {
+ this->Reset();
+ if (rhs.val_ != nullptr) {
+ api_internal::CopyTracedGlobalReference(
+ reinterpret_cast<const internal::Address* const*>(&rhs.val_),
+ reinterpret_cast<internal::Address**>(&this->val_));
+ }
+ }
+ return *this;
+}
+
+template <class T>
+template <class S>
+void TracedReference<T>::Reset(Isolate* isolate, const Local<S>& other) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ this->Reset();
+ if (other.IsEmpty()) return;
+ this->SetSlotThreadSafe(
+ this->New(isolate, other.val_, &this->val_,
+ BasicTracedReference<T>::kWithoutDestructor));
+}
+
+template <class T>
+template <class S>
+TracedReference<T>& TracedReference<T>::operator=(TracedReference<S>&& rhs) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ *this = std::move(rhs.template As<T>());
+ return *this;
+}
+
+template <class T>
+template <class S>
+TracedReference<T>& TracedReference<T>::operator=(
+ const TracedReference<S>& rhs) {
+ static_assert(std::is_base_of<T, S>::value, "type check");
+ *this = rhs.template As<T>();
+ return *this;
+}
+
+template <class T>
+TracedReference<T>& TracedReference<T>::operator=(TracedReference&& rhs) {
+ if (this != &rhs) {
+ api_internal::MoveTracedGlobalReference(
+ reinterpret_cast<internal::Address**>(&rhs.val_),
+ reinterpret_cast<internal::Address**>(&this->val_));
+ }
+ return *this;
+}
+
+template <class T>
+TracedReference<T>& TracedReference<T>::operator=(const TracedReference& rhs) {
+ if (this != &rhs) {
+ this->Reset();
+ if (rhs.val_ != nullptr) {
+ api_internal::CopyTracedGlobalReference(
+ reinterpret_cast<const internal::Address* const*>(&rhs.val_),
+ reinterpret_cast<internal::Address**>(&this->val_));
+ }
+ }
+ return *this;
+}
+
+void TracedReferenceBase::SetWrapperClassId(uint16_t class_id) {
+ using I = internal::Internals;
+ if (IsEmpty()) return;
+ internal::Address* obj = reinterpret_cast<internal::Address*>(val_);
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
+ *reinterpret_cast<uint16_t*>(addr) = class_id;
+}
+
+uint16_t TracedReferenceBase::WrapperClassId() const {
+ using I = internal::Internals;
+ if (IsEmpty()) return 0;
+ internal::Address* obj = reinterpret_cast<internal::Address*>(val_);
+ uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
+ return *reinterpret_cast<uint16_t*>(addr);
+}
+
+template <class T>
+void TracedGlobal<T>::SetFinalizationCallback(
+ void* parameter, typename WeakCallbackInfo<void>::Callback callback) {
+ api_internal::SetFinalizationCallbackTraced(
+ reinterpret_cast<internal::Address*>(this->val_), parameter, callback);
+}
+
+} // namespace v8
+
+#endif // INCLUDE_V8_TRACED_HANDLE_H_
diff --git a/deps/v8/include/v8-typed-array.h b/deps/v8/include/v8-typed-array.h
new file mode 100644
index 0000000000..483b4f772f
--- /dev/null
+++ b/deps/v8/include/v8-typed-array.h
@@ -0,0 +1,282 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_TYPED_ARRAY_H_
+#define INCLUDE_V8_TYPED_ARRAY_H_
+
+#include "v8-array-buffer.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class SharedArrayBuffer;
+
+/**
+ * A base class for an instance of one of the TypedArray constructors
+ * (ES6 draft 15.13.6).
+ */
+class V8_EXPORT TypedArray : public ArrayBufferView {
+ public:
+  /**
+ * The largest typed array size that can be constructed using New.
+ */
+ static constexpr size_t kMaxLength =
+ internal::kApiSystemPointerSize == 4
+ ? internal::kSmiMaxValue
+ : static_cast<size_t>(uint64_t{1} << 32);
+
+ /**
+ * Number of elements in this typed array
+ * (e.g. for Int16Array, |ByteLength|/2).
+ */
+ size_t Length();
+
+ V8_INLINE static TypedArray* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<TypedArray*>(value);
+ }
+
+ private:
+ TypedArray();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of the Uint8Array constructor (ES6 draft 15.13.6).
+ */
+class V8_EXPORT Uint8Array : public TypedArray {
+ public:
+ static Local<Uint8Array> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<Uint8Array> New(Local<SharedArrayBuffer> shared_array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static Uint8Array* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Uint8Array*>(value);
+ }
+
+ private:
+ Uint8Array();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of the Uint8ClampedArray constructor (ES6 draft 15.13.6).
+ */
+class V8_EXPORT Uint8ClampedArray : public TypedArray {
+ public:
+ static Local<Uint8ClampedArray> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<Uint8ClampedArray> New(
+ Local<SharedArrayBuffer> shared_array_buffer, size_t byte_offset,
+ size_t length);
+ V8_INLINE static Uint8ClampedArray* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Uint8ClampedArray*>(value);
+ }
+
+ private:
+ Uint8ClampedArray();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of the Int8Array constructor (ES6 draft 15.13.6).
+ */
+class V8_EXPORT Int8Array : public TypedArray {
+ public:
+ static Local<Int8Array> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<Int8Array> New(Local<SharedArrayBuffer> shared_array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static Int8Array* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Int8Array*>(value);
+ }
+
+ private:
+ Int8Array();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of the Uint16Array constructor (ES6 draft 15.13.6).
+ */
+class V8_EXPORT Uint16Array : public TypedArray {
+ public:
+ static Local<Uint16Array> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<Uint16Array> New(Local<SharedArrayBuffer> shared_array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static Uint16Array* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Uint16Array*>(value);
+ }
+
+ private:
+ Uint16Array();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of the Int16Array constructor (ES6 draft 15.13.6).
+ */
+class V8_EXPORT Int16Array : public TypedArray {
+ public:
+ static Local<Int16Array> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<Int16Array> New(Local<SharedArrayBuffer> shared_array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static Int16Array* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Int16Array*>(value);
+ }
+
+ private:
+ Int16Array();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of the Uint32Array constructor (ES6 draft 15.13.6).
+ */
+class V8_EXPORT Uint32Array : public TypedArray {
+ public:
+ static Local<Uint32Array> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<Uint32Array> New(Local<SharedArrayBuffer> shared_array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static Uint32Array* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Uint32Array*>(value);
+ }
+
+ private:
+ Uint32Array();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of the Int32Array constructor (ES6 draft 15.13.6).
+ */
+class V8_EXPORT Int32Array : public TypedArray {
+ public:
+ static Local<Int32Array> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<Int32Array> New(Local<SharedArrayBuffer> shared_array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static Int32Array* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Int32Array*>(value);
+ }
+
+ private:
+ Int32Array();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of the Float32Array constructor (ES6 draft 15.13.6).
+ */
+class V8_EXPORT Float32Array : public TypedArray {
+ public:
+ static Local<Float32Array> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<Float32Array> New(Local<SharedArrayBuffer> shared_array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static Float32Array* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Float32Array*>(value);
+ }
+
+ private:
+ Float32Array();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of the Float64Array constructor (ES6 draft 15.13.6).
+ */
+class V8_EXPORT Float64Array : public TypedArray {
+ public:
+ static Local<Float64Array> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<Float64Array> New(Local<SharedArrayBuffer> shared_array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static Float64Array* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Float64Array*>(value);
+ }
+
+ private:
+ Float64Array();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of the BigInt64Array constructor.
+ */
+class V8_EXPORT BigInt64Array : public TypedArray {
+ public:
+ static Local<BigInt64Array> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<BigInt64Array> New(Local<SharedArrayBuffer> shared_array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static BigInt64Array* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<BigInt64Array*>(value);
+ }
+
+ private:
+ BigInt64Array();
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * An instance of the BigUint64Array constructor.
+ */
+class V8_EXPORT BigUint64Array : public TypedArray {
+ public:
+ static Local<BigUint64Array> New(Local<ArrayBuffer> array_buffer,
+ size_t byte_offset, size_t length);
+ static Local<BigUint64Array> New(Local<SharedArrayBuffer> shared_array_buffer,
+ size_t byte_offset, size_t length);
+ V8_INLINE static BigUint64Array* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<BigUint64Array*>(value);
+ }
+
+ private:
+ BigUint64Array();
+ static void CheckCast(Value* obj);
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_TYPED_ARRAY_H_
diff --git a/deps/v8/include/v8-unwinder-state.h b/deps/v8/include/v8-unwinder-state.h
index 00f8b8b176..a30f7325f4 100644
--- a/deps/v8/include/v8-unwinder-state.h
+++ b/deps/v8/include/v8-unwinder-state.h
@@ -17,9 +17,10 @@ struct CalleeSavedRegisters {
void* arm_r9;
void* arm_r10;
};
-#elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || \
- V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_S390
+#elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
+ V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || \
+ V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_S390 || \
+ V8_TARGET_ARCH_LOONG64
struct CalleeSavedRegisters {};
#else
#error Target architecture was not detected as supported by v8
diff --git a/deps/v8/include/v8-unwinder.h b/deps/v8/include/v8-unwinder.h
new file mode 100644
index 0000000000..22a5cd713d
--- /dev/null
+++ b/deps/v8/include/v8-unwinder.h
@@ -0,0 +1,129 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_UNWINDER_H_
+#define INCLUDE_V8_UNWINDER_H_
+
+#include <memory>
+
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+// Holds the callee-saved registers needed by the stack unwinder. It is an
+// empty struct if no registers are required. Implemented in
+// include/v8-unwinder-state.h.
+struct CalleeSavedRegisters;
+
+// A RegisterState represents the current state of registers used
+// by the sampling profiler API.
+struct V8_EXPORT RegisterState {
+ RegisterState();
+ ~RegisterState();
+ RegisterState(const RegisterState& other);
+ RegisterState& operator=(const RegisterState& other);
+
+ void* pc; // Instruction pointer.
+ void* sp; // Stack pointer.
+ void* fp; // Frame pointer.
+ void* lr; // Link register (or nullptr on platforms without a link register).
+  // Callee-saved registers (or null if no callee-saved registers were stored).
+ std::unique_ptr<CalleeSavedRegisters> callee_saved;
+};
+
+// A StateTag represents a possible state of the VM.
+enum StateTag {
+ JS,
+ GC,
+ PARSER,
+ BYTECODE_COMPILER,
+ COMPILER,
+ OTHER,
+ EXTERNAL,
+ ATOMICS_WAIT,
+ IDLE
+};
+
+// The output structure filled in by the GetStackSample API function.
+struct SampleInfo {
+ size_t frames_count; // Number of frames collected.
+ StateTag vm_state; // Current VM state.
+ void* external_callback_entry; // External callback address if VM is
+ // executing an external callback.
+ void* context; // Incumbent native context address.
+};
+
+struct MemoryRange {
+ const void* start = nullptr;
+ size_t length_in_bytes = 0;
+};
+
+struct JSEntryStub {
+ MemoryRange code;
+};
+
+struct JSEntryStubs {
+ JSEntryStub js_entry_stub;
+ JSEntryStub js_construct_entry_stub;
+ JSEntryStub js_run_microtasks_entry_stub;
+};
+
+/**
+ * Various helpers for skipping over V8 frames in a given stack.
+ *
+ * The unwinder API is only supported on the x64, ARM64 and ARM32 architectures.
+ */
+class V8_EXPORT Unwinder {
+ public:
+ /**
+ * Attempt to unwind the stack to the most recent C++ frame. This function is
+ * signal-safe and does not access any V8 state and thus doesn't require an
+ * Isolate.
+ *
+ * The unwinder needs to know the location of the JS Entry Stub (a piece of
+ * code that is run when C++ code calls into generated JS code). This is used
+ * for edge cases where the current frame is being constructed or torn down
+ * when the stack sample occurs.
+ *
+ * The unwinder also needs the virtual memory range of all possible V8 code
+ * objects. There are two ranges required - the heap code range and the range
+ * for code embedded in the binary.
+ *
+ * Available on x64, ARM64 and ARM32.
+ *
+ * \param code_pages A list of all of the ranges in which V8 has allocated
+ * executable code. The caller should obtain this list by calling
+ * Isolate::CopyCodePages() during the same interrupt/thread suspension that
+ * captures the stack.
+ * \param register_state The current registers. This is an in-out param that
+ * will be overwritten with the register values after unwinding, on success.
+ * \param stack_base The resulting stack pointer and frame pointer values are
+ * bounds-checked against the stack_base and the original stack pointer value
+ * to ensure that they are valid locations in the given stack. If these values
+ * or any intermediate frame pointer values used during unwinding are ever out
+ * of these bounds, unwinding will fail.
+ *
+ * \return True on success.
+ */
+ static bool TryUnwindV8Frames(const JSEntryStubs& entry_stubs,
+ size_t code_pages_length,
+ const MemoryRange* code_pages,
+ RegisterState* register_state,
+ const void* stack_base);
+
+ /**
+ * Whether the PC is within the V8 code range represented by code_pages.
+ *
+   * If this returns false, then calling TryUnwindV8Frames() with the same PC
+   * and register state will always fail. If it returns true, then unwinding
+   * may (but will not necessarily) be successful.
+ *
+   * Available on x64, ARM64 and ARM32.
+ */
+ static bool PCIsInV8(size_t code_pages_length, const MemoryRange* code_pages,
+ void* pc);
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_UNWINDER_H_
diff --git a/deps/v8/include/v8-util.h b/deps/v8/include/v8-util.h
index 8e4d66153d..c54418aa25 100644
--- a/deps/v8/include/v8-util.h
+++ b/deps/v8/include/v8-util.h
@@ -5,11 +5,14 @@
#ifndef V8_UTIL_H_
#define V8_UTIL_H_
-#include "v8.h" // NOLINT(build/include_directory)
#include <assert.h>
+
#include <map>
#include <vector>
+#include "v8-function-callback.h" // NOLINT(build/include_directory)
+#include "v8-persistent-handle.h" // NOLINT(build/include_directory)
+
/**
* Support for Persistent containers.
*
@@ -19,6 +22,9 @@
*/
namespace v8 {
+template <typename K, typename V, typename Traits>
+class GlobalValueMap;
+
typedef uintptr_t PersistentContainerValue;
static const uintptr_t kPersistentContainerNotFound = 0;
enum PersistentContainerCallbackType {
diff --git a/deps/v8/include/v8-value-serializer.h b/deps/v8/include/v8-value-serializer.h
new file mode 100644
index 0000000000..574567bd5a
--- /dev/null
+++ b/deps/v8/include/v8-value-serializer.h
@@ -0,0 +1,249 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_VALUE_SERIALIZER_H_
+#define INCLUDE_V8_VALUE_SERIALIZER_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <utility>
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-maybe.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class ArrayBuffer;
+class Isolate;
+class Object;
+class SharedArrayBuffer;
+class String;
+class WasmModuleObject;
+class Value;
+
+namespace internal {
+struct ScriptStreamingData;
+} // namespace internal
+
+/**
+ * Value serialization compatible with the HTML structured clone algorithm.
+ * The format is backward-compatible (i.e. safe to store to disk).
+ */
+class V8_EXPORT ValueSerializer {
+ public:
+ class V8_EXPORT Delegate {
+ public:
+ virtual ~Delegate() = default;
+
+ /**
+ * Handles the case where a DataCloneError would be thrown in the structured
+ * clone spec. Other V8 embedders may throw some other appropriate exception
+ * type.
+ */
+ virtual void ThrowDataCloneError(Local<String> message) = 0;
+
+ /**
+ * The embedder overrides this method to write some kind of host object, if
+ * possible. If not, a suitable exception should be thrown and
+ * Nothing<bool>() returned.
+ */
+ virtual Maybe<bool> WriteHostObject(Isolate* isolate, Local<Object> object);
+
+ /**
+ * Called when the ValueSerializer is going to serialize a
+ * SharedArrayBuffer object. The embedder must return an ID for the
+ * object, using the same ID if this SharedArrayBuffer has already been
+ * serialized in this buffer. When deserializing, this ID will be passed to
+     * ValueDeserializer::Delegate::GetSharedArrayBufferFromId as |clone_id|.
+ *
+ * If the object cannot be serialized, an
+ * exception should be thrown and Nothing<uint32_t>() returned.
+ */
+ virtual Maybe<uint32_t> GetSharedArrayBufferId(
+ Isolate* isolate, Local<SharedArrayBuffer> shared_array_buffer);
+
+ virtual Maybe<uint32_t> GetWasmModuleTransferId(
+ Isolate* isolate, Local<WasmModuleObject> module);
+ /**
+ * Allocates memory for the buffer of at least the size provided. The actual
+ * size (which may be greater or equal) is written to |actual_size|. If no
+ * buffer has been allocated yet, nullptr will be provided.
+ *
+ * If the memory cannot be allocated, nullptr should be returned.
+ * |actual_size| will be ignored. It is assumed that |old_buffer| is still
+ * valid in this case and has not been modified.
+ *
+ * The default implementation uses the stdlib's `realloc()` function.
+ */
+ virtual void* ReallocateBufferMemory(void* old_buffer, size_t size,
+ size_t* actual_size);
+
+ /**
+ * Frees a buffer allocated with |ReallocateBufferMemory|.
+ *
+ * The default implementation uses the stdlib's `free()` function.
+ */
+ virtual void FreeBufferMemory(void* buffer);
+ };
+
+ explicit ValueSerializer(Isolate* isolate);
+ ValueSerializer(Isolate* isolate, Delegate* delegate);
+ ~ValueSerializer();
+
+ /**
+ * Writes out a header, which includes the format version.
+ */
+ void WriteHeader();
+
+ /**
+ * Serializes a JavaScript value into the buffer.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> WriteValue(Local<Context> context,
+ Local<Value> value);
+
+ /**
+ * Returns the stored data (allocated using the delegate's
+ * ReallocateBufferMemory) and its size. This serializer should not be used
+ * once the buffer is released. The contents are undefined if a previous write
+ * has failed. Ownership of the buffer is transferred to the caller.
+ */
+ V8_WARN_UNUSED_RESULT std::pair<uint8_t*, size_t> Release();
+
+ /**
+   * Marks an ArrayBuffer as having its contents transferred out of band.
+ * Pass the corresponding ArrayBuffer in the deserializing context to
+ * ValueDeserializer::TransferArrayBuffer.
+ */
+ void TransferArrayBuffer(uint32_t transfer_id,
+ Local<ArrayBuffer> array_buffer);
+
+ /**
+ * Indicate whether to treat ArrayBufferView objects as host objects,
+ * i.e. pass them to Delegate::WriteHostObject. This should not be
+ * called when no Delegate was passed.
+ *
+ * The default is not to treat ArrayBufferViews as host objects.
+ */
+ void SetTreatArrayBufferViewsAsHostObjects(bool mode);
+
+ /**
+ * Write raw data in various common formats to the buffer.
+ * Note that integer types are written in base-128 varint format, not with a
+ * binary copy. For use during an override of Delegate::WriteHostObject.
+ */
+ void WriteUint32(uint32_t value);
+ void WriteUint64(uint64_t value);
+ void WriteDouble(double value);
+ void WriteRawBytes(const void* source, size_t length);
+
+ ValueSerializer(const ValueSerializer&) = delete;
+ void operator=(const ValueSerializer&) = delete;
+
+ private:
+ struct PrivateData;
+ PrivateData* private_;
+};
+
+/**
+ * Deserializes values from data written with ValueSerializer, or a compatible
+ * implementation.
+ */
+class V8_EXPORT ValueDeserializer {
+ public:
+ class V8_EXPORT Delegate {
+ public:
+ virtual ~Delegate() = default;
+
+ /**
+ * The embedder overrides this method to read some kind of host object, if
+ * possible. If not, a suitable exception should be thrown and
+ * MaybeLocal<Object>() returned.
+ */
+ virtual MaybeLocal<Object> ReadHostObject(Isolate* isolate);
+
+ /**
+ * Get a WasmModuleObject given a transfer_id previously provided
+     * by ValueSerializer::Delegate::GetWasmModuleTransferId.
+ */
+ virtual MaybeLocal<WasmModuleObject> GetWasmModuleFromId(
+ Isolate* isolate, uint32_t transfer_id);
+
+ /**
+ * Get a SharedArrayBuffer given a clone_id previously provided
+     * by ValueSerializer::Delegate::GetSharedArrayBufferId.
+ */
+ virtual MaybeLocal<SharedArrayBuffer> GetSharedArrayBufferFromId(
+ Isolate* isolate, uint32_t clone_id);
+ };
+
+ ValueDeserializer(Isolate* isolate, const uint8_t* data, size_t size);
+ ValueDeserializer(Isolate* isolate, const uint8_t* data, size_t size,
+ Delegate* delegate);
+ ~ValueDeserializer();
+
+ /**
+ * Reads and validates a header (including the format version).
+ * May, for example, reject an invalid or unsupported wire format.
+ */
+ V8_WARN_UNUSED_RESULT Maybe<bool> ReadHeader(Local<Context> context);
+
+ /**
+ * Deserializes a JavaScript value from the buffer.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Value> ReadValue(Local<Context> context);
+
+ /**
+ * Accepts the array buffer corresponding to the one passed previously to
+ * ValueSerializer::TransferArrayBuffer.
+ */
+ void TransferArrayBuffer(uint32_t transfer_id,
+ Local<ArrayBuffer> array_buffer);
+
+ /**
+ * Similar to TransferArrayBuffer, but for SharedArrayBuffer.
+ * The id is not necessarily in the same namespace as unshared ArrayBuffer
+ * objects.
+ */
+ void TransferSharedArrayBuffer(uint32_t id,
+ Local<SharedArrayBuffer> shared_array_buffer);
+
+ /**
+ * Must be called before ReadHeader to enable support for reading the legacy
+   * wire format (i.e., the format that predates this API being shipped).
+ *
+ * Don't use this unless you need to read data written by previous versions of
+ * blink::ScriptValueSerializer.
+ */
+ void SetSupportsLegacyWireFormat(bool supports_legacy_wire_format);
+
+ /**
+   * Reads the underlying wire format version. This is mostly useful for
+   * legacy code reading old wire format versions. Must be called after
+ * ReadHeader.
+ */
+ uint32_t GetWireFormatVersion() const;
+
+ /**
+   * Reads raw data in various common formats from the buffer.
+ * Note that integer types are read in base-128 varint format, not with a
+ * binary copy. For use during an override of Delegate::ReadHostObject.
+ */
+ V8_WARN_UNUSED_RESULT bool ReadUint32(uint32_t* value);
+ V8_WARN_UNUSED_RESULT bool ReadUint64(uint64_t* value);
+ V8_WARN_UNUSED_RESULT bool ReadDouble(double* value);
+ V8_WARN_UNUSED_RESULT bool ReadRawBytes(size_t length, const void** data);
+
+ ValueDeserializer(const ValueDeserializer&) = delete;
+ void operator=(const ValueDeserializer&) = delete;
+
+ private:
+ struct PrivateData;
+ PrivateData* private_;
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_VALUE_SERIALIZER_H_
diff --git a/deps/v8/include/v8-value.h b/deps/v8/include/v8-value.h
new file mode 100644
index 0000000000..adca989e00
--- /dev/null
+++ b/deps/v8/include/v8-value.h
@@ -0,0 +1,526 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_VALUE_H_
+#define INCLUDE_V8_VALUE_H_
+
+#include "v8-data.h" // NOLINT(build/include_directory)
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-maybe.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+/**
+ * The v8 JavaScript engine.
+ */
+namespace v8 {
+
+class BigInt;
+class Int32;
+class Integer;
+class Number;
+class Object;
+class String;
+class Uint32;
+
+/**
+ * The superclass of all JavaScript values and objects.
+ */
+class V8_EXPORT Value : public Data {
+ public:
+ /**
+ * Returns true if this value is the undefined value. See ECMA-262
+ * 4.3.10.
+ *
+ * This is equivalent to `value === undefined` in JS.
+ */
+ V8_INLINE bool IsUndefined() const;
+
+ /**
+ * Returns true if this value is the null value. See ECMA-262
+ * 4.3.11.
+ *
+ * This is equivalent to `value === null` in JS.
+ */
+ V8_INLINE bool IsNull() const;
+
+ /**
+ * Returns true if this value is either the null or the undefined value.
+ * See ECMA-262
+ * 4.3.11. and 4.3.12
+ *
+ * This is equivalent to `value == null` in JS.
+ */
+ V8_INLINE bool IsNullOrUndefined() const;
+
+ /**
+ * Returns true if this value is true.
+ *
+ * This is not the same as `BooleanValue()`. The latter performs a
+ * conversion to boolean, i.e. the result of `Boolean(value)` in JS, whereas
+ * this checks `value === true`.
+ */
+ bool IsTrue() const;
+
+ /**
+ * Returns true if this value is false.
+ *
+ * This is not the same as `!BooleanValue()`. The latter performs a
+ * conversion to boolean, i.e. the result of `!Boolean(value)` in JS, whereas
+ * this checks `value === false`.
+ */
+ bool IsFalse() const;
+
+ /**
+ * Returns true if this value is a symbol or a string.
+ *
+ * This is equivalent to
+ * `typeof value === 'string' || typeof value === 'symbol'` in JS.
+ */
+ bool IsName() const;
+
+ /**
+ * Returns true if this value is an instance of the String type.
+ * See ECMA-262 8.4.
+ *
+ * This is equivalent to `typeof value === 'string'` in JS.
+ */
+ V8_INLINE bool IsString() const;
+
+ /**
+ * Returns true if this value is a symbol.
+ *
+ * This is equivalent to `typeof value === 'symbol'` in JS.
+ */
+ bool IsSymbol() const;
+
+ /**
+ * Returns true if this value is a function.
+ *
+ * This is equivalent to `typeof value === 'function'` in JS.
+ */
+ bool IsFunction() const;
+
+ /**
+ * Returns true if this value is an array. Note that it will return false for
+   * a Proxy for an array.
+ */
+ bool IsArray() const;
+
+ /**
+ * Returns true if this value is an object.
+ */
+ bool IsObject() const;
+
+ /**
+ * Returns true if this value is a bigint.
+ *
+ * This is equivalent to `typeof value === 'bigint'` in JS.
+ */
+ bool IsBigInt() const;
+
+ /**
+ * Returns true if this value is boolean.
+ *
+ * This is equivalent to `typeof value === 'boolean'` in JS.
+ */
+ bool IsBoolean() const;
+
+ /**
+ * Returns true if this value is a number.
+ *
+ * This is equivalent to `typeof value === 'number'` in JS.
+ */
+ bool IsNumber() const;
+
+ /**
+ * Returns true if this value is an `External` object.
+ */
+ bool IsExternal() const;
+
+ /**
+ * Returns true if this value is a 32-bit signed integer.
+ */
+ bool IsInt32() const;
+
+ /**
+ * Returns true if this value is a 32-bit unsigned integer.
+ */
+ bool IsUint32() const;
+
+ /**
+ * Returns true if this value is a Date.
+ */
+ bool IsDate() const;
+
+ /**
+ * Returns true if this value is an Arguments object.
+ */
+ bool IsArgumentsObject() const;
+
+ /**
+ * Returns true if this value is a BigInt object.
+ */
+ bool IsBigIntObject() const;
+
+ /**
+ * Returns true if this value is a Boolean object.
+ */
+ bool IsBooleanObject() const;
+
+ /**
+ * Returns true if this value is a Number object.
+ */
+ bool IsNumberObject() const;
+
+ /**
+ * Returns true if this value is a String object.
+ */
+ bool IsStringObject() const;
+
+ /**
+ * Returns true if this value is a Symbol object.
+ */
+ bool IsSymbolObject() const;
+
+ /**
+ * Returns true if this value is a NativeError.
+ */
+ bool IsNativeError() const;
+
+ /**
+ * Returns true if this value is a RegExp.
+ */
+ bool IsRegExp() const;
+
+ /**
+ * Returns true if this value is an async function.
+ */
+ bool IsAsyncFunction() const;
+
+ /**
+ * Returns true if this value is a Generator function.
+ */
+ bool IsGeneratorFunction() const;
+
+ /**
+ * Returns true if this value is a Generator object (iterator).
+ */
+ bool IsGeneratorObject() const;
+
+ /**
+ * Returns true if this value is a Promise.
+ */
+ bool IsPromise() const;
+
+ /**
+ * Returns true if this value is a Map.
+ */
+ bool IsMap() const;
+
+ /**
+ * Returns true if this value is a Set.
+ */
+ bool IsSet() const;
+
+ /**
+ * Returns true if this value is a Map Iterator.
+ */
+ bool IsMapIterator() const;
+
+ /**
+ * Returns true if this value is a Set Iterator.
+ */
+ bool IsSetIterator() const;
+
+ /**
+ * Returns true if this value is a WeakMap.
+ */
+ bool IsWeakMap() const;
+
+ /**
+ * Returns true if this value is a WeakSet.
+ */
+ bool IsWeakSet() const;
+
+ /**
+ * Returns true if this value is an ArrayBuffer.
+ */
+ bool IsArrayBuffer() const;
+
+ /**
+ * Returns true if this value is an ArrayBufferView.
+ */
+ bool IsArrayBufferView() const;
+
+ /**
+ * Returns true if this value is one of TypedArrays.
+ */
+ bool IsTypedArray() const;
+
+ /**
+   * Returns true if this value is a Uint8Array.
+ */
+ bool IsUint8Array() const;
+
+ /**
+   * Returns true if this value is a Uint8ClampedArray.
+ */
+ bool IsUint8ClampedArray() const;
+
+ /**
+ * Returns true if this value is an Int8Array.
+ */
+ bool IsInt8Array() const;
+
+ /**
+   * Returns true if this value is a Uint16Array.
+ */
+ bool IsUint16Array() const;
+
+ /**
+ * Returns true if this value is an Int16Array.
+ */
+ bool IsInt16Array() const;
+
+ /**
+   * Returns true if this value is a Uint32Array.
+ */
+ bool IsUint32Array() const;
+
+ /**
+ * Returns true if this value is an Int32Array.
+ */
+ bool IsInt32Array() const;
+
+ /**
+ * Returns true if this value is a Float32Array.
+ */
+ bool IsFloat32Array() const;
+
+ /**
+ * Returns true if this value is a Float64Array.
+ */
+ bool IsFloat64Array() const;
+
+ /**
+ * Returns true if this value is a BigInt64Array.
+ */
+ bool IsBigInt64Array() const;
+
+ /**
+ * Returns true if this value is a BigUint64Array.
+ */
+ bool IsBigUint64Array() const;
+
+ /**
+ * Returns true if this value is a DataView.
+ */
+ bool IsDataView() const;
+
+ /**
+ * Returns true if this value is a SharedArrayBuffer.
+ */
+ bool IsSharedArrayBuffer() const;
+
+ /**
+ * Returns true if this value is a JavaScript Proxy.
+ */
+ bool IsProxy() const;
+
+ /**
+ * Returns true if this value is a WasmMemoryObject.
+ */
+ bool IsWasmMemoryObject() const;
+
+ /**
+ * Returns true if this value is a WasmModuleObject.
+ */
+ bool IsWasmModuleObject() const;
+
+ /**
+ * Returns true if the value is a Module Namespace Object.
+ */
+ bool IsModuleNamespaceObject() const;
+
+ /**
+ * Perform the equivalent of `BigInt(value)` in JS.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<BigInt> ToBigInt(
+ Local<Context> context) const;
+ /**
+ * Perform the equivalent of `Number(value)` in JS.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Number> ToNumber(
+ Local<Context> context) const;
+ /**
+ * Perform the equivalent of `String(value)` in JS.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<String> ToString(
+ Local<Context> context) const;
+ /**
+ * Provide a string representation of this value usable for debugging.
+ * This operation has no observable side effects and will succeed
+ * unless e.g. execution is being terminated.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<String> ToDetailString(
+ Local<Context> context) const;
+ /**
+ * Perform the equivalent of `Object(value)` in JS.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Object> ToObject(
+ Local<Context> context) const;
+ /**
+ * Perform the equivalent of `Number(value)` in JS and convert the result
+ * to an integer. Negative values are rounded up, positive values are rounded
+ * down. NaN is converted to 0. Infinite values yield undefined results.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Integer> ToInteger(
+ Local<Context> context) const;
+ /**
+ * Perform the equivalent of `Number(value)` in JS and convert the result
+ * to an unsigned 32-bit integer by performing the steps in
+ * https://tc39.es/ecma262/#sec-touint32.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Uint32> ToUint32(
+ Local<Context> context) const;
+ /**
+ * Perform the equivalent of `Number(value)` in JS and convert the result
+ * to a signed 32-bit integer by performing the steps in
+ * https://tc39.es/ecma262/#sec-toint32.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Int32> ToInt32(Local<Context> context) const;
+
+ /**
+ * Perform the equivalent of `Boolean(value)` in JS. This can never fail.
+ */
+ Local<Boolean> ToBoolean(Isolate* isolate) const;
+
+ /**
+ * Attempts to convert a string to an array index.
+ * Returns an empty handle if the conversion fails.
+ */
+ V8_WARN_UNUSED_RESULT MaybeLocal<Uint32> ToArrayIndex(
+ Local<Context> context) const;
+
+ /** Returns the equivalent of `ToBoolean()->Value()`. */
+ bool BooleanValue(Isolate* isolate) const;
+
+ /** Returns the equivalent of `ToNumber()->Value()`. */
+ V8_WARN_UNUSED_RESULT Maybe<double> NumberValue(Local<Context> context) const;
+ /** Returns the equivalent of `ToInteger()->Value()`. */
+ V8_WARN_UNUSED_RESULT Maybe<int64_t> IntegerValue(
+ Local<Context> context) const;
+ /** Returns the equivalent of `ToUint32()->Value()`. */
+ V8_WARN_UNUSED_RESULT Maybe<uint32_t> Uint32Value(
+ Local<Context> context) const;
+ /** Returns the equivalent of `ToInt32()->Value()`. */
+ V8_WARN_UNUSED_RESULT Maybe<int32_t> Int32Value(Local<Context> context) const;
+
+ /** JS == */
+ V8_WARN_UNUSED_RESULT Maybe<bool> Equals(Local<Context> context,
+ Local<Value> that) const;
+ bool StrictEquals(Local<Value> that) const;
+ bool SameValue(Local<Value> that) const;
+
+ template <class T>
+ V8_INLINE static Value* Cast(T* value) {
+ return static_cast<Value*>(value);
+ }
+
+ Local<String> TypeOf(Isolate*);
+
+ Maybe<bool> InstanceOf(Local<Context> context, Local<Object> object);
+
+ private:
+ V8_INLINE bool QuickIsUndefined() const;
+ V8_INLINE bool QuickIsNull() const;
+ V8_INLINE bool QuickIsNullOrUndefined() const;
+ V8_INLINE bool QuickIsString() const;
+ bool FullIsUndefined() const;
+ bool FullIsNull() const;
+ bool FullIsString() const;
+
+ static void CheckCast(Data* that);
+};
+
+template <>
+V8_INLINE Value* Value::Cast(Data* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Value*>(value);
+}
+
+bool Value::IsUndefined() const {
+#ifdef V8_ENABLE_CHECKS
+ return FullIsUndefined();
+#else
+ return QuickIsUndefined();
+#endif
+}
+
+bool Value::QuickIsUndefined() const {
+ using A = internal::Address;
+ using I = internal::Internals;
+ A obj = *reinterpret_cast<const A*>(this);
+ if (!I::HasHeapObjectTag(obj)) return false;
+ if (I::GetInstanceType(obj) != I::kOddballType) return false;
+ return (I::GetOddballKind(obj) == I::kUndefinedOddballKind);
+}
+
+bool Value::IsNull() const {
+#ifdef V8_ENABLE_CHECKS
+ return FullIsNull();
+#else
+ return QuickIsNull();
+#endif
+}
+
+bool Value::QuickIsNull() const {
+ using A = internal::Address;
+ using I = internal::Internals;
+ A obj = *reinterpret_cast<const A*>(this);
+ if (!I::HasHeapObjectTag(obj)) return false;
+ if (I::GetInstanceType(obj) != I::kOddballType) return false;
+ return (I::GetOddballKind(obj) == I::kNullOddballKind);
+}
+
+bool Value::IsNullOrUndefined() const {
+#ifdef V8_ENABLE_CHECKS
+ return FullIsNull() || FullIsUndefined();
+#else
+ return QuickIsNullOrUndefined();
+#endif
+}
+
+bool Value::QuickIsNullOrUndefined() const {
+ using A = internal::Address;
+ using I = internal::Internals;
+ A obj = *reinterpret_cast<const A*>(this);
+ if (!I::HasHeapObjectTag(obj)) return false;
+ if (I::GetInstanceType(obj) != I::kOddballType) return false;
+ int kind = I::GetOddballKind(obj);
+ return kind == I::kNullOddballKind || kind == I::kUndefinedOddballKind;
+}
+
+bool Value::IsString() const {
+#ifdef V8_ENABLE_CHECKS
+ return FullIsString();
+#else
+ return QuickIsString();
+#endif
+}
+
+bool Value::QuickIsString() const {
+ using A = internal::Address;
+ using I = internal::Internals;
+ A obj = *reinterpret_cast<const A*>(this);
+ if (!I::HasHeapObjectTag(obj)) return false;
+ return (I::GetInstanceType(obj) < I::kFirstNonstringType);
+}
+
+} // namespace v8
+
+#endif // INCLUDE_V8_VALUE_H_
diff --git a/deps/v8/include/v8-version.h b/deps/v8/include/v8-version.h
index 845d32f360..1d9bd6ff14 100644
--- a/deps/v8/include/v8-version.h
+++ b/deps/v8/include/v8-version.h
@@ -9,9 +9,9 @@
// NOTE these macros are used by some of the tool scripts and the build
// system so their names cannot be changed without changing the scripts.
#define V8_MAJOR_VERSION 9
-#define V8_MINOR_VERSION 4
-#define V8_BUILD_NUMBER 146
-#define V8_PATCH_LEVEL 18
+#define V8_MINOR_VERSION 5
+#define V8_BUILD_NUMBER 172
+#define V8_PATCH_LEVEL 19
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/include/v8-wasm.h b/deps/v8/include/v8-wasm.h
new file mode 100644
index 0000000000..af47a3eab3
--- /dev/null
+++ b/deps/v8/include/v8-wasm.h
@@ -0,0 +1,245 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_WASM_H_
+#define INCLUDE_V8_WASM_H_
+
+#include <memory>
+#include <string>
+
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-memory-span.h" // NOLINT(build/include_directory)
+#include "v8-object.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class ArrayBuffer;
+class Promise;
+
+namespace internal {
+namespace wasm {
+class NativeModule;
+class StreamingDecoder;
+} // namespace wasm
+} // namespace internal
+
+/**
+ * An owned byte buffer with associated size.
+ */
+struct OwnedBuffer {
+ std::unique_ptr<const uint8_t[]> buffer;
+ size_t size = 0;
+ OwnedBuffer(std::unique_ptr<const uint8_t[]> buffer, size_t size)
+ : buffer(std::move(buffer)), size(size) {}
+ OwnedBuffer() = default;
+};
+
+// Wrapper around a compiled WebAssembly module, which is potentially shared by
+// different WasmModuleObjects.
+class V8_EXPORT CompiledWasmModule {
+ public:
+ /**
+ * Serialize the compiled module. The serialized data does not include the
+ * wire bytes.
+ */
+ OwnedBuffer Serialize();
+
+ /**
+ * Get the (wasm-encoded) wire bytes that were used to compile this module.
+ */
+ MemorySpan<const uint8_t> GetWireBytesRef();
+
+ const std::string& source_url() const { return source_url_; }
+
+ private:
+ friend class WasmModuleObject;
+ friend class WasmStreaming;
+
+ explicit CompiledWasmModule(std::shared_ptr<internal::wasm::NativeModule>,
+ const char* source_url, size_t url_length);
+
+ const std::shared_ptr<internal::wasm::NativeModule> native_module_;
+ const std::string source_url_;
+};
+
+// An instance of WebAssembly.Memory.
+class V8_EXPORT WasmMemoryObject : public Object {
+ public:
+ WasmMemoryObject() = delete;
+
+ /**
+ * Returns underlying ArrayBuffer.
+ */
+ Local<ArrayBuffer> Buffer();
+
+ V8_INLINE static WasmMemoryObject* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<WasmMemoryObject*>(value);
+ }
+
+ private:
+ static void CheckCast(Value* object);
+};
+
+// An instance of WebAssembly.Module.
+class V8_EXPORT WasmModuleObject : public Object {
+ public:
+ WasmModuleObject() = delete;
+
+ /**
+ * Efficiently re-create a WasmModuleObject, without recompiling, from
+ * a CompiledWasmModule.
+ */
+ static MaybeLocal<WasmModuleObject> FromCompiledModule(
+ Isolate* isolate, const CompiledWasmModule&);
+
+ /**
+ * Get the compiled module for this module object. The compiled module can be
+ * shared by several module objects.
+ */
+ CompiledWasmModule GetCompiledModule();
+
+ V8_INLINE static WasmModuleObject* Cast(Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<WasmModuleObject*>(value);
+ }
+
+ private:
+ static void CheckCast(Value* obj);
+};
+
+/**
+ * The V8 interface for WebAssembly streaming compilation. When streaming
+ * compilation is initiated, V8 passes a {WasmStreaming} object to the embedder
+ * such that the embedder can pass the input bytes for streaming compilation to
+ * V8.
+ */
+class V8_EXPORT WasmStreaming final {
+ public:
+ class WasmStreamingImpl;
+
+ /**
+ * Client to receive streaming event notifications.
+ */
+ class Client {
+ public:
+ virtual ~Client() = default;
+ /**
+ * Passes the fully compiled module to the client. This can be used to
+ * implement code caching.
+ */
+ virtual void OnModuleCompiled(CompiledWasmModule compiled_module) = 0;
+ };
+
+ explicit WasmStreaming(std::unique_ptr<WasmStreamingImpl> impl);
+
+ ~WasmStreaming();
+
+ /**
+ * Pass a new chunk of bytes to WebAssembly streaming compilation.
+ * The buffer passed into {OnBytesReceived} is owned by the caller.
+ */
+ void OnBytesReceived(const uint8_t* bytes, size_t size);
+
+ /**
+   * {Finish} should be called after all received bytes were passed to
+   * {OnBytesReceived} to tell V8 that there will be no more bytes. {Finish}
+   * does not have to be called if {Abort} has already been called.
+ */
+ void Finish();
+
+ /**
+ * Abort streaming compilation. If {exception} has a value, then the promise
+ * associated with streaming compilation is rejected with that value. If
+   * {exception} does not have a value, the promise does not get rejected.
+ */
+ void Abort(MaybeLocal<Value> exception);
+
+ /**
+ * Passes previously compiled module bytes. This must be called before
+ * {OnBytesReceived}, {Finish}, or {Abort}. Returns true if the module bytes
+ * can be used, false otherwise. The buffer passed via {bytes} and {size}
+ * is owned by the caller. If {SetCompiledModuleBytes} returns true, the
+ * buffer must remain valid until either {Finish} or {Abort} completes.
+ */
+ bool SetCompiledModuleBytes(const uint8_t* bytes, size_t size);
+
+ /**
+ * Sets the client object that will receive streaming event notifications.
+ * This must be called before {OnBytesReceived}, {Finish}, or {Abort}.
+ */
+ void SetClient(std::shared_ptr<Client> client);
+
+  /**
+ * Sets the UTF-8 encoded source URL for the {Script} object. This must be
+ * called before {Finish}.
+ */
+ void SetUrl(const char* url, size_t length);
+
+ /**
+ * Unpacks a {WasmStreaming} object wrapped in a {Managed} for the embedder.
+ * Since the embedder is on the other side of the API, it cannot unpack the
+ * {Managed} itself.
+ */
+ static std::shared_ptr<WasmStreaming> Unpack(Isolate* isolate,
+ Local<Value> value);
+
+ private:
+ std::unique_ptr<WasmStreamingImpl> impl_;
+};
+
+// TODO(mtrofin): when streaming compilation is done, we can rename this
+// to simply WasmModuleObjectBuilder
+class V8_EXPORT WasmModuleObjectBuilderStreaming final {
+ public:
+ explicit WasmModuleObjectBuilderStreaming(Isolate* isolate);
+ /**
+ * The buffer passed into OnBytesReceived is owned by the caller.
+ */
+ void OnBytesReceived(const uint8_t*, size_t size);
+ void Finish();
+ /**
+ * Abort streaming compilation. If {exception} has a value, then the promise
+ * associated with streaming compilation is rejected with that value. If
+   * {exception} does not have a value, the promise does not get rejected.
+ */
+ void Abort(MaybeLocal<Value> exception);
+ Local<Promise> GetPromise();
+
+ ~WasmModuleObjectBuilderStreaming() = default;
+
+ private:
+ WasmModuleObjectBuilderStreaming(const WasmModuleObjectBuilderStreaming&) =
+ delete;
+ WasmModuleObjectBuilderStreaming(WasmModuleObjectBuilderStreaming&&) =
+ default;
+ WasmModuleObjectBuilderStreaming& operator=(
+ const WasmModuleObjectBuilderStreaming&) = delete;
+ WasmModuleObjectBuilderStreaming& operator=(
+ WasmModuleObjectBuilderStreaming&&) = default;
+ Isolate* isolate_ = nullptr;
+
+#if V8_CC_MSVC
+ /**
+ * We don't need the static Copy API, so the default
+ * NonCopyablePersistentTraits would be sufficient, however,
+ * MSVC eagerly instantiates the Copy.
+ * We ensure we don't use Copy, however, by compiling with the
+ * defaults everywhere else.
+ */
+ Persistent<Promise, CopyablePersistentTraits<Promise>> promise_;
+#else
+ Persistent<Promise> promise_;
+#endif
+ std::shared_ptr<internal::wasm::StreamingDecoder> streaming_decoder_;
+};
+
+} // namespace v8
+
+#endif // INCLUDE_V8_WASM_H_
diff --git a/deps/v8/include/v8-weak-callback-info.h b/deps/v8/include/v8-weak-callback-info.h
new file mode 100644
index 0000000000..ff3c08238e
--- /dev/null
+++ b/deps/v8/include/v8-weak-callback-info.h
@@ -0,0 +1,73 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDE_V8_WEAK_CALLBACK_INFO_H_
+#define INCLUDE_V8_WEAK_CALLBACK_INFO_H_
+
+#include "v8config.h" // NOLINT(build/include_directory)
+
+namespace v8 {
+
+class Isolate;
+
+namespace api_internal {
+V8_EXPORT void InternalFieldOutOfBounds(int index);
+} // namespace api_internal
+
+static const int kInternalFieldsInWeakCallback = 2;
+static const int kEmbedderFieldsInWeakCallback = 2;
+
+template <typename T>
+class WeakCallbackInfo {
+ public:
+ using Callback = void (*)(const WeakCallbackInfo<T>& data);
+
+ WeakCallbackInfo(Isolate* isolate, T* parameter,
+ void* embedder_fields[kEmbedderFieldsInWeakCallback],
+ Callback* callback)
+ : isolate_(isolate), parameter_(parameter), callback_(callback) {
+ for (int i = 0; i < kEmbedderFieldsInWeakCallback; ++i) {
+ embedder_fields_[i] = embedder_fields[i];
+ }
+ }
+
+ V8_INLINE Isolate* GetIsolate() const { return isolate_; }
+ V8_INLINE T* GetParameter() const { return parameter_; }
+ V8_INLINE void* GetInternalField(int index) const;
+
+ // When first called, the embedder MUST Reset() the Global which triggered the
+  // callback. The Global itself is unusable for anything else. No other V8
+  // API calls may be made in the first callback. Should additional work be
+ // required, the embedder must set a second pass callback, which will be
+ // called after all the initial callbacks are processed.
+ // Calling SetSecondPassCallback on the second pass will immediately crash.
+ void SetSecondPassCallback(Callback callback) const { *callback_ = callback; }
+
+ private:
+ Isolate* isolate_;
+ T* parameter_;
+ Callback* callback_;
+ void* embedder_fields_[kEmbedderFieldsInWeakCallback];
+};
+
+// Defines what is passed to a weak callback and when it runs:
+// - kParameter: passes a void* parameter back to the callback.
+// - kInternalFields: passes the first two internal fields back to the
+//   callback.
+// - kFinalizer: passes a void* parameter back, but is invoked before the
+//   object is actually collected, so it can be resurrected. In this case it
+//   is not possible to request a second pass callback.
+enum class WeakCallbackType { kParameter, kInternalFields, kFinalizer };
+
+template <class T>
+void* WeakCallbackInfo<T>::GetInternalField(int index) const {
+#ifdef V8_ENABLE_CHECKS
+ if (index < 0 || index >= kEmbedderFieldsInWeakCallback) {
+ api_internal::InternalFieldOutOfBounds(index);
+ }
+#endif
+ return embedder_fields_[index];
+}
+
+} // namespace v8
+
+#endif // INCLUDE_V8_WEAK_CALLBACK_INFO_H_
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 8e664887be..dd91f880b7 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -7,7 +7,7 @@
* V8 is Google's open source JavaScript engine.
*
* This set of documents provides reference material generated from the
- * V8 header file, include/v8.h.
+ * V8 header files in the include/ subdirectory.
*
* For other documentation see https://v8.dev/.
*/
@@ -17,19 +17,51 @@
#include <stddef.h>
#include <stdint.h>
-#include <stdio.h>
-#include <atomic>
#include <memory>
-#include <string>
-#include <type_traits>
-#include <utility>
#include <vector>
#include "cppgc/common.h"
-#include "v8-internal.h" // NOLINT(build/include_directory)
-#include "v8-version.h" // NOLINT(build/include_directory)
-#include "v8config.h" // NOLINT(build/include_directory)
+#include "v8-array-buffer.h" // NOLINT(build/include_directory)
+#include "v8-container.h" // NOLINT(build/include_directory)
+#include "v8-context.h" // NOLINT(build/include_directory)
+#include "v8-data.h" // NOLINT(build/include_directory)
+#include "v8-date.h" // NOLINT(build/include_directory)
+#include "v8-debug.h" // NOLINT(build/include_directory)
+#include "v8-exception.h" // NOLINT(build/include_directory)
+#include "v8-extension.h" // NOLINT(build/include_directory)
+#include "v8-external.h" // NOLINT(build/include_directory)
+#include "v8-function.h" // NOLINT(build/include_directory)
+#include "v8-initialization.h" // NOLINT(build/include_directory)
+#include "v8-internal.h" // NOLINT(build/include_directory)
+#include "v8-isolate.h" // NOLINT(build/include_directory)
+#include "v8-json.h" // NOLINT(build/include_directory)
+#include "v8-local-handle.h" // NOLINT(build/include_directory)
+#include "v8-locker.h" // NOLINT(build/include_directory)
+#include "v8-maybe.h" // NOLINT(build/include_directory)
+#include "v8-memory-span.h" // NOLINT(build/include_directory)
+#include "v8-message.h" // NOLINT(build/include_directory)
+#include "v8-microtask-queue.h" // NOLINT(build/include_directory)
+#include "v8-microtask.h" // NOLINT(build/include_directory)
+#include "v8-object.h" // NOLINT(build/include_directory)
+#include "v8-persistent-handle.h" // NOLINT(build/include_directory)
+#include "v8-primitive-object.h" // NOLINT(build/include_directory)
+#include "v8-primitive.h" // NOLINT(build/include_directory)
+#include "v8-promise.h" // NOLINT(build/include_directory)
+#include "v8-proxy.h" // NOLINT(build/include_directory)
+#include "v8-regexp.h" // NOLINT(build/include_directory)
+#include "v8-script.h" // NOLINT(build/include_directory)
+#include "v8-snapshot.h" // NOLINT(build/include_directory)
+#include "v8-statistics.h" // NOLINT(build/include_directory)
+#include "v8-template.h" // NOLINT(build/include_directory)
+#include "v8-traced-handle.h" // NOLINT(build/include_directory)
+#include "v8-typed-array.h" // NOLINT(build/include_directory)
+#include "v8-unwinder.h" // NOLINT(build/include_directory)
+#include "v8-value-serializer.h" // NOLINT(build/include_directory)
+#include "v8-value.h" // NOLINT(build/include_directory)
+#include "v8-version.h" // NOLINT(build/include_directory)
+#include "v8-wasm.h" // NOLINT(build/include_directory)
+#include "v8config.h" // NOLINT(build/include_directory)
// We reserve the V8_* prefix for macros defined in V8 public API and
// assume there are no name conflicts with the embedder's code.
@@ -39,12295 +71,7 @@
*/
namespace v8 {
-class AccessorSignature;
-class Array;
-class ArrayBuffer;
-class BigInt;
-class BigIntObject;
-class Boolean;
-class BooleanObject;
-class CFunction;
-class CallHandlerHelper;
-class Context;
-class CppHeap;
-class CTypeInfo;
-class Data;
-class Date;
-class EscapableHandleScope;
-class External;
-class Function;
-class FunctionTemplate;
-class HeapProfiler;
-class ImplementationUtilities;
-class Int32;
-class Integer;
-class Isolate;
-class MicrotaskQueue;
-class Name;
-class Number;
-class NumberObject;
-class Object;
-class ObjectOperationDescriptor;
-class ObjectTemplate;
class Platform;
-class Primitive;
-class PrimitiveArray;
-class Private;
-class Promise;
-class PropertyDescriptor;
-class Proxy;
-class RawOperationDescriptor;
-class Script;
-class SharedArrayBuffer;
-class Signature;
-class StackFrame;
-class StackTrace;
-class StartupData;
-class String;
-class StringObject;
-class Symbol;
-class SymbolObject;
-class TracedReferenceBase;
-class Uint32;
-class Utils;
-class Value;
-class WasmMemoryObject;
-class WasmModuleObject;
-template <class K, class V, class T>
-class GlobalValueMap;
-template <class K, class V, class T>
-class PersistentValueMapBase;
-template<class T> class NonCopyablePersistentTraits;
-template <class T, class M = NonCopyablePersistentTraits<T>>
-class Persistent;
-template <class T>
-class BasicTracedReference;
-template <class T>
-class Eternal;
-template <class T>
-class Global;
-template <class T>
-class Local;
-template <class T>
-class Maybe;
-template <class T>
-class MaybeLocal;
-template <class T>
-class TracedGlobal;
-template <class T>
-class TracedReference;
-template<class K, class V, class T> class PersistentValueMap;
-template<class T, class P> class WeakCallbackObject;
-template <class T>
-class PersistentBase;
-template <class V, class T>
-class PersistentValueVector;
-template<typename T> class FunctionCallbackInfo;
-template<typename T> class PropertyCallbackInfo;
-template<typename T> class ReturnValue;
-
-namespace internal {
-class BackgroundDeserializeTask;
-class BasicTracedReferenceExtractor;
-class ExternalString;
-class FunctionCallbackArguments;
-class GlobalHandles;
-class Heap;
-class HeapObject;
-class Isolate;
-class LocalEmbedderHeapTracer;
-class MicrotaskQueue;
-class PropertyCallbackArguments;
-class ReadOnlyHeap;
-class ScopedExternalStringLock;
-class ThreadLocalTop;
-struct ScriptStreamingData;
-enum class ArgumentsType;
-template <ArgumentsType>
-class Arguments;
-template <typename T>
-class CustomArguments;
-
-namespace wasm {
-class NativeModule;
-class StreamingDecoder;
-} // namespace wasm
-
-} // namespace internal
-
-namespace metrics {
-class Recorder;
-} // namespace metrics
-
-namespace debug {
-class ConsoleCallArguments;
-} // namespace debug
-
-// --- Handles ---
-
-/**
- * An object reference managed by the v8 garbage collector.
- *
- * All objects returned from v8 have to be tracked by the garbage
- * collector so that it knows that the objects are still alive. Also,
- * because the garbage collector may move objects, it is unsafe to
- * point directly to an object. Instead, all objects are stored in
- * handles which are known by the garbage collector and updated
- * whenever an object moves. Handles should always be passed by value
- * (except in cases like out-parameters) and they should never be
- * allocated on the heap.
- *
- * There are two types of handles: local and persistent handles.
- *
- * Local handles are light-weight and transient and typically used in
- * local operations. They are managed by HandleScopes. That means that a
- * HandleScope must exist on the stack when they are created and that they are
- * only valid inside of the HandleScope active during their creation.
- * For passing a local handle to an outer HandleScope, an EscapableHandleScope
- * and its Escape() method must be used.
- *
- * Persistent handles can be used when storing objects across several
- * independent operations and have to be explicitly deallocated when they're no
- * longer used.
- *
- * It is safe to extract the object stored in the handle by
- * dereferencing the handle (for instance, to extract the Object* from
- * a Local<Object>); the value will still be governed by a handle
- * behind the scenes and the same rules apply to these values as to
- * their handles.
- */
-template <class T>
-class Local {
- public:
- V8_INLINE Local() : val_(nullptr) {}
- template <class S>
- V8_INLINE Local(Local<S> that)
- : val_(reinterpret_cast<T*>(*that)) {
- /**
- * This check fails when trying to convert between incompatible
- * handles. For example, converting from a Local<String> to a
- * Local<Number>.
- */
- static_assert(std::is_base_of<T, S>::value, "type check");
- }
-
- /**
- * Returns true if the handle is empty.
- */
- V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
-
- /**
- * Sets the handle to be empty. IsEmpty() will then return true.
- */
- V8_INLINE void Clear() { val_ = nullptr; }
-
- V8_INLINE T* operator->() const { return val_; }
-
- V8_INLINE T* operator*() const { return val_; }
-
- /**
- * Checks whether two handles are the same.
- * Returns true if both are empty, or if the objects to which they refer
- * are identical.
- *
- * If both handles refer to JS objects, this is the same as strict equality.
- * For primitives, such as numbers or strings, a `false` return value does not
- * indicate that the values aren't equal in the JavaScript sense.
- * Use `Value::StrictEquals()` to check primitives for equality.
- */
- template <class S>
- V8_INLINE bool operator==(const Local<S>& that) const {
- internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
- internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
- if (a == nullptr) return b == nullptr;
- if (b == nullptr) return false;
- return *a == *b;
- }
-
- template <class S> V8_INLINE bool operator==(
- const PersistentBase<S>& that) const {
- internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
- internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
- if (a == nullptr) return b == nullptr;
- if (b == nullptr) return false;
- return *a == *b;
- }
-
- /**
- * Checks whether two handles are different.
- * Returns true if only one of the handles is empty, or if
- * the objects to which they refer are different.
- *
- * If both handles refer to JS objects, this is the same as strict
- * non-equality. For primitives, such as numbers or strings, a `true` return
- * value does not indicate that the values aren't equal in the JavaScript
- * sense. Use `Value::StrictEquals()` to check primitives for equality.
- */
- template <class S>
- V8_INLINE bool operator!=(const Local<S>& that) const {
- return !operator==(that);
- }
-
- template <class S> V8_INLINE bool operator!=(
- const Persistent<S>& that) const {
- return !operator==(that);
- }
-
- /**
- * Cast a handle to a subclass, e.g. Local<Value> to Local<Object>.
- * This is only valid if the handle actually refers to a value of the
- * target type.
- */
- template <class S> V8_INLINE static Local<T> Cast(Local<S> that) {
-#ifdef V8_ENABLE_CHECKS
- // If we're going to perform the type check then we have to check
- // that the handle isn't empty before doing the checked cast.
- if (that.IsEmpty()) return Local<T>();
-#endif
- return Local<T>(T::Cast(*that));
- }
-
- /**
- * Calling this is equivalent to Local<S>::Cast().
- * In particular, this is only valid if the handle actually refers to a value
- * of the target type.
- */
- template <class S>
- V8_INLINE Local<S> As() const {
- return Local<S>::Cast(*this);
- }
-
- /**
- * Create a local handle for the content of another handle.
- * The referee is kept alive by the local handle even when
- * the original handle is destroyed/disposed.
- */
- V8_INLINE static Local<T> New(Isolate* isolate, Local<T> that);
- V8_INLINE static Local<T> New(Isolate* isolate,
- const PersistentBase<T>& that);
- V8_INLINE static Local<T> New(Isolate* isolate,
- const BasicTracedReference<T>& that);
-
- private:
- friend class TracedReferenceBase;
- friend class Utils;
- template<class F> friend class Eternal;
- template<class F> friend class PersistentBase;
- template<class F, class M> friend class Persistent;
- template<class F> friend class Local;
- template <class F>
- friend class MaybeLocal;
- template<class F> friend class FunctionCallbackInfo;
- template<class F> friend class PropertyCallbackInfo;
- friend class String;
- friend class Object;
- friend class Context;
- friend class Isolate;
- friend class Private;
- template<class F> friend class internal::CustomArguments;
- friend Local<Primitive> Undefined(Isolate* isolate);
- friend Local<Primitive> Null(Isolate* isolate);
- friend Local<Boolean> True(Isolate* isolate);
- friend Local<Boolean> False(Isolate* isolate);
- friend class HandleScope;
- friend class EscapableHandleScope;
- template <class F1, class F2, class F3>
- friend class PersistentValueMapBase;
- template<class F1, class F2> friend class PersistentValueVector;
- template <class F>
- friend class ReturnValue;
- template <class F>
- friend class Traced;
- template <class F>
- friend class TracedGlobal;
- template <class F>
- friend class BasicTracedReference;
- template <class F>
- friend class TracedReference;
-
- explicit V8_INLINE Local(T* that) : val_(that) {}
- V8_INLINE static Local<T> New(Isolate* isolate, T* that);
- T* val_;
-};
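
A short usage sketch of the handle rules above (assuming a live `isolate`; the function name is illustrative): locals live inside a HandleScope and compare by identity of the referenced object.

#include "v8.h"

void UseLocals(v8::Isolate* isolate) {
  v8::HandleScope scope(isolate);  // All Locals below belong to this scope.
  v8::Local<v8::String> a = v8::String::NewFromUtf8Literal(isolate, "hi");
  v8::Local<v8::Value> v = a;                    // Upcasts are implicit.
  v8::Local<v8::String> b = v.As<v8::String>();  // Downcast via As()/Cast().
  bool same = (a == b);  // True: both handles refer to the same object.
  (void)same;
}  // Handles created in `scope` are invalid from here on.
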
-
-
-#if !defined(V8_IMMINENT_DEPRECATION_WARNINGS)
-// Handle is an alias for Local for historical reasons.
-template <class T>
-using Handle = Local<T>;
-#endif
-
-
-/**
- * A MaybeLocal<> is a wrapper around Local<> that enforces a check whether
- * the Local<> is empty before it can be used.
- *
- * If an API method returns a MaybeLocal<>, the API method can potentially fail
- * either because an exception is thrown, or because an exception is pending,
- * e.g. because a previous API call threw an exception that hasn't been caught
- * yet, or because a TerminateExecution exception was thrown. In that case, an
- * empty MaybeLocal is returned.
- */
-template <class T>
-class MaybeLocal {
- public:
- V8_INLINE MaybeLocal() : val_(nullptr) {}
- template <class S>
- V8_INLINE MaybeLocal(Local<S> that)
- : val_(reinterpret_cast<T*>(*that)) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- }
-
- V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
-
- /**
- * Converts this MaybeLocal<> to a Local<>. If this MaybeLocal<> is empty,
- * |false| is returned and |out| is left untouched.
- */
- template <class S>
- V8_WARN_UNUSED_RESULT V8_INLINE bool ToLocal(Local<S>* out) const {
- out->val_ = IsEmpty() ? nullptr : this->val_;
- return !IsEmpty();
- }
-
- /**
- * Converts this MaybeLocal<> to a Local<>. If this MaybeLocal<> is empty,
- * V8 will crash the process.
- */
- V8_INLINE Local<T> ToLocalChecked();
-
- /**
- * Converts this MaybeLocal<> to a Local<>, using a default value if this
- * MaybeLocal<> is empty.
- */
- template <class S>
- V8_INLINE Local<S> FromMaybe(Local<S> default_value) const {
- return IsEmpty() ? default_value : Local<S>(val_);
- }
-
- private:
- T* val_;
-};
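
The canonical way to consume a MaybeLocal, sketched under the assumption of a live `context` (the helper name is illustrative). An empty result means an exception is pending, so ToLocal() is checked instead of calling ToLocalChecked().

#include "v8.h"

bool TryGetGlobalProperty(v8::Local<v8::Context> context,
                          v8::Local<v8::String> key,
                          v8::Local<v8::Value>* out) {
  v8::Local<v8::Value> value;
  if (!context->Global()->Get(context, key).ToLocal(&value)) {
    return false;  // Empty MaybeLocal: an exception is pending.
  }
  *out = value;
  return true;
}
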
-
-/**
- * Eternal handles are set-once handles that live for the lifetime of the
- * isolate.
- */
-template <class T> class Eternal {
- public:
- V8_INLINE Eternal() : val_(nullptr) {}
- template <class S>
- V8_INLINE Eternal(Isolate* isolate, Local<S> handle) : val_(nullptr) {
- Set(isolate, handle);
- }
- // Can only be safely called if already set.
- V8_INLINE Local<T> Get(Isolate* isolate) const;
- V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
- template<class S> V8_INLINE void Set(Isolate* isolate, Local<S> handle);
-
- private:
- T* val_;
-};
-
-
-static const int kInternalFieldsInWeakCallback = 2;
-static const int kEmbedderFieldsInWeakCallback = 2;
-
-template <typename T>
-class WeakCallbackInfo {
- public:
- using Callback = void (*)(const WeakCallbackInfo<T>& data);
-
- WeakCallbackInfo(Isolate* isolate, T* parameter,
- void* embedder_fields[kEmbedderFieldsInWeakCallback],
- Callback* callback)
- : isolate_(isolate), parameter_(parameter), callback_(callback) {
- for (int i = 0; i < kEmbedderFieldsInWeakCallback; ++i) {
- embedder_fields_[i] = embedder_fields[i];
- }
- }
-
- V8_INLINE Isolate* GetIsolate() const { return isolate_; }
- V8_INLINE T* GetParameter() const { return parameter_; }
- V8_INLINE void* GetInternalField(int index) const;
-
- // When first called, the embedder MUST Reset() the Global which triggered the
-  // callback. The Global itself is unusable for anything else. No other V8 API
-  // calls may be made in the first callback. Should additional work be
- // required, the embedder must set a second pass callback, which will be
- // called after all the initial callbacks are processed.
- // Calling SetSecondPassCallback on the second pass will immediately crash.
- void SetSecondPassCallback(Callback callback) const { *callback_ = callback; }
-
- private:
- Isolate* isolate_;
- T* parameter_;
- Callback* callback_;
- void* embedder_fields_[kEmbedderFieldsInWeakCallback];
-};
-
-
-// kParameter will pass a void* parameter back to the callback, kInternalFields
-// will pass the first two internal fields back to the callback, kFinalizer
-// will pass a void* parameter back, but is invoked before the object is
-// actually collected, so it can be resurrected. In the last case, it is not
-// possible to request a second pass callback.
-enum class WeakCallbackType { kParameter, kInternalFields, kFinalizer };
-
-/**
- * An object reference that is independent of any handle scope. Where
- * a Local handle only lives as long as the HandleScope in which it was
- * allocated, a PersistentBase handle remains valid until it is explicitly
- * disposed using Reset().
- *
- * A persistent handle contains a reference to a storage cell within
- * the V8 engine which holds an object value and which is updated by
- * the garbage collector whenever the object is moved. A new storage
- * cell can be created using the constructor or PersistentBase::Reset and
- * existing handles can be disposed using PersistentBase::Reset.
- *
- */
-template <class T> class PersistentBase {
- public:
- /**
- * If non-empty, destroy the underlying storage cell
- * IsEmpty() will return true after this call.
- */
- V8_INLINE void Reset();
- /**
- * If non-empty, destroy the underlying storage cell
- * and create a new one with the contents of other if other is non empty
- */
- template <class S>
- V8_INLINE void Reset(Isolate* isolate, const Local<S>& other);
-
- /**
- * If non-empty, destroy the underlying storage cell
- * and create a new one with the contents of other if other is non empty
- */
- template <class S>
- V8_INLINE void Reset(Isolate* isolate, const PersistentBase<S>& other);
-
- V8_INLINE bool IsEmpty() const { return val_ == nullptr; }
- V8_INLINE void Empty() { val_ = 0; }
-
- V8_INLINE Local<T> Get(Isolate* isolate) const {
- return Local<T>::New(isolate, *this);
- }
-
- template <class S>
- V8_INLINE bool operator==(const PersistentBase<S>& that) const {
- internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
- internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
- if (a == nullptr) return b == nullptr;
- if (b == nullptr) return false;
- return *a == *b;
- }
-
- template <class S>
- V8_INLINE bool operator==(const Local<S>& that) const {
- internal::Address* a = reinterpret_cast<internal::Address*>(this->val_);
- internal::Address* b = reinterpret_cast<internal::Address*>(that.val_);
- if (a == nullptr) return b == nullptr;
- if (b == nullptr) return false;
- return *a == *b;
- }
-
- template <class S>
- V8_INLINE bool operator!=(const PersistentBase<S>& that) const {
- return !operator==(that);
- }
-
- template <class S>
- V8_INLINE bool operator!=(const Local<S>& that) const {
- return !operator==(that);
- }
-
- /**
- * Install a finalization callback on this object.
- * NOTE: There is no guarantee as to *when* or even *if* the callback is
- * invoked. The invocation is performed solely on a best effort basis.
- * As always, GC-based finalization should *not* be relied upon for any
- * critical form of resource management!
- *
- * The callback is supposed to reset the handle. No further V8 API may be
- * called in this callback. In case additional work involving V8 needs to be
- * done, a second callback can be scheduled using
- * WeakCallbackInfo<void>::SetSecondPassCallback.
- */
- template <typename P>
- V8_INLINE void SetWeak(P* parameter,
- typename WeakCallbackInfo<P>::Callback callback,
- WeakCallbackType type);
-
- /**
- * Turns this handle into a weak phantom handle without finalization callback.
- * The handle will be reset automatically when the garbage collector detects
- * that the object is no longer reachable.
- * A related function Isolate::NumberOfPhantomHandleResetsSinceLastCall
- * returns how many phantom handles were reset by the garbage collector.
- */
- V8_INLINE void SetWeak();
-
- template<typename P>
- V8_INLINE P* ClearWeak();
-
- // TODO(dcarney): remove this.
- V8_INLINE void ClearWeak() { ClearWeak<void>(); }
-
- /**
- * Annotates the strong handle with the given label, which is then used by the
- * heap snapshot generator as a name of the edge from the root to the handle.
- * The function does not take ownership of the label and assumes that the
- * label is valid as long as the handle is valid.
- */
- V8_INLINE void AnnotateStrongRetainer(const char* label);
-
- /** Returns true if the handle's reference is weak. */
- V8_INLINE bool IsWeak() const;
-
- /**
- * Assigns a wrapper class ID to the handle.
- */
- V8_INLINE void SetWrapperClassId(uint16_t class_id);
-
- /**
- * Returns the class ID previously assigned to this handle or 0 if no class ID
- * was previously assigned.
- */
- V8_INLINE uint16_t WrapperClassId() const;
-
- PersistentBase(const PersistentBase& other) = delete;
- void operator=(const PersistentBase&) = delete;
-
- private:
- friend class Isolate;
- friend class Utils;
- template<class F> friend class Local;
- template<class F1, class F2> friend class Persistent;
- template <class F>
- friend class Global;
- template<class F> friend class PersistentBase;
- template<class F> friend class ReturnValue;
- template <class F1, class F2, class F3>
- friend class PersistentValueMapBase;
- template<class F1, class F2> friend class PersistentValueVector;
- friend class Object;
-
- explicit V8_INLINE PersistentBase(T* val) : val_(val) {}
- V8_INLINE static T* New(Isolate* isolate, T* that);
-
- T* val_;
-};
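
The argument-less SetWeak() documented above yields a phantom handle: no callback runs, V8 simply empties the handle when the object is reclaimed. A sketch (the `cache` slot and function names are illustrative):

#include "v8.h"

v8::Global<v8::Object> cache;  // Hypothetical long-lived slot.

void CacheWeakly(v8::Isolate* isolate, v8::Local<v8::Object> obj) {
  cache.Reset(isolate, obj);
  cache.SetWeak();  // Phantom: reset automatically on reclamation.
}

bool CacheStillAlive() {
  // Once the object is reclaimed the collector has reset the handle,
  // so emptiness reflects liveness.
  return !cache.IsEmpty();
}
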
-
-
-/**
- * Default traits for Persistent. This class does not allow
- * use of the copy constructor or assignment operator.
- * At present kResetInDestructor is not set, but that will change in a future
- * version.
- */
-template<class T>
-class NonCopyablePersistentTraits {
- public:
- using NonCopyablePersistent = Persistent<T, NonCopyablePersistentTraits<T>>;
- static const bool kResetInDestructor = false;
- template<class S, class M>
- V8_INLINE static void Copy(const Persistent<S, M>& source,
- NonCopyablePersistent* dest) {
- static_assert(sizeof(S) < 0,
- "NonCopyablePersistentTraits::Copy is not instantiable");
- }
-};
-
-
-/**
- * Helper class traits to allow copying and assignment of Persistent.
- * This will clone the contents of storage cell, but not any of the flags, etc.
- */
-template<class T>
-struct CopyablePersistentTraits {
- using CopyablePersistent = Persistent<T, CopyablePersistentTraits<T>>;
- static const bool kResetInDestructor = true;
- template<class S, class M>
- static V8_INLINE void Copy(const Persistent<S, M>& source,
- CopyablePersistent* dest) {
- // do nothing, just allow copy
- }
-};
-
-
-/**
- * A PersistentBase which allows copy and assignment.
- *
- * Copy, assignment and destructor behavior is controlled by the traits
- * class M.
- *
- * Note: Persistent class hierarchy is subject to future changes.
- */
-template <class T, class M> class Persistent : public PersistentBase<T> {
- public:
- /**
- * A Persistent with no storage cell.
- */
- V8_INLINE Persistent() : PersistentBase<T>(nullptr) {}
- /**
- * Construct a Persistent from a Local.
- * When the Local is non-empty, a new storage cell is created
- * pointing to the same object, and no flags are set.
- */
- template <class S>
- V8_INLINE Persistent(Isolate* isolate, Local<S> that)
- : PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- }
- /**
- * Construct a Persistent from a Persistent.
- * When the Persistent is non-empty, a new storage cell is created
- * pointing to the same object, and no flags are set.
- */
- template <class S, class M2>
- V8_INLINE Persistent(Isolate* isolate, const Persistent<S, M2>& that)
- : PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- }
- /**
- * The copy constructors and assignment operator create a Persistent
- * exactly as the Persistent constructor, but the Copy function from the
- * traits class is called, allowing the setting of flags based on the
- * copied Persistent.
- */
- V8_INLINE Persistent(const Persistent& that) : PersistentBase<T>(nullptr) {
- Copy(that);
- }
- template <class S, class M2>
- V8_INLINE Persistent(const Persistent<S, M2>& that) : PersistentBase<T>(0) {
- Copy(that);
- }
- V8_INLINE Persistent& operator=(const Persistent& that) {
- Copy(that);
- return *this;
- }
- template <class S, class M2>
- V8_INLINE Persistent& operator=(const Persistent<S, M2>& that) {
- Copy(that);
- return *this;
- }
- /**
- * The destructor will dispose the Persistent based on the
- * kResetInDestructor flags in the traits class. Since not calling dispose
- * can result in a memory leak, it is recommended to always set this flag.
- */
- V8_INLINE ~Persistent() {
- if (M::kResetInDestructor) this->Reset();
- }
-
- // TODO(dcarney): this is pretty useless, fix or remove
- template <class S>
- V8_INLINE static Persistent<T>& Cast(const Persistent<S>& that) {
-#ifdef V8_ENABLE_CHECKS
- // If we're going to perform the type check then we have to check
- // that the handle isn't empty before doing the checked cast.
- if (!that.IsEmpty()) T::Cast(*that);
-#endif
- return reinterpret_cast<Persistent<T>&>(const_cast<Persistent<S>&>(that));
- }
-
- // TODO(dcarney): this is pretty useless, fix or remove
- template <class S>
- V8_INLINE Persistent<S>& As() const {
- return Persistent<S>::Cast(*this);
- }
-
- private:
- friend class Isolate;
- friend class Utils;
- template<class F> friend class Local;
- template<class F1, class F2> friend class Persistent;
- template<class F> friend class ReturnValue;
-
- explicit V8_INLINE Persistent(T* that) : PersistentBase<T>(that) {}
- V8_INLINE T* operator*() const { return this->val_; }
- template<class S, class M2>
- V8_INLINE void Copy(const Persistent<S, M2>& that);
-};
-
-
-/**
- * A PersistentBase which has move semantics.
- *
- * Note: Persistent class hierarchy is subject to future changes.
- */
-template <class T>
-class Global : public PersistentBase<T> {
- public:
- /**
- * A Global with no storage cell.
- */
- V8_INLINE Global() : PersistentBase<T>(nullptr) {}
-
- /**
- * Construct a Global from a Local.
- * When the Local is non-empty, a new storage cell is created
- * pointing to the same object, and no flags are set.
- */
- template <class S>
- V8_INLINE Global(Isolate* isolate, Local<S> that)
- : PersistentBase<T>(PersistentBase<T>::New(isolate, *that)) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- }
-
- /**
- * Construct a Global from a PersistentBase.
- * When the Persistent is non-empty, a new storage cell is created
- * pointing to the same object, and no flags are set.
- */
- template <class S>
- V8_INLINE Global(Isolate* isolate, const PersistentBase<S>& that)
- : PersistentBase<T>(PersistentBase<T>::New(isolate, that.val_)) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- }
-
- /**
- * Move constructor.
- */
- V8_INLINE Global(Global&& other);
-
- V8_INLINE ~Global() { this->Reset(); }
-
- /**
- * Move via assignment.
- */
- template <class S>
- V8_INLINE Global& operator=(Global<S>&& rhs);
-
- /**
- * Pass allows returning unique handles from functions, etc.
- */
- Global Pass() { return static_cast<Global&&>(*this); }
-
- /*
- * For compatibility with Chromium's base::Bind (base::Passed).
- */
- using MoveOnlyTypeForCPP03 = void;
-
- Global(const Global&) = delete;
- void operator=(const Global&) = delete;
-
- private:
- template <class F>
- friend class ReturnValue;
- V8_INLINE T* operator*() const { return this->val_; }
-};
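
Global's move-only semantics in practice, sketched (function names are illustrative):

#include <utility>
#include "v8.h"

v8::Global<v8::Object> MakeGlobal(v8::Isolate* isolate,
                                  v8::Local<v8::Object> obj) {
  v8::Global<v8::Object> g(isolate, obj);
  return g;  // Moved out to the caller; Global has no copy constructor.
}

void Example(v8::Isolate* isolate, v8::Local<v8::Object> obj) {
  v8::Global<v8::Object> owner = MakeGlobal(isolate, obj);
  v8::Global<v8::Object> other = std::move(owner);  // `owner` is now empty.
  // v8::Global<v8::Object> copy = other;           // Would not compile.
}
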
-
-
-// UniquePersistent is an alias for Global for historical reason.
-template <class T>
-using UniquePersistent = Global<T>;
-
-/**
- * Deprecated. Use |TracedReference<T>| instead.
- */
-template <typename T>
-struct TracedGlobalTrait {};
-
-class TracedReferenceBase {
- public:
- /**
- * Returns true if the reference is empty, i.e., has not been assigned
- * an object.
- */
- bool IsEmpty() const { return val_ == nullptr; }
-
- /**
- * If non-empty, destroy the underlying storage cell. |IsEmpty| will return
- * true after this call.
- */
- V8_INLINE void Reset();
-
- /**
- * Construct a Local<Value> from this handle.
- */
- V8_INLINE v8::Local<v8::Value> Get(v8::Isolate* isolate) const;
-
- /**
- * Returns true if this TracedReference is empty, i.e., has not been
- * assigned an object. This version of IsEmpty is thread-safe.
- */
- bool IsEmptyThreadSafe() const {
- return this->GetSlotThreadSafe() == nullptr;
- }
-
- /**
- * Assigns a wrapper class ID to the handle.
- */
- V8_INLINE void SetWrapperClassId(uint16_t class_id);
-
- /**
- * Returns the class ID previously assigned to this handle or 0 if no class ID
- * was previously assigned.
- */
- V8_INLINE uint16_t WrapperClassId() const;
-
- protected:
- /**
- * Update this reference in a thread-safe way.
- */
- void SetSlotThreadSafe(void* new_val) {
- reinterpret_cast<std::atomic<void*>*>(&val_)->store(
- new_val, std::memory_order_relaxed);
- }
-
- /**
- * Get this reference in a thread-safe way
- */
- const void* GetSlotThreadSafe() const {
- return reinterpret_cast<std::atomic<const void*> const*>(&val_)->load(
- std::memory_order_relaxed);
- }
-
- V8_EXPORT void CheckValue() const;
-
- // val_ points to a GlobalHandles node.
- internal::Address* val_ = nullptr;
-
- friend class internal::BasicTracedReferenceExtractor;
- template <typename F>
- friend class Local;
- template <typename U>
- friend bool operator==(const TracedReferenceBase&, const Local<U>&);
- friend bool operator==(const TracedReferenceBase&,
- const TracedReferenceBase&);
-};
-
-/**
- * A traced handle with copy and move semantics. The handle is to be used
- * together with |v8::EmbedderHeapTracer| or as part of GarbageCollected objects
- * (see v8-cppgc.h) and specifies edges from C++ objects to JavaScript.
- *
- * The exact semantics are:
- * - Tracing garbage collections use |v8::EmbedderHeapTracer| or cppgc.
- * - Non-tracing garbage collections consult
- * |v8::EmbedderRootsHandler::IsRoot()| to determine whether the handle
- * should be treated as a root.
- *
- * Note that the base class cannot be instantiated itself. Choose from
- * - TracedGlobal
- * - TracedReference
- */
-template <typename T>
-class BasicTracedReference : public TracedReferenceBase {
- public:
- /**
- * Construct a Local<T> from this handle.
- */
- Local<T> Get(Isolate* isolate) const { return Local<T>::New(isolate, *this); }
-
- template <class S>
- V8_INLINE BasicTracedReference<S>& As() const {
- return reinterpret_cast<BasicTracedReference<S>&>(
- const_cast<BasicTracedReference<T>&>(*this));
- }
-
- T* operator->() const {
-#ifdef V8_ENABLE_CHECKS
- CheckValue();
-#endif // V8_ENABLE_CHECKS
- return reinterpret_cast<T*>(val_);
- }
- T* operator*() const {
-#ifdef V8_ENABLE_CHECKS
- CheckValue();
-#endif // V8_ENABLE_CHECKS
- return reinterpret_cast<T*>(val_);
- }
-
- private:
- enum DestructionMode { kWithDestructor, kWithoutDestructor };
-
- /**
- * An empty BasicTracedReference without storage cell.
- */
- BasicTracedReference() = default;
-
- V8_INLINE static internal::Address* New(Isolate* isolate, T* that, void* slot,
- DestructionMode destruction_mode);
-
- friend class EmbedderHeapTracer;
- template <typename F>
- friend class Local;
- friend class Object;
- template <typename F>
- friend class TracedGlobal;
- template <typename F>
- friend class TracedReference;
- template <typename F>
- friend class BasicTracedReference;
- template <typename F>
- friend class ReturnValue;
-};
-
-/**
- * A traced handle with destructor that clears the handle. For more details see
- * BasicTracedReference.
- */
-template <typename T>
-class TracedGlobal : public BasicTracedReference<T> {
- public:
- using BasicTracedReference<T>::Reset;
-
- /**
- * Destructor resetting the handle.
- */
- ~TracedGlobal() { this->Reset(); }
-
- /**
- * An empty TracedGlobal without storage cell.
- */
- TracedGlobal() : BasicTracedReference<T>() {}
-
- /**
- * Construct a TracedGlobal from a Local.
- *
- * When the Local is non-empty, a new storage cell is created
- * pointing to the same object.
- */
- template <class S>
- TracedGlobal(Isolate* isolate, Local<S> that) : BasicTracedReference<T>() {
- this->val_ = this->New(isolate, that.val_, &this->val_,
- BasicTracedReference<T>::kWithDestructor);
- static_assert(std::is_base_of<T, S>::value, "type check");
- }
-
- /**
- * Move constructor initializing TracedGlobal from an existing one.
- */
- V8_INLINE TracedGlobal(TracedGlobal&& other) {
- // Forward to operator=.
- *this = std::move(other);
- }
-
- /**
- * Move constructor initializing TracedGlobal from an existing one.
- */
- template <typename S>
- V8_INLINE TracedGlobal(TracedGlobal<S>&& other) {
- // Forward to operator=.
- *this = std::move(other);
- }
-
- /**
- * Copy constructor initializing TracedGlobal from an existing one.
- */
- V8_INLINE TracedGlobal(const TracedGlobal& other) {
- // Forward to operator=;
- *this = other;
- }
-
- /**
- * Copy constructor initializing TracedGlobal from an existing one.
- */
- template <typename S>
- V8_INLINE TracedGlobal(const TracedGlobal<S>& other) {
- // Forward to operator=;
- *this = other;
- }
-
- /**
- * Move assignment operator initializing TracedGlobal from an existing one.
- */
- V8_INLINE TracedGlobal& operator=(TracedGlobal&& rhs);
-
- /**
- * Move assignment operator initializing TracedGlobal from an existing one.
- */
- template <class S>
- V8_INLINE TracedGlobal& operator=(TracedGlobal<S>&& rhs);
-
- /**
- * Copy assignment operator initializing TracedGlobal from an existing one.
- *
- * Note: Prohibited when |other| has a finalization callback set through
- * |SetFinalizationCallback|.
- */
- V8_INLINE TracedGlobal& operator=(const TracedGlobal& rhs);
-
- /**
- * Copy assignment operator initializing TracedGlobal from an existing one.
- *
- * Note: Prohibited when |other| has a finalization callback set through
- * |SetFinalizationCallback|.
- */
- template <class S>
- V8_INLINE TracedGlobal& operator=(const TracedGlobal<S>& rhs);
-
- /**
- * If non-empty, destroy the underlying storage cell and create a new one with
- * the contents of other if other is non empty
- */
- template <class S>
- V8_INLINE void Reset(Isolate* isolate, const Local<S>& other);
-
- template <class S>
- V8_INLINE TracedGlobal<S>& As() const {
- return reinterpret_cast<TracedGlobal<S>&>(
- const_cast<TracedGlobal<T>&>(*this));
- }
-
- /**
- * Adds a finalization callback to the handle. The type of this callback is
- * similar to WeakCallbackType::kInternalFields, i.e., it will pass the
- * parameter and the first two internal fields of the object.
- *
- * The callback is supposed to reset the handle. No
- * further V8 API may be called in this callback. In case additional work
- * involving V8 needs to be done, a second callback can be scheduled using
- * WeakCallbackInfo<void>::SetSecondPassCallback.
- */
- V8_INLINE void SetFinalizationCallback(
- void* parameter, WeakCallbackInfo<void>::Callback callback);
-};
-
-/**
- * A traced handle without destructor that clears the handle. The embedder needs
- * to ensure that the handle is not accessed once the V8 object has been
- * reclaimed. This can happen when the handle is not passed through the
- * EmbedderHeapTracer. For more details see BasicTracedReference.
- *
- * The reference assumes the embedder has precise knowledge about references at
- * all times. In case V8 needs to separately handle on-stack references, the
- * embedder is required to set the stack start through
- * |EmbedderHeapTracer::SetStackStart|.
- */
-template <typename T>
-class TracedReference : public BasicTracedReference<T> {
- public:
- using BasicTracedReference<T>::Reset;
-
- /**
- * An empty TracedReference without storage cell.
- */
- TracedReference() : BasicTracedReference<T>() {}
-
- /**
- * Construct a TracedReference from a Local.
- *
- * When the Local is non-empty, a new storage cell is created
- * pointing to the same object.
- */
- template <class S>
- TracedReference(Isolate* isolate, Local<S> that) : BasicTracedReference<T>() {
- this->val_ = this->New(isolate, that.val_, &this->val_,
- BasicTracedReference<T>::kWithoutDestructor);
- static_assert(std::is_base_of<T, S>::value, "type check");
- }
-
- /**
- * Move constructor initializing TracedReference from an
- * existing one.
- */
- V8_INLINE TracedReference(TracedReference&& other) {
- // Forward to operator=.
- *this = std::move(other);
- }
-
- /**
- * Move constructor initializing TracedReference from an
- * existing one.
- */
- template <typename S>
- V8_INLINE TracedReference(TracedReference<S>&& other) {
- // Forward to operator=.
- *this = std::move(other);
- }
-
- /**
- * Copy constructor initializing TracedReference from an
- * existing one.
- */
- V8_INLINE TracedReference(const TracedReference& other) {
- // Forward to operator=;
- *this = other;
- }
-
- /**
- * Copy constructor initializing TracedReference from an
- * existing one.
- */
- template <typename S>
- V8_INLINE TracedReference(const TracedReference<S>& other) {
- // Forward to operator=;
- *this = other;
- }
-
- /**
- * Move assignment operator initializing TracedGlobal from an existing one.
- */
- V8_INLINE TracedReference& operator=(TracedReference&& rhs);
-
- /**
- * Move assignment operator initializing TracedGlobal from an existing one.
- */
- template <class S>
- V8_INLINE TracedReference& operator=(TracedReference<S>&& rhs);
-
- /**
- * Copy assignment operator initializing TracedGlobal from an existing one.
- */
- V8_INLINE TracedReference& operator=(const TracedReference& rhs);
-
- /**
- * Copy assignment operator initializing TracedGlobal from an existing one.
- */
- template <class S>
- V8_INLINE TracedReference& operator=(const TracedReference<S>& rhs);
-
- /**
- * If non-empty, destroy the underlying storage cell and create a new one with
- * the contents of other if other is non empty
- */
- template <class S>
- V8_INLINE void Reset(Isolate* isolate, const Local<S>& other);
-
- template <class S>
- V8_INLINE TracedReference<S>& As() const {
- return reinterpret_cast<TracedReference<S>&>(
- const_cast<TracedReference<T>&>(*this));
- }
-};
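
A minimal TracedReference sketch (the `Wrapper` class is illustrative; it assumes the embedder participates in tracing via v8::EmbedderHeapTracer or cppgc as described above, since nothing clears the handle on destruction):

#include "v8.h"

class Wrapper {
 public:
  Wrapper(v8::Isolate* isolate, v8::Local<v8::Object> obj)
      : ref_(isolate, obj) {}

  v8::Local<v8::Object> Get(v8::Isolate* isolate) const {
    return ref_.Get(isolate);  // Rehydrate a Local inside a HandleScope.
  }

 private:
  v8::TracedReference<v8::Object> ref_;  // Edge from C++ to the JS object.
};
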
-
- /**
- * A stack-allocated class that governs a number of local handles.
- * After a handle scope has been created, all local handles will be
- * allocated within that handle scope until either the handle scope is
- * deleted or another handle scope is created. If there is already a
- * handle scope and a new one is created, all allocations will take
- * place in the new handle scope until it is deleted. After that,
- * new handles will again be allocated in the original handle scope.
- *
- * After the handle scope of a local handle has been deleted the
- * garbage collector will no longer track the object stored in the
- * handle and may deallocate it. The behavior of accessing a handle
- * for which the handle scope has been deleted is undefined.
- */
-class V8_EXPORT V8_NODISCARD HandleScope {
- public:
- explicit HandleScope(Isolate* isolate);
-
- ~HandleScope();
-
- /**
- * Counts the number of allocated handles.
- */
- static int NumberOfHandles(Isolate* isolate);
-
- V8_INLINE Isolate* GetIsolate() const {
- return reinterpret_cast<Isolate*>(isolate_);
- }
-
- HandleScope(const HandleScope&) = delete;
- void operator=(const HandleScope&) = delete;
-
- protected:
- V8_INLINE HandleScope() = default;
-
- void Initialize(Isolate* isolate);
-
- static internal::Address* CreateHandle(internal::Isolate* isolate,
- internal::Address value);
-
- private:
- // Declaring operator new and delete as deleted is not spec compliant.
- // Therefore declare them private instead to disable dynamic alloc
- void* operator new(size_t size);
- void* operator new[](size_t size);
- void operator delete(void*, size_t);
- void operator delete[](void*, size_t);
-
- internal::Isolate* isolate_;
- internal::Address* prev_next_;
- internal::Address* prev_limit_;
-
- // Local::New uses CreateHandle with an Isolate* parameter.
- template<class F> friend class Local;
-
- // Object::GetInternalField and Context::GetEmbedderData use CreateHandle with
- // a HeapObject in their shortcuts.
- friend class Object;
- friend class Context;
-};
-
-/**
- * A HandleScope which first allocates a handle in the current scope
- * which will be later filled with the escape value.
- */
-class V8_EXPORT V8_NODISCARD EscapableHandleScope : public HandleScope {
- public:
- explicit EscapableHandleScope(Isolate* isolate);
- V8_INLINE ~EscapableHandleScope() = default;
-
- /**
- * Pushes the value into the previous scope and returns a handle to it.
- * Cannot be called twice.
- */
- template <class T>
- V8_INLINE Local<T> Escape(Local<T> value) {
- internal::Address* slot =
- Escape(reinterpret_cast<internal::Address*>(*value));
- return Local<T>(reinterpret_cast<T*>(slot));
- }
-
- template <class T>
- V8_INLINE MaybeLocal<T> EscapeMaybe(MaybeLocal<T> value) {
- return Escape(value.FromMaybe(Local<T>()));
- }
-
- EscapableHandleScope(const EscapableHandleScope&) = delete;
- void operator=(const EscapableHandleScope&) = delete;
-
- private:
- // Declaring operator new and delete as deleted is not spec compliant.
- // Therefore declare them private instead to disable dynamic alloc
- void* operator new(size_t size);
- void* operator new[](size_t size);
- void operator delete(void*, size_t);
- void operator delete[](void*, size_t);
-
- internal::Address* Escape(internal::Address* escape_value);
- internal::Address* escape_slot_;
-};
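
The Escape() pattern from the comment above, sketched (the function name is illustrative):

#include "v8.h"

v8::Local<v8::Array> MakeArray(v8::Isolate* isolate) {
  v8::EscapableHandleScope scope(isolate);
  v8::Local<v8::Array> arr = v8::Array::New(isolate, 3);
  // Copies the handle into the enclosing scope so the caller may use it
  // after this scope dies; calling Escape() twice is not allowed.
  return scope.Escape(arr);
}
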
-
-/**
- * A SealHandleScope acts like a handle scope in which no handle allocations
- * are allowed. It can be useful for debugging handle leaks.
- * Handles can be allocated within inner normal HandleScopes.
- */
-class V8_EXPORT V8_NODISCARD SealHandleScope {
- public:
- explicit SealHandleScope(Isolate* isolate);
- ~SealHandleScope();
-
- SealHandleScope(const SealHandleScope&) = delete;
- void operator=(const SealHandleScope&) = delete;
-
- private:
- // Declaring operator new and delete as deleted is not spec compliant.
- // Therefore declare them private instead to disable dynamic alloc
- void* operator new(size_t size);
- void* operator new[](size_t size);
- void operator delete(void*, size_t);
- void operator delete[](void*, size_t);
-
- internal::Isolate* const isolate_;
- internal::Address* prev_limit_;
- int prev_sealed_level_;
-};
-
-// --- Special objects ---
-
-/**
- * The superclass of objects that can reside on V8's heap.
- */
-class V8_EXPORT Data {
- public:
- /**
- * Returns true if this data is a |v8::Value|.
- */
- bool IsValue() const;
-
- /**
- * Returns true if this data is a |v8::Module|.
- */
- bool IsModule() const;
-
- /**
- * Returns true if this data is a |v8::Private|.
- */
- bool IsPrivate() const;
-
- /**
- * Returns true if this data is a |v8::ObjectTemplate|.
- */
- bool IsObjectTemplate() const;
-
- /**
- * Returns true if this data is a |v8::FunctionTemplate|.
- */
- bool IsFunctionTemplate() const;
-
- /**
- * Returns true if this data is a |v8::Context|.
- */
- bool IsContext() const;
-
- private:
- Data();
-};
-
-/**
- * A container type that holds relevant metadata for module loading.
- *
- * This is passed back to the embedder as part of
- * HostImportModuleDynamicallyCallback for module loading.
- */
-class V8_EXPORT ScriptOrModule {
- public:
- /**
- * The name that was passed by the embedder as ResourceName to the
- * ScriptOrigin. This can be either a v8::String or v8::Undefined.
- */
- Local<Value> GetResourceName();
-
- /**
- * The options that were passed by the embedder as HostDefinedOptions to
- * the ScriptOrigin.
- */
- Local<PrimitiveArray> GetHostDefinedOptions();
-};
-
-/**
- * An array to hold Primitive values. This is used by the embedder to
- * pass host defined options to the ScriptOrigin during compilation.
- *
- * This is passed back to the embedder as part of
- * HostImportModuleDynamicallyCallback for module loading.
- *
- */
-class V8_EXPORT PrimitiveArray {
- public:
- static Local<PrimitiveArray> New(Isolate* isolate, int length);
- int Length() const;
- void Set(Isolate* isolate, int index, Local<Primitive> item);
- Local<Primitive> Get(Isolate* isolate, int index);
-};
-
-/**
- * The optional attributes of ScriptOrigin.
- */
-class ScriptOriginOptions {
- public:
- V8_INLINE ScriptOriginOptions(bool is_shared_cross_origin = false,
- bool is_opaque = false, bool is_wasm = false,
- bool is_module = false)
- : flags_((is_shared_cross_origin ? kIsSharedCrossOrigin : 0) |
- (is_wasm ? kIsWasm : 0) | (is_opaque ? kIsOpaque : 0) |
- (is_module ? kIsModule : 0)) {}
- V8_INLINE ScriptOriginOptions(int flags)
- : flags_(flags &
- (kIsSharedCrossOrigin | kIsOpaque | kIsWasm | kIsModule)) {}
-
- bool IsSharedCrossOrigin() const {
- return (flags_ & kIsSharedCrossOrigin) != 0;
- }
- bool IsOpaque() const { return (flags_ & kIsOpaque) != 0; }
- bool IsWasm() const { return (flags_ & kIsWasm) != 0; }
- bool IsModule() const { return (flags_ & kIsModule) != 0; }
-
- int Flags() const { return flags_; }
-
- private:
- enum {
- kIsSharedCrossOrigin = 1,
- kIsOpaque = 1 << 1,
- kIsWasm = 1 << 2,
- kIsModule = 1 << 3
- };
- const int flags_;
-};
-
-/**
- * The origin, within a file, of a script.
- */
-class ScriptOrigin {
- public:
-#if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */
- V8_DEPRECATED("Use constructor with primitive C++ types")
-#endif
- V8_INLINE explicit ScriptOrigin(
- Local<Value> resource_name, Local<Integer> resource_line_offset,
- Local<Integer> resource_column_offset,
- Local<Boolean> resource_is_shared_cross_origin = Local<Boolean>(),
- Local<Integer> script_id = Local<Integer>(),
- Local<Value> source_map_url = Local<Value>(),
- Local<Boolean> resource_is_opaque = Local<Boolean>(),
- Local<Boolean> is_wasm = Local<Boolean>(),
- Local<Boolean> is_module = Local<Boolean>(),
- Local<PrimitiveArray> host_defined_options = Local<PrimitiveArray>());
-#if defined(_MSC_VER) && _MSC_VER >= 1910 /* Disable on VS2015 */
- V8_DEPRECATED("Use constructor that takes an isolate")
-#endif
- V8_INLINE explicit ScriptOrigin(
- Local<Value> resource_name, int resource_line_offset = 0,
- int resource_column_offset = 0,
- bool resource_is_shared_cross_origin = false, int script_id = -1,
- Local<Value> source_map_url = Local<Value>(),
- bool resource_is_opaque = false, bool is_wasm = false,
- bool is_module = false,
- Local<PrimitiveArray> host_defined_options = Local<PrimitiveArray>());
- V8_INLINE explicit ScriptOrigin(
- Isolate* isolate, Local<Value> resource_name,
- int resource_line_offset = 0, int resource_column_offset = 0,
- bool resource_is_shared_cross_origin = false, int script_id = -1,
- Local<Value> source_map_url = Local<Value>(),
- bool resource_is_opaque = false, bool is_wasm = false,
- bool is_module = false,
- Local<PrimitiveArray> host_defined_options = Local<PrimitiveArray>());
-
- V8_INLINE Local<Value> ResourceName() const;
-  V8_DEPRECATED("Use getter with primitive C++ types.")
-  V8_INLINE Local<Integer> ResourceLineOffset() const;
-  V8_DEPRECATED("Use getter with primitive C++ types.")
-  V8_INLINE Local<Integer> ResourceColumnOffset() const;
-  V8_DEPRECATED("Use getter with primitive C++ types.")
- V8_INLINE Local<Integer> ScriptID() const;
- V8_INLINE int LineOffset() const;
- V8_INLINE int ColumnOffset() const;
- V8_INLINE int ScriptId() const;
- V8_INLINE Local<Value> SourceMapUrl() const;
- V8_INLINE Local<PrimitiveArray> HostDefinedOptions() const;
- V8_INLINE ScriptOriginOptions Options() const { return options_; }
-
- private:
- Isolate* isolate_;
- Local<Value> resource_name_;
- int resource_line_offset_;
- int resource_column_offset_;
- ScriptOriginOptions options_;
- int script_id_;
- Local<Value> source_map_url_;
- Local<PrimitiveArray> host_defined_options_;
-};
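
Of the constructors above, only the Isolate-taking one is non-deprecated; a sketch (the resource name and function name are illustrative, and an active HandleScope is assumed):

#include "v8.h"

v8::ScriptOrigin MakeOrigin(v8::Isolate* isolate) {
  v8::Local<v8::String> name =
      v8::String::NewFromUtf8Literal(isolate, "bootstrap.js");
  // Offsets are plain ints here, not Local<Integer> as in the old API.
  return v8::ScriptOrigin(isolate, name, /*resource_line_offset=*/0,
                          /*resource_column_offset=*/0);
}
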
-
-/**
- * A compiled JavaScript script, not yet tied to a Context.
- */
-class V8_EXPORT UnboundScript {
- public:
- /**
- * Binds the script to the currently entered context.
- */
- Local<Script> BindToCurrentContext();
-
- int GetId() const;
- Local<Value> GetScriptName();
-
- /**
- * Data read from magic sourceURL comments.
- */
- Local<Value> GetSourceURL();
- /**
- * Data read from magic sourceMappingURL comments.
- */
- Local<Value> GetSourceMappingURL();
-
- /**
- * Returns zero based line number of the code_pos location in the script.
- * -1 will be returned if no information is available.
- */
- int GetLineNumber(int code_pos);
-
- static const int kNoScriptId = 0;
-};
-
-/**
- * A compiled JavaScript module, not yet tied to a Context.
- */
-class V8_EXPORT UnboundModuleScript : public Data {
- // Only used as a container for code caching.
-};
-
-/**
- * A location in JavaScript source.
- */
-class V8_EXPORT Location {
- public:
- int GetLineNumber() { return line_number_; }
- int GetColumnNumber() { return column_number_; }
-
- Location(int line_number, int column_number)
- : line_number_(line_number), column_number_(column_number) {}
-
- private:
- int line_number_;
- int column_number_;
-};
-
-/**
- * A fixed-sized array with elements of type Data.
- */
-class V8_EXPORT FixedArray : public Data {
- public:
- int Length() const;
- Local<Data> Get(Local<Context> context, int i) const;
-};
-
-class V8_EXPORT ModuleRequest : public Data {
- public:
- /**
- * Returns the module specifier for this ModuleRequest.
- */
- Local<String> GetSpecifier() const;
-
- /**
- * Returns the source code offset of this module request.
- * Use Module::SourceOffsetToLocation to convert this to line/column numbers.
- */
- int GetSourceOffset() const;
-
- /**
- * Contains the import assertions for this request in the form:
- * [key1, value1, source_offset1, key2, value2, source_offset2, ...].
- * The keys and values are of type v8::String, and the source offsets are of
- * type Int32. Use Module::SourceOffsetToLocation to convert the source
- * offsets to Locations with line/column numbers.
- *
- * All assertions present in the module request will be supplied in this
- * list, regardless of whether they are supported by the host. Per
- * https://tc39.es/proposal-import-assertions/#sec-hostgetsupportedimportassertions,
- * hosts are expected to ignore assertions that they do not support (as
- * opposed to, for example, triggering an error if an unsupported assertion is
- * present).
- */
- Local<FixedArray> GetImportAssertions() const;
-
- V8_INLINE static ModuleRequest* Cast(Data* data);
-
- private:
- static void CheckCast(Data* obj);
-};
-
-/**
- * A compiled JavaScript module.
- */
-class V8_EXPORT Module : public Data {
- public:
- /**
- * The different states a module can be in.
- *
- * This corresponds to the states used in ECMAScript except that "evaluated"
- * is split into kEvaluated and kErrored, indicating success and failure,
- * respectively.
- */
- enum Status {
- kUninstantiated,
- kInstantiating,
- kInstantiated,
- kEvaluating,
- kEvaluated,
- kErrored
- };
-
- /**
- * Returns the module's current status.
- */
- Status GetStatus() const;
-
- /**
- * For a module in kErrored status, this returns the corresponding exception.
- */
- Local<Value> GetException() const;
-
- /**
- * Returns the number of modules requested by this module.
- */
- V8_DEPRECATED("Use Module::GetModuleRequests() and FixedArray::Length().")
- int GetModuleRequestsLength() const;
-
- /**
- * Returns the ith module specifier in this module.
- * i must be < GetModuleRequestsLength() and >= 0.
- */
- V8_DEPRECATED(
- "Use Module::GetModuleRequests() and ModuleRequest::GetSpecifier().")
- Local<String> GetModuleRequest(int i) const;
-
- /**
- * Returns the source location (line number and column number) of the ith
- * module specifier's first occurrence in this module.
- */
- V8_DEPRECATED(
- "Use Module::GetModuleRequests(), ModuleRequest::GetSourceOffset(), and "
- "Module::SourceOffsetToLocation().")
- Location GetModuleRequestLocation(int i) const;
-
- /**
- * Returns the ModuleRequests for this module.
- */
- Local<FixedArray> GetModuleRequests() const;
-
- /**
- * For the given source text offset in this module, returns the corresponding
- * Location with line and column numbers.
- */
- Location SourceOffsetToLocation(int offset) const;
-
- /**
- * Returns the identity hash for this object.
- */
- int GetIdentityHash() const;
-
- using ResolveCallback =
- MaybeLocal<Module> (*)(Local<Context> context, Local<String> specifier,
- Local<Module> referrer);
- using ResolveModuleCallback = MaybeLocal<Module> (*)(
- Local<Context> context, Local<String> specifier,
- Local<FixedArray> import_assertions, Local<Module> referrer);
-
- /**
- * Instantiates the module and its dependencies.
- *
- * Returns an empty Maybe<bool> if an exception occurred during
- * instantiation. (In the case where the callback throws an exception, that
- * exception is propagated.)
- */
- V8_DEPRECATED(
- "Use the version of InstantiateModule that takes a ResolveModuleCallback "
- "parameter")
- V8_WARN_UNUSED_RESULT Maybe<bool> InstantiateModule(Local<Context> context,
- ResolveCallback callback);
- V8_WARN_UNUSED_RESULT Maybe<bool> InstantiateModule(
- Local<Context> context, ResolveModuleCallback callback);
-
- /**
- * Evaluates the module and its dependencies.
- *
- * If status is kInstantiated, run the module's code and return a Promise
- * object. On success, set status to kEvaluated and resolve the Promise with
- * the completion value; on failure, set status to kErrored and reject the
- * Promise with the error.
- *
- * If IsGraphAsync() is false, the returned Promise is settled.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> Evaluate(Local<Context> context);
-
- /**
- * Returns the namespace object of this module.
- *
- * The module's status must be at least kInstantiated.
- */
- Local<Value> GetModuleNamespace();
-
- /**
- * Returns the corresponding context-unbound module script.
- *
- * The module must be unevaluated, i.e. its status must not be kEvaluating,
- * kEvaluated or kErrored.
- */
- Local<UnboundModuleScript> GetUnboundModuleScript();
-
- /**
- * Returns the underlying script's id.
- *
- * The module must be a SourceTextModule and must not have a kErrored status.
- */
- int ScriptId() const;
-
- /**
- * Returns whether this module or any of its requested modules is async,
- * i.e. contains top-level await.
- *
- * The module's status must be at least kInstantiated.
- */
- bool IsGraphAsync() const;
-
- /**
- * Returns whether the module is a SourceTextModule.
- */
- bool IsSourceTextModule() const;
-
- /**
- * Returns whether the module is a SyntheticModule.
- */
- bool IsSyntheticModule() const;
-
- /*
- * Callback defined in the embedder. This is responsible for setting
- * the module's exported values with calls to SetSyntheticModuleExport().
- * The callback must return a resolved Promise to indicate success (where no
- * exception was thrown) and return an empty MaybeLocal to indicate failure
- * (where an exception was thrown).
- */
- using SyntheticModuleEvaluationSteps =
- MaybeLocal<Value> (*)(Local<Context> context, Local<Module> module);
-
- /**
- * Creates a new SyntheticModule with the specified export names, where
- * evaluation_steps will be executed upon module evaluation.
- * export_names must not contain duplicates.
- * module_name is used solely for logging/debugging and doesn't affect module
- * behavior.
- */
- static Local<Module> CreateSyntheticModule(
- Isolate* isolate, Local<String> module_name,
- const std::vector<Local<String>>& export_names,
- SyntheticModuleEvaluationSteps evaluation_steps);
-
- /**
- * Set this module's exported value for the name export_name to the specified
- * export_value. This method must be called only on Modules created via
- * CreateSyntheticModule. An error will be thrown if export_name is not one
- * of the export_names that were passed in that CreateSyntheticModule call.
- * Returns Just(true) on success, Nothing<bool>() if an error was thrown.
- */
- V8_WARN_UNUSED_RESULT Maybe<bool> SetSyntheticModuleExport(
- Isolate* isolate, Local<String> export_name, Local<Value> export_value);
-
- V8_INLINE static Module* Cast(Data* data);
-
- private:
- static void CheckCast(Data* obj);
-};
-
-/**
- * A compiled JavaScript script, tied to a Context which was active when the
- * script was compiled.
- */
-class V8_EXPORT Script {
- public:
- /**
- * A shorthand for ScriptCompiler::Compile().
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<Script> Compile(
- Local<Context> context, Local<String> source,
- ScriptOrigin* origin = nullptr);
-
- /**
- * Runs the script returning the resulting value. It will be run in the
- * context in which it was created (ScriptCompiler::CompileBound or
- * UnboundScript::BindToCurrentContext()).
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> Run(Local<Context> context);
-
- /**
- * Returns the corresponding context-unbound script.
- */
- Local<UnboundScript> GetUnboundScript();
-};
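
Compile-and-run with the usual emptiness checks, sketched (assuming a live `context`; the helper name is illustrative):

#include "v8.h"

v8::MaybeLocal<v8::Value> CompileAndRun(v8::Local<v8::Context> context,
                                        v8::Local<v8::String> source) {
  v8::Local<v8::Script> script;
  if (!v8::Script::Compile(context, source).ToLocal(&script)) {
    return v8::MaybeLocal<v8::Value>();  // Compilation threw.
  }
  return script->Run(context);  // Also empty if execution threw.
}
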
-
-enum class ScriptType { kClassic, kModule };
-
-/**
- * For compiling scripts.
- */
-class V8_EXPORT ScriptCompiler {
- public:
- class ConsumeCodeCacheTask;
-
- /**
- * Compilation data that the embedder can cache and pass back to speed up
- * future compilations. The data is produced if the CompilerOptions passed to
- * the compilation functions in ScriptCompiler contains produce_data_to_cache
- * = true. The data to cache can then can be retrieved from
- * UnboundScript.
- */
- struct V8_EXPORT CachedData {
- enum BufferPolicy {
- BufferNotOwned,
- BufferOwned
- };
-
- CachedData()
- : data(nullptr),
- length(0),
- rejected(false),
- buffer_policy(BufferNotOwned) {}
-
- // If buffer_policy is BufferNotOwned, the caller keeps the ownership of
- // data and guarantees that it stays alive until the CachedData object is
- // destroyed. If the policy is BufferOwned, the given data will be deleted
- // (with delete[]) when the CachedData object is destroyed.
- CachedData(const uint8_t* data, int length,
- BufferPolicy buffer_policy = BufferNotOwned);
- ~CachedData();
- // TODO(marja): Async compilation; add constructors which take a callback
- // which will be called when V8 no longer needs the data.
- const uint8_t* data;
- int length;
- bool rejected;
- BufferPolicy buffer_policy;
-
- // Prevent copying.
- CachedData(const CachedData&) = delete;
- CachedData& operator=(const CachedData&) = delete;
- };
-
- /**
- * Source code which can be then compiled to a UnboundScript or Script.
- */
- class Source {
- public:
- // Source takes ownership of both CachedData and CodeCacheConsumeTask.
- V8_INLINE Source(Local<String> source_string, const ScriptOrigin& origin,
- CachedData* cached_data = nullptr,
- ConsumeCodeCacheTask* consume_cache_task = nullptr);
- // Source takes ownership of both CachedData and ConsumeCodeCacheTask.
- V8_INLINE explicit Source(
- Local<String> source_string, CachedData* cached_data = nullptr,
- ConsumeCodeCacheTask* consume_cache_task = nullptr);
- V8_INLINE ~Source() = default;
-
- // Ownership of the CachedData or its buffers is *not* transferred to the
- // caller. The CachedData object is alive as long as the Source object is
- // alive.
- V8_INLINE const CachedData* GetCachedData() const;
-
- V8_INLINE const ScriptOriginOptions& GetResourceOptions() const;
-
- private:
- friend class ScriptCompiler;
-
- Local<String> source_string;
-
- // Origin information
- Local<Value> resource_name;
- int resource_line_offset;
- int resource_column_offset;
- ScriptOriginOptions resource_options;
- Local<Value> source_map_url;
- Local<PrimitiveArray> host_defined_options;
-
- // Cached data from a previous compilation (if a kConsume*Cache flag is
- // set), or newly generated cache data (if a kProduce*Cache flag is set),
- // filled in when calling a compile method.
- std::unique_ptr<CachedData> cached_data;
- std::unique_ptr<ConsumeCodeCacheTask> consume_cache_task;
- };
-
- /**
- * For streaming incomplete script data to V8. The embedder should implement a
- * subclass of this class.
- */
- class V8_EXPORT ExternalSourceStream {
- public:
- virtual ~ExternalSourceStream() = default;
-
- /**
- * V8 calls this to request the next chunk of data from the embedder. This
- * function will be called on a background thread, so it's OK to block and
- * wait for the data, if the embedder doesn't have data yet. Returns the
- * length of the data returned. When the data ends, GetMoreData should
- * return 0. Caller takes ownership of the data.
- *
- * When streaming UTF-8 data, V8 handles multi-byte characters split between
- * two data chunks, but doesn't handle multi-byte characters split between
- * more than two data chunks. The embedder can avoid this problem by always
- * returning at least 2 bytes of data.
- *
- * When streaming UTF-16 data, V8 does not handle characters split between
- * two data chunks. The embedder has to make sure that chunks have an even
- * length.
- *
- * If the embedder wants to cancel the streaming, they should make the next
- * GetMoreData call return 0. V8 will interpret it as end of data (and most
- * probably, parsing will fail). The streaming task will return as soon as
- * V8 has parsed the data it received so far.
- */
- virtual size_t GetMoreData(const uint8_t** src) = 0;
-
- /**
- * V8 calls this method to set a 'bookmark' at the current position in
- * the source stream, for the purpose of (maybe) later calling
- * ResetToBookmark. If ResetToBookmark is called later, then subsequent
- * calls to GetMoreData should return the same data as they did when
- * SetBookmark was called earlier.
- *
- * The embedder may return 'false' to indicate it cannot provide this
- * functionality.
- */
- virtual bool SetBookmark();
-
- /**
- * V8 calls this to return to a previously set bookmark.
- */
- virtual void ResetToBookmark();
- };
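-
-  // A minimal ExternalSourceStream sketch (not part of the original header;
-  // assumes <string> and <cstring>). It streams one in-memory chunk and then
-  // signals end-of-data:
-  //
-  //   class OneChunkStream : public v8::ScriptCompiler::ExternalSourceStream {
-  //    public:
-  //     explicit OneChunkStream(std::string source)
-  //         : source_(std::move(source)) {}
-  //     size_t GetMoreData(const uint8_t** src) override {
-  //       if (done_) return 0;  // 0 signals end of data.
-  //       done_ = true;
-  //       uint8_t* copy = new uint8_t[source_.size()];  // caller-owned
-  //       memcpy(copy, source_.data(), source_.size());
-  //       *src = copy;
-  //       return source_.size();
-  //     }
-  //    private:
-  //     std::string source_;
-  //     bool done_ = false;
-  //   };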
-
- /**
- * Source code which can be streamed into V8 in pieces. It will be parsed
- * while streaming and compiled after parsing has completed. StreamedSource
- * must be kept alive while the streaming task is run (see ScriptStreamingTask
- * below).
- */
- class V8_EXPORT StreamedSource {
- public:
- enum Encoding { ONE_BYTE, TWO_BYTE, UTF8, WINDOWS_1252 };
-
- StreamedSource(std::unique_ptr<ExternalSourceStream> source_stream,
- Encoding encoding);
- ~StreamedSource();
-
- internal::ScriptStreamingData* impl() const { return impl_.get(); }
-
- // Prevent copying.
- StreamedSource(const StreamedSource&) = delete;
- StreamedSource& operator=(const StreamedSource&) = delete;
-
- private:
- std::unique_ptr<internal::ScriptStreamingData> impl_;
- };
-
- /**
- * A streaming task which the embedder must run on a background thread to
- * stream scripts into V8. Returned by ScriptCompiler::StartStreaming.
- */
- class V8_EXPORT ScriptStreamingTask final {
- public:
- void Run();
-
- private:
- friend class ScriptCompiler;
-
- explicit ScriptStreamingTask(internal::ScriptStreamingData* data)
- : data_(data) {}
-
- internal::ScriptStreamingData* data_;
- };
-
- /**
- * A task which the embedder must run on a background thread to
- * consume a V8 code cache. Returned by
- * ScriptCompiler::StartConsumingCodeCache.
- */
- class V8_EXPORT ConsumeCodeCacheTask final {
- public:
- ~ConsumeCodeCacheTask();
-
- void Run();
-
- private:
- friend class ScriptCompiler;
-
- explicit ConsumeCodeCacheTask(
- std::unique_ptr<internal::BackgroundDeserializeTask> impl);
-
- std::unique_ptr<internal::BackgroundDeserializeTask> impl_;
- };
-
- enum CompileOptions {
- kNoCompileOptions = 0,
- kConsumeCodeCache,
- kEagerCompile
- };
-
- /**
- * The reason for which we are not requesting or providing a code cache.
- */
- enum NoCacheReason {
- kNoCacheNoReason = 0,
- kNoCacheBecauseCachingDisabled,
- kNoCacheBecauseNoResource,
- kNoCacheBecauseInlineScript,
- kNoCacheBecauseModule,
- kNoCacheBecauseStreamingSource,
- kNoCacheBecauseInspector,
- kNoCacheBecauseScriptTooSmall,
- kNoCacheBecauseCacheTooCold,
- kNoCacheBecauseV8Extension,
- kNoCacheBecauseExtensionModule,
- kNoCacheBecausePacScript,
- kNoCacheBecauseInDocumentWrite,
- kNoCacheBecauseResourceWithNoCacheHandler,
- kNoCacheBecauseDeferredProduceCodeCache
- };
-
- /**
- * Compiles the specified script (context-independent).
- * Cached data as part of the source object can be optionally produced to be
- * consumed later to speed up compilation of identical source scripts.
- *
- * Note that when producing cached data, the source's cached data must be
- * NULL. When consuming cached data, the cached data must have been
- * produced by the same version of V8, and the embedder needs to ensure the
- * cached data is the correct one for the given script.
- *
- * \param source Script source code.
- * \return Compiled script object (context independent; for running it must be
- * bound to a context).
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<UnboundScript> CompileUnboundScript(
- Isolate* isolate, Source* source,
- CompileOptions options = kNoCompileOptions,
- NoCacheReason no_cache_reason = kNoCacheNoReason);
-
- /**
- * Compiles the specified script (bound to current context).
- *
- * \param source Script source code.
- * \param pre_data Pre-parsing data, as obtained by ScriptData::PreCompile().
- *   Using pre_data speeds compilation if it's done multiple times.
- * Owned by caller, no references are kept when this function returns.
- * \return Compiled script object, bound to the context that was active
- * when this function was called. When run it will always use this
- * context.
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<Script> Compile(
- Local<Context> context, Source* source,
- CompileOptions options = kNoCompileOptions,
- NoCacheReason no_cache_reason = kNoCacheNoReason);
-
- /**
- * Returns a task which streams script data into V8, or NULL if the script
- * cannot be streamed. The user is responsible for running the task on a
- * background thread and deleting it. When run, the task starts parsing the
- * script, and it will request data from the StreamedSource as needed. When
- * ScriptStreamingTask::Run exits, all data has been streamed and the script
- * can be compiled (see Compile below).
- *
- * This API allows the embedder to start streaming with as little data as possible, and
- * the remaining data (for example, the ScriptOrigin) is passed to Compile.
- */
- static ScriptStreamingTask* StartStreaming(
- Isolate* isolate, StreamedSource* source,
- ScriptType type = ScriptType::kClassic);
-
- static ConsumeCodeCacheTask* StartConsumingCodeCache(
- Isolate* isolate, std::unique_ptr<CachedData> source);
-
- /**
- * Compiles a streamed script (bound to current context).
- *
- * This can only be called after the streaming has finished
- * (ScriptStreamingTask has been run). V8 doesn't construct the source string
- * during streaming, so the embedder needs to pass the full source here.
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<Script> Compile(
- Local<Context> context, StreamedSource* source,
- Local<String> full_source_string, const ScriptOrigin& origin);
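-
-  // Illustrative streaming flow (not part of the original header; error
-  // handling omitted). `stream` is an embedder-defined ExternalSourceStream,
-  // `full_source` is the complete source string, and RunOnBackgroundAndWait
-  // is a hypothetical helper that runs the task off the main thread:
-  //
-  //   v8::ScriptCompiler::StreamedSource streamed(
-  //       std::move(stream), v8::ScriptCompiler::StreamedSource::UTF8);
-  //   std::unique_ptr<v8::ScriptCompiler::ScriptStreamingTask> task(
-  //       v8::ScriptCompiler::StartStreaming(isolate, &streamed));
-  //   RunOnBackgroundAndWait(task.get());
-  //   v8::ScriptOrigin origin(isolate, resource_name);  // `resource_name` assumed
-  //   v8::Local<v8::Script> script =
-  //       v8::ScriptCompiler::Compile(context, &streamed, full_source, origin)
-  //           .ToLocalChecked();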
-
- /**
- * Return a version tag for CachedData for the current V8 version & flags.
- *
- * This value is meant only for determining whether a previously generated
- * CachedData instance is still valid; the tag has no other meaning.
- *
- * Background: The data carried by CachedData may depend on the exact
- * V8 version number or current compiler flags. This means that when
- * persisting CachedData, the embedder must take care to not pass in
- * data from another V8 version, or the same version with different
- * features enabled.
- *
- * The easiest way to do so is to clear the embedder's cache on any
- * such change.
- *
- * Alternatively, this tag can be stored alongside the cached data and
- * compared when it is being used.
- */
- static uint32_t CachedDataVersionTag();
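-
-  // Validation sketch (not part of the original header): `stored_tag` is
-  // assumed to have been persisted next to the cache bytes when they were
-  // produced.
-  //
-  //   if (stored_tag != v8::ScriptCompiler::CachedDataVersionTag()) {
-  //     // Produced by a different V8 version or flag set; discard the cache.
-  //   }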
-
- /**
- * Compile an ES module, returning a Module that encapsulates
- * the compiled code.
- *
- * Corresponds to the ParseModule abstract operation in the
- * ECMAScript specification.
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<Module> CompileModule(
- Isolate* isolate, Source* source,
- CompileOptions options = kNoCompileOptions,
- NoCacheReason no_cache_reason = kNoCacheNoReason);
-
- /**
- * Compiles a streamed module script.
- *
- * This can only be called after the streaming has finished
- * (ScriptStreamingTask has been run). V8 doesn't construct the source string
- * during streaming, so the embedder needs to pass the full source here.
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<Module> CompileModule(
- Local<Context> context, StreamedSource* v8_source,
- Local<String> full_source_string, const ScriptOrigin& origin);
-
- /**
- * Compile a function for a given context. This is equivalent to running
- *
- * with (obj) {
- * return function(args) { ... }
- * }
- *
- * It is possible to specify multiple context extensions (obj in the above
- * example).
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<Function> CompileFunctionInContext(
- Local<Context> context, Source* source, size_t arguments_count,
- Local<String> arguments[], size_t context_extension_count,
- Local<Object> context_extensions[],
- CompileOptions options = kNoCompileOptions,
- NoCacheReason no_cache_reason = kNoCacheNoReason,
- Local<ScriptOrModule>* script_or_module_out = nullptr);
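-
-  // Illustrative sketch (not part of the original header): compile the body
-  // `return a + b;` as a function of two arguments (`origin` assumed).
-  //
-  //   v8::ScriptCompiler::Source source(
-  //       v8::String::NewFromUtf8Literal(isolate, "return a + b;"), origin);
-  //   v8::Local<v8::String> args[] = {
-  //       v8::String::NewFromUtf8Literal(isolate, "a"),
-  //       v8::String::NewFromUtf8Literal(isolate, "b")};
-  //   v8::Local<v8::Function> fn =
-  //       v8::ScriptCompiler::CompileFunctionInContext(context, &source, 2,
-  //                                                    args, 0, nullptr)
-  //           .ToLocalChecked();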
-
- /**
- * Creates and returns code cache for the specified unbound_script.
- * This will return nullptr if the script cannot be serialized. The
- * CachedData returned by this function should be owned by the caller.
- */
- static CachedData* CreateCodeCache(Local<UnboundScript> unbound_script);
-
- /**
- * Creates and returns code cache for the specified unbound_module_script.
- * This will return nullptr if the script cannot be serialized. The
- * CachedData returned by this function should be owned by the caller.
- */
- static CachedData* CreateCodeCache(
- Local<UnboundModuleScript> unbound_module_script);
-
- /**
- * Creates and returns code cache for the specified function that was
- * previously produced by CompileFunctionInContext.
- * This will return nullptr if the script cannot be serialized. The
- * CachedData returned by this function should be owned by the caller.
- */
- static CachedData* CreateCodeCacheForFunction(Local<Function> function);
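-
-  // Illustrative round trip (not part of the original header): produce a
-  // cache from an existing `unbound_script`, then consume an equivalent
-  // CachedData in a later compilation (`source_string`/`origin` assumed;
-  // Source takes ownership of the CachedData it is given).
-  //
-  //   std::unique_ptr<v8::ScriptCompiler::CachedData> produced(
-  //       v8::ScriptCompiler::CreateCodeCache(unbound_script));
-  //   auto* consumed = new v8::ScriptCompiler::CachedData(
-  //       produced->data, produced->length);  // `produced` must stay alive
-  //   v8::ScriptCompiler::Source source(source_string, origin, consumed);
-  //   v8::MaybeLocal<v8::UnboundScript> script =
-  //       v8::ScriptCompiler::CompileUnboundScript(
-  //           isolate, &source, v8::ScriptCompiler::kConsumeCodeCache);
-  //   bool rejected = source.GetCachedData()->rejected;  // true if unusable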
-
- private:
- static V8_WARN_UNUSED_RESULT MaybeLocal<UnboundScript> CompileUnboundInternal(
- Isolate* isolate, Source* source, CompileOptions options,
- NoCacheReason no_cache_reason);
-};
-
-
-/**
- * An error message.
- */
-class V8_EXPORT Message {
- public:
- Local<String> Get() const;
-
- /**
- * Return the isolate to which the Message belongs.
- */
- Isolate* GetIsolate() const;
-
- V8_WARN_UNUSED_RESULT MaybeLocal<String> GetSource(
- Local<Context> context) const;
- V8_WARN_UNUSED_RESULT MaybeLocal<String> GetSourceLine(
- Local<Context> context) const;
-
- /**
- * Returns the origin for the script from where the function causing the
- * error originates.
- */
- ScriptOrigin GetScriptOrigin() const;
-
- /**
- * Returns the resource name for the script from where the function causing
- * the error originates.
- */
- Local<Value> GetScriptResourceName() const;
-
- /**
- * Exception stack trace. By default stack traces are not captured for
- * uncaught exceptions. SetCaptureStackTraceForUncaughtExceptions allows
- * the embedder to change this option.
- */
- Local<StackTrace> GetStackTrace() const;
-
- /**
- * Returns the number, 1-based, of the line where the error occurred.
- */
- V8_WARN_UNUSED_RESULT Maybe<int> GetLineNumber(Local<Context> context) const;
-
- /**
- * Returns the index within the script of the first character where
- * the error occurred.
- */
- int GetStartPosition() const;
-
- /**
- * Returns the index within the script of the last character where
- * the error occurred.
- */
- int GetEndPosition() const;
-
- /**
- * Returns the Wasm function index where the error occurred. Returns -1 if
- * message is not from a Wasm script.
- */
- int GetWasmFunctionIndex() const;
-
- /**
- * Returns the error level of the message.
- */
- int ErrorLevel() const;
-
- /**
- * Returns the index within the line of the first character where
- * the error occurred.
- */
- int GetStartColumn() const;
- V8_WARN_UNUSED_RESULT Maybe<int> GetStartColumn(Local<Context> context) const;
-
- /**
- * Returns the index within the line of the last character where
- * the error occurred.
- */
- int GetEndColumn() const;
- V8_WARN_UNUSED_RESULT Maybe<int> GetEndColumn(Local<Context> context) const;
-
- /**
- * Passes on the value set by the embedder when it fed the script from which
- * this Message was generated to V8.
- */
- bool IsSharedCrossOrigin() const;
- bool IsOpaque() const;
-
- // TODO(1245381): Print to a string instead of on a FILE.
- static void PrintCurrentStackTrace(Isolate* isolate, FILE* out);
-
- static const int kNoLineNumberInfo = 0;
- static const int kNoColumnInfo = 0;
- static const int kNoScriptIdInfo = 0;
- static const int kNoWasmFunctionIndexInfo = -1;
-};
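-
-// Illustrative sketch (not part of the original header): reporting a caught
-// exception, assuming a v8::TryCatch `try_catch` that has caught one.
-//
-//   v8::Local<v8::Message> message = try_catch.Message();
-//   v8::String::Utf8Value text(isolate, message->Get());
-//   int line = message->GetLineNumber(context).FromMaybe(
-//       v8::Message::kNoLineNumberInfo);
-//   fprintf(stderr, "%s (line %d)\n", *text, line);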
-
-
-/**
- * Representation of a JavaScript stack trace. The information collected is a
- * snapshot of the execution stack and the information remains valid after
- * execution continues.
- */
-class V8_EXPORT StackTrace {
- public:
- /**
- * Flags that determine what information is captured for each
- * StackFrame when grabbing the current stack trace.
- * Note: these options are deprecated and we always collect all available
- * information (kDetailed).
- */
- enum StackTraceOptions {
- kLineNumber = 1,
- kColumnOffset = 1 << 1 | kLineNumber,
- kScriptName = 1 << 2,
- kFunctionName = 1 << 3,
- kIsEval = 1 << 4,
- kIsConstructor = 1 << 5,
- kScriptNameOrSourceURL = 1 << 6,
- kScriptId = 1 << 7,
- kExposeFramesAcrossSecurityOrigins = 1 << 8,
- kOverview = kLineNumber | kColumnOffset | kScriptName | kFunctionName,
- kDetailed = kOverview | kIsEval | kIsConstructor | kScriptNameOrSourceURL
- };
-
- /**
- * Returns a StackFrame at a particular index.
- */
- Local<StackFrame> GetFrame(Isolate* isolate, uint32_t index) const;
-
- /**
- * Returns the number of StackFrames.
- */
- int GetFrameCount() const;
-
- /**
- * Grab a snapshot of the current JavaScript execution stack.
- *
- * \param frame_limit The maximum number of stack frames we want to capture.
- * \param options Enumerates the set of things we will capture for each
- * StackFrame.
- */
- static Local<StackTrace> CurrentStackTrace(
- Isolate* isolate, int frame_limit, StackTraceOptions options = kDetailed);
-};
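-
-// Capture sketch (not part of the original header): grab up to 10 frames of
-// the current stack and inspect each frame.
-//
-//   v8::Local<v8::StackTrace> trace =
-//       v8::StackTrace::CurrentStackTrace(isolate, 10);
-//   for (int i = 0; i < trace->GetFrameCount(); ++i) {
-//     v8::Local<v8::StackFrame> frame =
-//         trace->GetFrame(isolate, static_cast<uint32_t>(i));
-//     int line = frame->GetLineNumber();  // Message::kNoLineNumberInfo if unknown
-//   }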
-
-
-/**
- * A single JavaScript stack frame.
- */
-class V8_EXPORT StackFrame {
- public:
- /**
- * Returns the number, 1-based, of the line for the associated function call.
- * This method will return Message::kNoLineNumberInfo if it is unable to
- * retrieve the line number, or if kLineNumber was not passed as an option
- * when capturing the StackTrace.
- */
- int GetLineNumber() const;
-
- /**
- * Returns the 1-based column offset on the line for the associated function
- * call.
- * This method will return Message::kNoColumnInfo if it is unable to retrieve
- * the column number, or if kColumnOffset was not passed as an option when
- * capturing the StackTrace.
- */
- int GetColumn() const;
-
- /**
- * Returns the id of the script for the function for this StackFrame.
- * This method will return Message::kNoScriptIdInfo if it is unable to
- * retrieve the script id, or if kScriptId was not passed as an option when
- * capturing the StackTrace.
- */
- int GetScriptId() const;
-
- /**
- * Returns the name of the resource that contains the script for the
- * function for this StackFrame.
- */
- Local<String> GetScriptName() const;
-
- /**
- * Returns the name of the resource that contains the script for the
- * function for this StackFrame, or the sourceURL value if the script name
- * is undefined and its source ends with a //# sourceURL=... string or the
- * deprecated //@ sourceURL=... string.
- */
- Local<String> GetScriptNameOrSourceURL() const;
-
- /**
- * Returns the source of the script for the function for this StackFrame.
- */
- Local<String> GetScriptSource() const;
-
- /**
- * Returns the source mapping URL (if one is present) of the script for
- * the function for this StackFrame.
- */
- Local<String> GetScriptSourceMappingURL() const;
-
- /**
- * Returns the name of the function associated with this stack frame.
- */
- Local<String> GetFunctionName() const;
-
- /**
- * Returns whether or not the associated function is compiled via a call to
- * eval().
- */
- bool IsEval() const;
-
- /**
- * Returns whether or not the associated function is called as a
- * constructor via "new".
- */
- bool IsConstructor() const;
-
- /**
- * Returns whether or not the associated function is defined in wasm.
- */
- bool IsWasm() const;
-
- /**
- * Returns whether or not the associated function is defined by the user.
- */
- bool IsUserJavaScript() const;
-};
-
-
-// A StateTag represents a possible state of the VM.
-enum StateTag {
- JS,
- GC,
- PARSER,
- BYTECODE_COMPILER,
- COMPILER,
- OTHER,
- EXTERNAL,
- ATOMICS_WAIT,
- IDLE
-};
-
-// Holds the callee saved registers needed for the stack unwinder. It is the
-// empty struct if no registers are required. Implemented in
-// include/v8-unwinder-state.h.
-struct CalleeSavedRegisters;
-
-// A RegisterState represents the current state of registers used
-// by the sampling profiler API.
-struct V8_EXPORT RegisterState {
- RegisterState();
- ~RegisterState();
- RegisterState(const RegisterState& other);
- RegisterState& operator=(const RegisterState& other);
-
- void* pc; // Instruction pointer.
- void* sp; // Stack pointer.
- void* fp; // Frame pointer.
- void* lr; // Link register (or nullptr on platforms without a link register).
- // Callee saved registers (or null if no callee saved registers were stored)
- std::unique_ptr<CalleeSavedRegisters> callee_saved;
-};
-
-// The output structure filled up by GetStackSample API function.
-struct SampleInfo {
- size_t frames_count; // Number of frames collected.
- StateTag vm_state; // Current VM state.
- void* external_callback_entry; // External callback address if VM is
- // executing an external callback.
- void* context; // Incumbent native context address.
-};
-
-struct MemoryRange {
- const void* start = nullptr;
- size_t length_in_bytes = 0;
-};
-
-struct JSEntryStub {
- MemoryRange code;
-};
-
-struct JSEntryStubs {
- JSEntryStub js_entry_stub;
- JSEntryStub js_construct_entry_stub;
- JSEntryStub js_run_microtasks_entry_stub;
-};
-
-/**
- * A JSON Parser and Stringifier.
- */
-class V8_EXPORT JSON {
- public:
- /**
- * Tries to parse the string |json_string| and returns it as value if
- * successful.
- *
- * \param context The context in which to parse and create the value.
- * \param json_string The string to parse.
- * \return The corresponding value if successfully parsed.
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<Value> Parse(
- Local<Context> context, Local<String> json_string);
-
- /**
- * Tries to stringify the JSON-serializable object |json_object| and returns
- * it as string if successful.
- *
- * \param json_object The JSON-serializable object to stringify.
- * \return The corresponding string if successfully stringified.
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<String> Stringify(
- Local<Context> context, Local<Value> json_object,
- Local<String> gap = Local<String>());
-};
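-
-// Round-trip sketch (not part of the original header):
-//
-//   v8::Local<v8::Value> parsed =
-//       v8::JSON::Parse(context,
-//                       v8::String::NewFromUtf8Literal(isolate, "{\"a\":1}"))
-//           .ToLocalChecked();
-//   v8::Local<v8::String> text =
-//       v8::JSON::Stringify(context, parsed).ToLocalChecked();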
-
-/**
- * Value serialization compatible with the HTML structured clone algorithm.
- * The format is backward-compatible (i.e. safe to store to disk).
- */
-class V8_EXPORT ValueSerializer {
- public:
- class V8_EXPORT Delegate {
- public:
- virtual ~Delegate() = default;
-
- /**
- * Handles the case where a DataCloneError would be thrown in the structured
- * clone spec. Other V8 embedders may throw some other appropriate exception
- * type.
- */
- virtual void ThrowDataCloneError(Local<String> message) = 0;
-
- /**
- * The embedder overrides this method to write some kind of host object, if
- * possible. If not, a suitable exception should be thrown and
- * Nothing<bool>() returned.
- */
- virtual Maybe<bool> WriteHostObject(Isolate* isolate, Local<Object> object);
-
- /**
- * Called when the ValueSerializer is going to serialize a
- * SharedArrayBuffer object. The embedder must return an ID for the
- * object, using the same ID if this SharedArrayBuffer has already been
- * serialized in this buffer. When deserializing, this ID will be passed to
- * ValueDeserializer::GetSharedArrayBufferFromId as |clone_id|.
- *
- * If the object cannot be serialized, an
- * exception should be thrown and Nothing<uint32_t>() returned.
- */
- virtual Maybe<uint32_t> GetSharedArrayBufferId(
- Isolate* isolate, Local<SharedArrayBuffer> shared_array_buffer);
-
- virtual Maybe<uint32_t> GetWasmModuleTransferId(
- Isolate* isolate, Local<WasmModuleObject> module);
- /**
- * Allocates memory for the buffer of at least the size provided. The actual
- * size (which may be greater or equal) is written to |actual_size|. If no
- * buffer has been allocated yet, nullptr will be provided.
- *
- * If the memory cannot be allocated, nullptr should be returned.
- * |actual_size| will be ignored. It is assumed that |old_buffer| is still
- * valid in this case and has not been modified.
- *
- * The default implementation uses the stdlib's `realloc()` function.
- */
- virtual void* ReallocateBufferMemory(void* old_buffer, size_t size,
- size_t* actual_size);
-
- /**
- * Frees a buffer allocated with |ReallocateBufferMemory|.
- *
- * The default implementation uses the stdlib's `free()` function.
- */
- virtual void FreeBufferMemory(void* buffer);
- };
-
- explicit ValueSerializer(Isolate* isolate);
- ValueSerializer(Isolate* isolate, Delegate* delegate);
- ~ValueSerializer();
-
- /**
- * Writes out a header, which includes the format version.
- */
- void WriteHeader();
-
- /**
- * Serializes a JavaScript value into the buffer.
- */
- V8_WARN_UNUSED_RESULT Maybe<bool> WriteValue(Local<Context> context,
- Local<Value> value);
-
- /**
- * Returns the stored data (allocated using the delegate's
- * ReallocateBufferMemory) and its size. This serializer should not be used
- * once the buffer is released. The contents are undefined if a previous write
- * has failed. Ownership of the buffer is transferred to the caller.
- */
- V8_WARN_UNUSED_RESULT std::pair<uint8_t*, size_t> Release();
-
- /**
- * Marks an ArrayBuffer as having its contents transferred out of band.
- * Pass the corresponding ArrayBuffer in the deserializing context to
- * ValueDeserializer::TransferArrayBuffer.
- */
- void TransferArrayBuffer(uint32_t transfer_id,
- Local<ArrayBuffer> array_buffer);
-
-
- /**
- * Indicate whether to treat ArrayBufferView objects as host objects,
- * i.e. pass them to Delegate::WriteHostObject. This should not be
- * called when no Delegate was passed.
- *
- * The default is not to treat ArrayBufferViews as host objects.
- */
- void SetTreatArrayBufferViewsAsHostObjects(bool mode);
-
- /**
- * Write raw data in various common formats to the buffer.
- * Note that integer types are written in base-128 varint format, not with a
- * binary copy. For use during an override of Delegate::WriteHostObject.
- */
- void WriteUint32(uint32_t value);
- void WriteUint64(uint64_t value);
- void WriteDouble(double value);
- void WriteRawBytes(const void* source, size_t length);
-
- ValueSerializer(const ValueSerializer&) = delete;
- void operator=(const ValueSerializer&) = delete;
-
- private:
- struct PrivateData;
- PrivateData* private_;
-};
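-
-// Serialization sketch (not part of the original header; default delegate,
-// `value` assumed):
-//
-//   v8::ValueSerializer serializer(isolate);
-//   serializer.WriteHeader();
-//   if (serializer.WriteValue(context, value).FromMaybe(false)) {
-//     std::pair<uint8_t*, size_t> buffer = serializer.Release();
-//     // Persist buffer.first/buffer.second. With the default delegate the
-//     // buffer comes from realloc(), so release it with free(buffer.first).
-//   }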
-
-/**
- * Deserializes values from data written with ValueSerializer, or a compatible
- * implementation.
- */
-class V8_EXPORT ValueDeserializer {
- public:
- class V8_EXPORT Delegate {
- public:
- virtual ~Delegate() = default;
-
- /**
- * The embedder overrides this method to read some kind of host object, if
- * possible. If not, a suitable exception should be thrown and
- * MaybeLocal<Object>() returned.
- */
- virtual MaybeLocal<Object> ReadHostObject(Isolate* isolate);
-
- /**
- * Get a WasmModuleObject given a transfer_id previously provided
- * by ValueSerializer::GetWasmModuleTransferId
- */
- virtual MaybeLocal<WasmModuleObject> GetWasmModuleFromId(
- Isolate* isolate, uint32_t transfer_id);
-
- /**
- * Get a SharedArrayBuffer given a clone_id previously provided
- * by ValueSerializer::GetSharedArrayBufferId
- */
- virtual MaybeLocal<SharedArrayBuffer> GetSharedArrayBufferFromId(
- Isolate* isolate, uint32_t clone_id);
- };
-
- ValueDeserializer(Isolate* isolate, const uint8_t* data, size_t size);
- ValueDeserializer(Isolate* isolate, const uint8_t* data, size_t size,
- Delegate* delegate);
- ~ValueDeserializer();
-
- /**
- * Reads and validates a header (including the format version).
- * May, for example, reject an invalid or unsupported wire format.
- */
- V8_WARN_UNUSED_RESULT Maybe<bool> ReadHeader(Local<Context> context);
-
- /**
- * Deserializes a JavaScript value from the buffer.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> ReadValue(Local<Context> context);
-
- /**
- * Accepts the array buffer corresponding to the one passed previously to
- * ValueSerializer::TransferArrayBuffer.
- */
- void TransferArrayBuffer(uint32_t transfer_id,
- Local<ArrayBuffer> array_buffer);
-
- /**
- * Similar to TransferArrayBuffer, but for SharedArrayBuffer.
- * The id is not necessarily in the same namespace as unshared ArrayBuffer
- * objects.
- */
- void TransferSharedArrayBuffer(uint32_t id,
- Local<SharedArrayBuffer> shared_array_buffer);
-
- /**
- * Must be called before ReadHeader to enable support for reading the legacy
- * wire format (i.e., which predates this being shipped).
- *
- * Don't use this unless you need to read data written by previous versions of
- * blink::ScriptValueSerializer.
- */
- void SetSupportsLegacyWireFormat(bool supports_legacy_wire_format);
-
- /**
- * Reads the underlying wire format version. This is mostly useful for
- * legacy code reading old wire format versions. Must be called after
- * ReadHeader.
- */
- uint32_t GetWireFormatVersion() const;
-
- /**
- * Reads raw data in various common formats to the buffer.
- * Note that integer types are read in base-128 varint format, not with a
- * binary copy. For use during an override of Delegate::ReadHostObject.
- */
- V8_WARN_UNUSED_RESULT bool ReadUint32(uint32_t* value);
- V8_WARN_UNUSED_RESULT bool ReadUint64(uint64_t* value);
- V8_WARN_UNUSED_RESULT bool ReadDouble(double* value);
- V8_WARN_UNUSED_RESULT bool ReadRawBytes(size_t length, const void** data);
-
- ValueDeserializer(const ValueDeserializer&) = delete;
- void operator=(const ValueDeserializer&) = delete;
-
- private:
- struct PrivateData;
- PrivateData* private_;
-};
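-
-// Deserialization sketch (not part of the original header): read back bytes
-// produced by ValueSerializer (`data`/`size` assumed).
-//
-//   v8::ValueDeserializer deserializer(isolate, data, size);
-//   v8::Local<v8::Value> value;
-//   if (deserializer.ReadHeader(context).FromMaybe(false) &&
-//       deserializer.ReadValue(context).ToLocal(&value)) {
-//     // `value` is a deep copy of the originally serialized value.
-//   }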
-
-
-// --- Value ---
-
-
-/**
- * The superclass of all JavaScript values and objects.
- */
-class V8_EXPORT Value : public Data {
- public:
- /**
- * Returns true if this value is the undefined value. See ECMA-262
- * 4.3.10.
- *
- * This is equivalent to `value === undefined` in JS.
- */
- V8_INLINE bool IsUndefined() const;
-
- /**
- * Returns true if this value is the null value. See ECMA-262
- * 4.3.11.
- *
- * This is equivalent to `value === null` in JS.
- */
- V8_INLINE bool IsNull() const;
-
- /**
- * Returns true if this value is either the null or the undefined value.
- * See ECMA-262
- * 4.3.11. and 4.3.12
- *
- * This is equivalent to `value == null` in JS.
- */
- V8_INLINE bool IsNullOrUndefined() const;
-
- /**
- * Returns true if this value is true.
- *
- * This is not the same as `BooleanValue()`. The latter performs a
- * conversion to boolean, i.e. the result of `Boolean(value)` in JS, whereas
- * this checks `value === true`.
- */
- bool IsTrue() const;
-
- /**
- * Returns true if this value is false.
- *
- * This is not the same as `!BooleanValue()`. The latter performs a
- * conversion to boolean, i.e. the result of `!Boolean(value)` in JS, whereas
- * this checks `value === false`.
- */
- bool IsFalse() const;
-
- /**
- * Returns true if this value is a symbol or a string.
- *
- * This is equivalent to
- * `typeof value === 'string' || typeof value === 'symbol'` in JS.
- */
- bool IsName() const;
-
- /**
- * Returns true if this value is an instance of the String type.
- * See ECMA-262 8.4.
- *
- * This is equivalent to `typeof value === 'string'` in JS.
- */
- V8_INLINE bool IsString() const;
-
- /**
- * Returns true if this value is a symbol.
- *
- * This is equivalent to `typeof value === 'symbol'` in JS.
- */
- bool IsSymbol() const;
-
- /**
- * Returns true if this value is a function.
- *
- * This is equivalent to `typeof value === 'function'` in JS.
- */
- bool IsFunction() const;
-
- /**
- * Returns true if this value is an array. Note that it will return false for
- * a Proxy for an array.
- */
- bool IsArray() const;
-
- /**
- * Returns true if this value is an object.
- */
- bool IsObject() const;
-
- /**
- * Returns true if this value is a bigint.
- *
- * This is equivalent to `typeof value === 'bigint'` in JS.
- */
- bool IsBigInt() const;
-
- /**
- * Returns true if this value is boolean.
- *
- * This is equivalent to `typeof value === 'boolean'` in JS.
- */
- bool IsBoolean() const;
-
- /**
- * Returns true if this value is a number.
- *
- * This is equivalent to `typeof value === 'number'` in JS.
- */
- bool IsNumber() const;
-
- /**
- * Returns true if this value is an `External` object.
- */
- bool IsExternal() const;
-
- /**
- * Returns true if this value is a 32-bit signed integer.
- */
- bool IsInt32() const;
-
- /**
- * Returns true if this value is a 32-bit unsigned integer.
- */
- bool IsUint32() const;
-
- /**
- * Returns true if this value is a Date.
- */
- bool IsDate() const;
-
- /**
- * Returns true if this value is an Arguments object.
- */
- bool IsArgumentsObject() const;
-
- /**
- * Returns true if this value is a BigInt object.
- */
- bool IsBigIntObject() const;
-
- /**
- * Returns true if this value is a Boolean object.
- */
- bool IsBooleanObject() const;
-
- /**
- * Returns true if this value is a Number object.
- */
- bool IsNumberObject() const;
-
- /**
- * Returns true if this value is a String object.
- */
- bool IsStringObject() const;
-
- /**
- * Returns true if this value is a Symbol object.
- */
- bool IsSymbolObject() const;
-
- /**
- * Returns true if this value is a NativeError.
- */
- bool IsNativeError() const;
-
- /**
- * Returns true if this value is a RegExp.
- */
- bool IsRegExp() const;
-
- /**
- * Returns true if this value is an async function.
- */
- bool IsAsyncFunction() const;
-
- /**
- * Returns true if this value is a Generator function.
- */
- bool IsGeneratorFunction() const;
-
- /**
- * Returns true if this value is a Generator object (iterator).
- */
- bool IsGeneratorObject() const;
-
- /**
- * Returns true if this value is a Promise.
- */
- bool IsPromise() const;
-
- /**
- * Returns true if this value is a Map.
- */
- bool IsMap() const;
-
- /**
- * Returns true if this value is a Set.
- */
- bool IsSet() const;
-
- /**
- * Returns true if this value is a Map Iterator.
- */
- bool IsMapIterator() const;
-
- /**
- * Returns true if this value is a Set Iterator.
- */
- bool IsSetIterator() const;
-
- /**
- * Returns true if this value is a WeakMap.
- */
- bool IsWeakMap() const;
-
- /**
- * Returns true if this value is a WeakSet.
- */
- bool IsWeakSet() const;
-
- /**
- * Returns true if this value is an ArrayBuffer.
- */
- bool IsArrayBuffer() const;
-
- /**
- * Returns true if this value is an ArrayBufferView.
- */
- bool IsArrayBufferView() const;
-
- /**
- * Returns true if this value is one of TypedArrays.
- */
- bool IsTypedArray() const;
-
- /**
- * Returns true if this value is an Uint8Array.
- */
- bool IsUint8Array() const;
-
- /**
- * Returns true if this value is an Uint8ClampedArray.
- */
- bool IsUint8ClampedArray() const;
-
- /**
- * Returns true if this value is an Int8Array.
- */
- bool IsInt8Array() const;
-
- /**
- * Returns true if this value is an Uint16Array.
- */
- bool IsUint16Array() const;
-
- /**
- * Returns true if this value is an Int16Array.
- */
- bool IsInt16Array() const;
-
- /**
- * Returns true if this value is an Uint32Array.
- */
- bool IsUint32Array() const;
-
- /**
- * Returns true if this value is an Int32Array.
- */
- bool IsInt32Array() const;
-
- /**
- * Returns true if this value is a Float32Array.
- */
- bool IsFloat32Array() const;
-
- /**
- * Returns true if this value is a Float64Array.
- */
- bool IsFloat64Array() const;
-
- /**
- * Returns true if this value is a BigInt64Array.
- */
- bool IsBigInt64Array() const;
-
- /**
- * Returns true if this value is a BigUint64Array.
- */
- bool IsBigUint64Array() const;
-
- /**
- * Returns true if this value is a DataView.
- */
- bool IsDataView() const;
-
- /**
- * Returns true if this value is a SharedArrayBuffer.
- */
- bool IsSharedArrayBuffer() const;
-
- /**
- * Returns true if this value is a JavaScript Proxy.
- */
- bool IsProxy() const;
-
- /**
- * Returns true if this value is a WasmMemoryObject.
- */
- bool IsWasmMemoryObject() const;
-
- /**
- * Returns true if this value is a WasmModuleObject.
- */
- bool IsWasmModuleObject() const;
-
- /**
- * Returns true if the value is a Module Namespace Object.
- */
- bool IsModuleNamespaceObject() const;
-
- /**
- * Perform the equivalent of `BigInt(value)` in JS.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<BigInt> ToBigInt(
- Local<Context> context) const;
- /**
- * Perform the equivalent of `Number(value)` in JS.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Number> ToNumber(
- Local<Context> context) const;
- /**
- * Perform the equivalent of `String(value)` in JS.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<String> ToString(
- Local<Context> context) const;
- /**
- * Provide a string representation of this value usable for debugging.
- * This operation has no observable side effects and will succeed
- * unless e.g. execution is being terminated.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<String> ToDetailString(
- Local<Context> context) const;
- /**
- * Perform the equivalent of `Object(value)` in JS.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Object> ToObject(
- Local<Context> context) const;
- /**
- * Perform the equivalent of `Number(value)` in JS and convert the result
- * to an integer. Negative values are rounded up, positive values are rounded
- * down. NaN is converted to 0. Infinite values yield undefined results.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Integer> ToInteger(
- Local<Context> context) const;
- /**
- * Perform the equivalent of `Number(value)` in JS and convert the result
- * to an unsigned 32-bit integer by performing the steps in
- * https://tc39.es/ecma262/#sec-touint32.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Uint32> ToUint32(
- Local<Context> context) const;
- /**
- * Perform the equivalent of `Number(value)` in JS and convert the result
- * to a signed 32-bit integer by performing the steps in
- * https://tc39.es/ecma262/#sec-toint32.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Int32> ToInt32(Local<Context> context) const;
-
- /**
- * Perform the equivalent of `Boolean(value)` in JS. This can never fail.
- */
- Local<Boolean> ToBoolean(Isolate* isolate) const;
-
- /**
- * Attempts to convert a string to an array index.
- * Returns an empty handle if the conversion fails.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Uint32> ToArrayIndex(
- Local<Context> context) const;
-
- /** Returns the equivalent of `ToBoolean()->Value()`. */
- bool BooleanValue(Isolate* isolate) const;
-
- /** Returns the equivalent of `ToNumber()->Value()`. */
- V8_WARN_UNUSED_RESULT Maybe<double> NumberValue(Local<Context> context) const;
- /** Returns the equivalent of `ToInteger()->Value()`. */
- V8_WARN_UNUSED_RESULT Maybe<int64_t> IntegerValue(
- Local<Context> context) const;
- /** Returns the equivalent of `ToUint32()->Value()`. */
- V8_WARN_UNUSED_RESULT Maybe<uint32_t> Uint32Value(
- Local<Context> context) const;
- /** Returns the equivalent of `ToInt32()->Value()`. */
- V8_WARN_UNUSED_RESULT Maybe<int32_t> Int32Value(Local<Context> context) const;
-
- /** JS == */
- V8_WARN_UNUSED_RESULT Maybe<bool> Equals(Local<Context> context,
- Local<Value> that) const;
- bool StrictEquals(Local<Value> that) const;
- bool SameValue(Local<Value> that) const;
-
- template <class T> V8_INLINE static Value* Cast(T* value);
-
- Local<String> TypeOf(Isolate*);
-
- Maybe<bool> InstanceOf(Local<Context> context, Local<Object> object);
-
- private:
- V8_INLINE bool QuickIsUndefined() const;
- V8_INLINE bool QuickIsNull() const;
- V8_INLINE bool QuickIsNullOrUndefined() const;
- V8_INLINE bool QuickIsString() const;
- bool FullIsUndefined() const;
- bool FullIsNull() const;
- bool FullIsString() const;
-
- static void CheckCast(Data* that);
-};
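-
-// Conversion sketch (not part of the original header): checked conversion of
-// an arbitrary `input` value (assumed Local<Value>) to a double.
-//
-//   double number;
-//   if (input->NumberValue(context).To(&number)) {
-//     // `number` holds the result of Number(input).
-//   } else {
-//     // The conversion threw (e.g. via valueOf); handle the exception.
-//   }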
-
-
-/**
- * The superclass of primitive values. See ECMA-262 4.3.2.
- */
-class V8_EXPORT Primitive : public Value { };
-
-
-/**
- * A primitive boolean value (ECMA-262, 4.3.14). Either the true
- * or false value.
- */
-class V8_EXPORT Boolean : public Primitive {
- public:
- bool Value() const;
- V8_INLINE static Boolean* Cast(v8::Data* data);
- V8_INLINE static Local<Boolean> New(Isolate* isolate, bool value);
-
- private:
- static void CheckCast(v8::Data* that);
-};
-
-
-/**
- * A superclass for symbols and strings.
- */
-class V8_EXPORT Name : public Primitive {
- public:
- /**
- * Returns the identity hash for this object. The current implementation
- * uses an inline property on the object to store the identity hash.
- *
- * The return value will never be 0. Also, it is not guaranteed to be
- * unique.
- */
- int GetIdentityHash();
-
- V8_INLINE static Name* Cast(Data* data);
-
- private:
- static void CheckCast(Data* that);
-};
-
-/**
- * A flag describing different modes of string creation.
- *
- * Aside from performance implications there are no differences between the two
- * creation modes.
- */
-enum class NewStringType {
- /**
- * Create a new string, always allocating new storage memory.
- */
- kNormal,
-
- /**
- * Acts as a hint that the string should be created in the
- * old generation heap space and be deduplicated if an identical string
- * already exists.
- */
- kInternalized
-};
-
-/**
- * A JavaScript string value (ECMA-262, 4.3.17).
- */
-class V8_EXPORT String : public Name {
- public:
- static constexpr int kMaxLength =
- internal::kApiSystemPointerSize == 4 ? (1 << 28) - 16 : (1 << 29) - 24;
-
- enum Encoding {
- UNKNOWN_ENCODING = 0x1,
- TWO_BYTE_ENCODING = 0x0,
- ONE_BYTE_ENCODING = 0x8
- };
- /**
- * Returns the number of characters (UTF-16 code units) in this string.
- */
- int Length() const;
-
- /**
- * Returns the number of bytes in the UTF-8 encoded
- * representation of this string.
- */
- int Utf8Length(Isolate* isolate) const;
-
- /**
- * Returns whether this string is known to contain only one-byte data,
- * i.e. ISO-8859-1 code points.
- * Does not read the string.
- * False negatives are possible.
- */
- bool IsOneByte() const;
-
- /**
- * Returns whether this string contains only one-byte data,
- * i.e. ISO-8859-1 code points.
- * Will read the entire string in some cases.
- */
- bool ContainsOnlyOneByte() const;
-
- /**
- * Write the contents of the string to an external buffer.
- * If no arguments are given, expects the buffer to be large
- * enough to hold the entire string and NULL terminator. Copies
- * the contents of the string and the NULL terminator into the
- * buffer.
- *
- * WriteUtf8 will not write partial UTF-8 sequences, preferring to stop
- * before the end of the buffer.
- *
- * Copies up to length characters into the output buffer.
- * Only null-terminates if there is enough space in the buffer.
- *
- * \param buffer The buffer into which the string will be copied.
- * \param start The starting position within the string at which
- * copying begins.
- * \param length The number of characters to copy from the string. For
- * WriteUtf8 the number of bytes in the buffer.
- * \param nchars_ref The number of characters written, can be NULL.
- * \param options Various options that might affect performance of this or
- * subsequent operations.
- * \return The number of characters copied to the buffer excluding the null
- * terminator. For WriteUtf8: The number of bytes copied to the buffer
- * including the null terminator (if written).
- */
- enum WriteOptions {
- NO_OPTIONS = 0,
- HINT_MANY_WRITES_EXPECTED = 1,
- NO_NULL_TERMINATION = 2,
- PRESERVE_ONE_BYTE_NULL = 4,
- // Used by WriteUtf8 to replace orphan surrogate code units with the
- // unicode replacement character. Needs to be set to guarantee valid UTF-8
- // output.
- REPLACE_INVALID_UTF8 = 8
- };
-
- // 16-bit character codes.
- int Write(Isolate* isolate, uint16_t* buffer, int start = 0, int length = -1,
- int options = NO_OPTIONS) const;
- // One byte characters.
- int WriteOneByte(Isolate* isolate, uint8_t* buffer, int start = 0,
- int length = -1, int options = NO_OPTIONS) const;
- // UTF-8 encoded characters.
- int WriteUtf8(Isolate* isolate, char* buffer, int length = -1,
- int* nchars_ref = nullptr, int options = NO_OPTIONS) const;
-
- /**
- * A zero length string.
- */
- V8_INLINE static Local<String> Empty(Isolate* isolate);
-
- /**
- * Returns true if the string is external.
- */
- bool IsExternal() const;
-
- /**
- * Returns true if the string is both external and two-byte.
- */
- bool IsExternalTwoByte() const;
-
- /**
- * Returns true if the string is both external and one-byte.
- */
- bool IsExternalOneByte() const;
-
- class V8_EXPORT ExternalStringResourceBase {
- public:
- virtual ~ExternalStringResourceBase() = default;
-
- /**
- * If a string is cacheable, the value returned by
- * ExternalStringResource::data() may be cached, otherwise it is not
- * expected to be stable beyond the current top-level task.
- */
- virtual bool IsCacheable() const { return true; }
-
- // Disallow copying and assigning.
- ExternalStringResourceBase(const ExternalStringResourceBase&) = delete;
- void operator=(const ExternalStringResourceBase&) = delete;
-
- protected:
- ExternalStringResourceBase() = default;
-
- /**
- * Internally V8 will call this Dispose method when the external string
- * resource is no longer needed. The default implementation will use the
- * delete operator. This method can be overridden in subclasses to
- * control how allocated external string resources are disposed.
- */
- virtual void Dispose() { delete this; }
-
- /**
- * For a non-cacheable string, the value returned by
- * |ExternalStringResource::data()| has to be stable between |Lock()| and
- * |Unlock()|, that is, the string must behave as if |IsCacheable()| returned
- * true.
- *
- * These two functions must be thread-safe, and can be called from anywhere.
- * They also must handle lock depth, in the sense that each can be called
- * several times, from different threads, and unlocking should only happen
- * when the balance of Lock() and Unlock() calls is 0.
- */
- virtual void Lock() const {}
-
- /**
- * Unlocks the string.
- */
- virtual void Unlock() const {}
-
- private:
- friend class internal::ExternalString;
- friend class v8::String;
- friend class internal::ScopedExternalStringLock;
- };
-
- /**
- * An ExternalStringResource is a wrapper around a two-byte string
- * buffer that resides outside V8's heap. Implement an
- * ExternalStringResource to manage the life cycle of the underlying
- * buffer. Note that the string data must be immutable.
- */
- class V8_EXPORT ExternalStringResource
- : public ExternalStringResourceBase {
- public:
- /**
- * Override the destructor to manage the life cycle of the underlying
- * buffer.
- */
- ~ExternalStringResource() override = default;
-
- /**
- * The string data from the underlying buffer. If the resource is cacheable
- * then data() must return the same value for all invocations.
- */
- virtual const uint16_t* data() const = 0;
-
- /**
- * The length of the string. That is, the number of two-byte characters.
- */
- virtual size_t length() const = 0;
-
- /**
- * Returns the cached data from the underlying buffer. This method can be
- * called only for cacheable resources (i.e. IsCacheable() == true) and only
- * after UpdateDataCache() was called.
- */
- const uint16_t* cached_data() const {
- CheckCachedDataInvariants();
- return cached_data_;
- }
-
- /**
- * Update {cached_data_} with the data from the underlying buffer. This can
- * be called only for cacheable resources.
- */
- void UpdateDataCache();
-
- protected:
- ExternalStringResource() = default;
-
- private:
- void CheckCachedDataInvariants() const;
-
- const uint16_t* cached_data_ = nullptr;
- };
-
- /**
- * An ExternalOneByteStringResource is a wrapper around an one-byte
- * string buffer that resides outside V8's heap. Implement an
- * ExternalOneByteStringResource to manage the life cycle of the
- * underlying buffer. Note that the string data must be immutable
- * and that the data must be Latin-1 and not UTF-8, which would require
- * special treatment internally in the engine and would not allow efficient
- * indexing. Use String::NewFromUtf8 or convert to 16-bit data for non-Latin-1.
- */
- class V8_EXPORT ExternalOneByteStringResource
- : public ExternalStringResourceBase {
- public:
- /**
- * Override the destructor to manage the life cycle of the underlying
- * buffer.
- */
- ~ExternalOneByteStringResource() override = default;
-
- /**
- * The string data from the underlying buffer. If the resource is cacheable
- * then data() must return the same value for all invocations.
- */
- virtual const char* data() const = 0;
-
- /** The number of Latin-1 characters in the string.*/
- virtual size_t length() const = 0;
-
- /**
- * Returns the cached data from the underlying buffer. If the resource is
- * uncacheable or if UpdateDataCache() was not called before, it has
- * undefined behaviour.
- */
- const char* cached_data() const {
- CheckCachedDataInvariants();
- return cached_data_;
- }
-
- /**
- * Update {cached_data_} with the data from the underlying buffer. This can
- * be called only for cacheable resources.
- */
- void UpdateDataCache();
-
- protected:
- ExternalOneByteStringResource() = default;
-
- private:
- void CheckCachedDataInvariants() const;
-
- const char* cached_data_ = nullptr;
- };
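-
-  // A minimal resource sketch (not part of the original header): wrap a
-  // Latin-1 buffer with static storage duration, so Dispose() has nothing to
-  // free.
-  //
-  //   class StaticResource : public v8::String::ExternalOneByteStringResource {
-  //    public:
-  //     StaticResource(const char* data, size_t length)
-  //         : data_(data), length_(length) {}
-  //     const char* data() const override { return data_; }
-  //     size_t length() const override { return length_; }
-  //     void Dispose() override {}  // storage is static
-  //    private:
-  //     const char* data_;
-  //     size_t length_;
-  //   };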
-
- /**
- * If the string is an external string, return the ExternalStringResourceBase
- * regardless of the encoding, otherwise return NULL. The encoding of the
- * string is returned in encoding_out.
- */
- V8_INLINE ExternalStringResourceBase* GetExternalStringResourceBase(
- Encoding* encoding_out) const;
-
- /**
- * Get the ExternalStringResource for an external string. Returns
- * NULL if IsExternal() doesn't return true.
- */
- V8_INLINE ExternalStringResource* GetExternalStringResource() const;
-
- /**
- * Get the ExternalOneByteStringResource for an external one-byte string.
- * Returns NULL if IsExternalOneByte() doesn't return true.
- */
- const ExternalOneByteStringResource* GetExternalOneByteStringResource() const;
-
- V8_INLINE static String* Cast(v8::Data* data);
-
- /**
- * Allocates a new string from a UTF-8 literal. This is equivalent to calling
- * String::NewFromUtf8(isolate, "...").ToLocalChecked(), but without the check
- * overhead.
- *
- * When called on a string literal containing '\0', the inferred length is the
- * length of the input array minus 1 (for the final '\0') and not the value
- * returned by strlen.
- **/
- template <int N>
- static V8_WARN_UNUSED_RESULT Local<String> NewFromUtf8Literal(
- Isolate* isolate, const char (&literal)[N],
- NewStringType type = NewStringType::kNormal) {
- static_assert(N <= kMaxLength, "String is too long");
- return NewFromUtf8Literal(isolate, literal, type, N - 1);
- }
-
- /** Allocates a new string from UTF-8 data. Only returns an empty value when
- * length > kMaxLength. **/
- static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewFromUtf8(
- Isolate* isolate, const char* data,
- NewStringType type = NewStringType::kNormal, int length = -1);
-
- /** Allocates a new string from Latin-1 data. Only returns an empty value
- * when length > kMaxLength. **/
- static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewFromOneByte(
- Isolate* isolate, const uint8_t* data,
- NewStringType type = NewStringType::kNormal, int length = -1);
-
- /** Allocates a new string from UTF-16 data. Only returns an empty value when
- * length > kMaxLength. **/
- static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewFromTwoByte(
- Isolate* isolate, const uint16_t* data,
- NewStringType type = NewStringType::kNormal, int length = -1);
-
- /**
- * Creates a new string by concatenating the left and the right strings
- * passed in as parameters.
- */
- static Local<String> Concat(Isolate* isolate, Local<String> left,
- Local<String> right);
-
- /**
- * Creates a new external string using the data defined in the given
- * resource. When the external string is no longer live on V8's heap the
- * resource will be disposed by calling its Dispose method. The caller of
- * this function should not otherwise delete or modify the resource. Neither
- * should the underlying buffer be deallocated or modified except through the
- * destructor of the external string resource.
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewExternalTwoByte(
- Isolate* isolate, ExternalStringResource* resource);
-
- /**
- * Associate an external string resource with this string by transforming it
- * in place so that existing references to this string in the JavaScript heap
- * will use the external string resource. The external string resource's
- * character contents need to be equivalent to this string.
- * Returns true if the string has been changed to be an external string.
- * The string is not modified if the operation fails. See NewExternal for
- * information on the lifetime of the resource.
- */
- bool MakeExternal(ExternalStringResource* resource);
-
- /**
- * Creates a new external string using the one-byte data defined in the given
- * resource. When the external string is no longer live on V8's heap the
- * resource will be disposed by calling its Dispose method. The caller of
- * this function should not otherwise delete or modify the resource. Neither
- * should the underlying buffer be deallocated or modified except through the
- * destructor of the external string resource.
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<String> NewExternalOneByte(
- Isolate* isolate, ExternalOneByteStringResource* resource);
-
- /**
- * Associate an external string resource with this string by transforming it
- * in place so that existing references to this string in the JavaScript heap
- * will use the external string resource. The external string resource's
- * character contents need to be equivalent to this string.
- * Returns true if the string has been changed to be an external string.
- * The string is not modified if the operation fails. See NewExternal for
- * information on the lifetime of the resource.
- */
- bool MakeExternal(ExternalOneByteStringResource* resource);
-
- /**
- * Returns true if this string can be made external.
- */
- bool CanMakeExternal() const;
-
- /**
- * Returns true if the strings' values are equal. Same as JS ==/===.
- */
- bool StringEquals(Local<String> str) const;
-
- /**
- * Converts an object to a UTF-8-encoded character array. Useful if
- * you want to print the object. If conversion to a string fails
- * (e.g. due to an exception in the toString() method of the object)
- * then the length() method returns 0 and the * operator returns
- * NULL.
- */
- class V8_EXPORT Utf8Value {
- public:
- Utf8Value(Isolate* isolate, Local<v8::Value> obj);
- ~Utf8Value();
- char* operator*() { return str_; }
- const char* operator*() const { return str_; }
- int length() const { return length_; }
-
- // Disallow copying and assigning.
- Utf8Value(const Utf8Value&) = delete;
- void operator=(const Utf8Value&) = delete;
-
- private:
- char* str_;
- int length_;
- };
-
- /**
- * Converts an object to a two-byte (UTF-16-encoded) string.
- * If conversion to a string fails (e.g. due to an exception in the toString()
- * method of the object) then the length() method returns 0 and the * operator
- * returns NULL.
- */
- class V8_EXPORT Value {
- public:
- Value(Isolate* isolate, Local<v8::Value> obj);
- ~Value();
- uint16_t* operator*() { return str_; }
- const uint16_t* operator*() const { return str_; }
- int length() const { return length_; }
-
- // Disallow copying and assigning.
- Value(const Value&) = delete;
- void operator=(const Value&) = delete;
-
- private:
- uint16_t* str_;
- int length_;
- };
-
- private:
- void VerifyExternalStringResourceBase(ExternalStringResourceBase* v,
- Encoding encoding) const;
- void VerifyExternalStringResource(ExternalStringResource* val) const;
- ExternalStringResource* GetExternalStringResourceSlow() const;
- ExternalStringResourceBase* GetExternalStringResourceBaseSlow(
- String::Encoding* encoding_out) const;
-
- static Local<v8::String> NewFromUtf8Literal(Isolate* isolate,
- const char* literal,
- NewStringType type, int length);
-
- static void CheckCast(v8::Data* that);
-};
-
-// Zero-length string specialization (templated string size includes
-// terminator).
-template <>
-inline V8_WARN_UNUSED_RESULT Local<String> String::NewFromUtf8Literal(
- Isolate* isolate, const char (&literal)[1], NewStringType type) {
- return String::Empty(isolate);
-}
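-
-// Creation/extraction sketch (not part of the original header):
-//
-//   v8::Local<v8::String> s =
-//       v8::String::NewFromUtf8Literal(isolate, "hello");
-//   v8::String::Utf8Value utf8(isolate, s);
-//   printf("%s has %d UTF-16 code unit(s)\n", *utf8, s->Length());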
-
-/**
- * A JavaScript symbol (ECMA-262 edition 6)
- */
-class V8_EXPORT Symbol : public Name {
- public:
- /**
- * Returns the description string of the symbol, or undefined if none.
- */
- V8_DEPRECATE_SOON("Use Symbol::Description(isolate)")
- Local<Value> Description() const;
- Local<Value> Description(Isolate* isolate) const;
-
- /**
- * Create a symbol. If description is not empty, it will be used as the
- * description.
- */
- static Local<Symbol> New(Isolate* isolate,
- Local<String> description = Local<String>());
-
- /**
- * Access the global symbol registry.
- * Note that symbols created this way are never collected, so
- * they should only be used for statically fixed properties.
- * Also, there is only one global name space for the descriptions used as
- * keys.
- * To minimize the potential for clashes, use qualified names as keys.
- */
- static Local<Symbol> For(Isolate* isolate, Local<String> description);
-
- /**
- * Retrieve a global symbol. Similar to |For|, but using a separate
- * registry that is not accessible by (and cannot clash with) JavaScript code.
- */
- static Local<Symbol> ForApi(Isolate* isolate, Local<String> description);
-
- // Well-known symbols
- static Local<Symbol> GetAsyncIterator(Isolate* isolate);
- static Local<Symbol> GetHasInstance(Isolate* isolate);
- static Local<Symbol> GetIsConcatSpreadable(Isolate* isolate);
- static Local<Symbol> GetIterator(Isolate* isolate);
- static Local<Symbol> GetMatch(Isolate* isolate);
- static Local<Symbol> GetReplace(Isolate* isolate);
- static Local<Symbol> GetSearch(Isolate* isolate);
- static Local<Symbol> GetSplit(Isolate* isolate);
- static Local<Symbol> GetToPrimitive(Isolate* isolate);
- static Local<Symbol> GetToStringTag(Isolate* isolate);
- static Local<Symbol> GetUnscopables(Isolate* isolate);
-
- V8_INLINE static Symbol* Cast(Data* data);
-
- private:
- Symbol();
- static void CheckCast(Data* that);
-};
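-
-// Registry sketch (not part of the original header): a qualified key reduces
-// the chance of clashes in the single global description namespace.
-//
-//   v8::Local<v8::Symbol> sym = v8::Symbol::For(
-//       isolate, v8::String::NewFromUtf8Literal(isolate, "myembedder.cache"));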
-
-
-/**
- * A private symbol
- *
- * This is an experimental feature. Use at your own risk.
- */
-class V8_EXPORT Private : public Data {
- public:
- /**
- * Returns the print name string of the private symbol, or undefined if none.
- */
- Local<Value> Name() const;
-
- /**
- * Create a private symbol. If name is not empty, it will be the description.
- */
- static Local<Private> New(Isolate* isolate,
- Local<String> name = Local<String>());
-
- /**
- * Retrieve a global private symbol. If a symbol with this name has not
- * been retrieved in the same isolate before, it is created.
- * Note that private symbols created this way are never collected, so
- * they should only be used for statically fixed properties.
- * Also, there is only one global namespace for the names used as keys.
- * To minimize the potential for clashes, use qualified names as keys,
- * e.g., "Class#property".
- */
- static Local<Private> ForApi(Isolate* isolate, Local<String> name);
-
- V8_INLINE static Private* Cast(Data* data);
-
- private:
- Private();
-
- static void CheckCast(Data* that);
-};
-
-
-/**
- * A JavaScript number value (ECMA-262, 4.3.20)
- */
-class V8_EXPORT Number : public Primitive {
- public:
- double Value() const;
- static Local<Number> New(Isolate* isolate, double value);
- V8_INLINE static Number* Cast(v8::Data* data);
-
- private:
- Number();
- static void CheckCast(v8::Data* that);
-};
-
-
-/**
- * A JavaScript value representing a signed integer.
- */
-class V8_EXPORT Integer : public Number {
- public:
- static Local<Integer> New(Isolate* isolate, int32_t value);
- static Local<Integer> NewFromUnsigned(Isolate* isolate, uint32_t value);
- int64_t Value() const;
- V8_INLINE static Integer* Cast(v8::Data* data);
-
- private:
- Integer();
- static void CheckCast(v8::Data* that);
-};
-
-
-/**
- * A JavaScript value representing a 32-bit signed integer.
- */
-class V8_EXPORT Int32 : public Integer {
- public:
- int32_t Value() const;
- V8_INLINE static Int32* Cast(v8::Data* data);
-
- private:
- Int32();
- static void CheckCast(v8::Data* that);
-};
-
-
-/**
- * A JavaScript value representing a 32-bit unsigned integer.
- */
-class V8_EXPORT Uint32 : public Integer {
- public:
- uint32_t Value() const;
- V8_INLINE static Uint32* Cast(v8::Data* data);
-
- private:
- Uint32();
- static void CheckCast(v8::Data* that);
-};
-
-/**
- * A JavaScript BigInt value (https://tc39.github.io/proposal-bigint)
- */
-class V8_EXPORT BigInt : public Primitive {
- public:
- static Local<BigInt> New(Isolate* isolate, int64_t value);
- static Local<BigInt> NewFromUnsigned(Isolate* isolate, uint64_t value);
- /**
- * Creates a new BigInt object using a specified sign bit and a
- * specified list of digits/words.
- * The resulting number is calculated as:
- *
- * (-1)^sign_bit * (words[0] * (2^64)^0 + words[1] * (2^64)^1 + ...)
- */
- static MaybeLocal<BigInt> NewFromWords(Local<Context> context, int sign_bit,
- int word_count, const uint64_t* words);
-
- /**
- * Returns the value of this BigInt as an unsigned 64-bit integer.
- * If `lossless` is provided, it is set to whether the conversion was
- * lossless, i.e., neither truncated nor wrapped around. In particular,
- * it is set to `false` if this BigInt is negative.
- */
- uint64_t Uint64Value(bool* lossless = nullptr) const;
-
- /**
- * Returns the value of this BigInt as a signed 64-bit integer.
- * If `lossless` is provided, it is set to whether the conversion was
- * lossless, i.e., whether this BigInt was representable without truncation.
- */
- int64_t Int64Value(bool* lossless = nullptr) const;
-
- /**
- * Returns the number of 64-bit words needed to store the result of
- * ToWordsArray().
- */
- int WordCount() const;
-
- /**
- * Writes the contents of this BigInt to a specified memory location.
- * `sign_bit` must be provided and will be set to 1 if this BigInt is
- * negative.
- * `*word_count` has to be initialized to the length of the `words` array.
- * Upon return, it will be set to the actual number of words that would
- * be needed to store this BigInt (i.e. the return value of `WordCount()`).
- */
- void ToWordsArray(int* sign_bit, int* word_count, uint64_t* words) const;
-
- V8_INLINE static BigInt* Cast(v8::Data* data);
-
- private:
- BigInt();
- static void CheckCast(v8::Data* that);
-};
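
The word encoding above is easiest to see in a round trip. A minimal sketch, assuming a live context; the helper name is illustrative and error handling beyond the empty-handle check is elided:

#include <v8.h>

// Builds -(1 + 2 * 2^64) from a sign bit and two 64-bit words, then reads
// the digits back out with ToWordsArray().
v8::MaybeLocal<v8::BigInt> RoundTripBigInt(v8::Local<v8::Context> context) {
  const uint64_t words[] = {1, 2};  // words[0]*(2^64)^0 + words[1]*(2^64)^1
  v8::Local<v8::BigInt> big;
  if (!v8::BigInt::NewFromWords(context, /*sign_bit=*/1, /*word_count=*/2,
                                words)
           .ToLocal(&big)) {
    return {};
  }
  int sign_bit = 0;
  int word_count = big->WordCount();  // 2 for this value
  uint64_t out[2];
  big->ToWordsArray(&sign_bit, &word_count, out);
  // Here sign_bit == 1 and out == {1, 2}, matching the formula above.
  return big;
}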
-
-/**
- * PropertyAttribute.
- */
-enum PropertyAttribute {
- /** None. **/
- None = 0,
- /** ReadOnly, i.e., not writable. **/
- ReadOnly = 1 << 0,
- /** DontEnum, i.e., not enumerable. **/
- DontEnum = 1 << 1,
- /** DontDelete, i.e., not configurable. **/
- DontDelete = 1 << 2
-};
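
The attribute bits are meant to be or'ed together. A hedged sketch (helper name illustrative) of defining a read-only, non-enumerable data property via Object::DefineOwnProperty, declared further below:

#include <v8.h>

// Defines obj.answer = 42 as read-only and hidden from for-in loops.
bool DefineReadOnlyAnswer(v8::Isolate* isolate,
                          v8::Local<v8::Context> context,
                          v8::Local<v8::Object> obj) {
  v8::Local<v8::Name> key = v8::String::NewFromUtf8Literal(isolate, "answer");
  return obj->DefineOwnProperty(
                context, key, v8::Number::New(isolate, 42),
                static_cast<v8::PropertyAttribute>(v8::ReadOnly |
                                                   v8::DontEnum))
      .FromMaybe(false);
}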
-
-/**
- * Accessor[Getter|Setter] are used as callback functions when
- * setting|getting a particular property. See Object and ObjectTemplate's
- * method SetAccessor.
- */
-using AccessorGetterCallback =
- void (*)(Local<String> property, const PropertyCallbackInfo<Value>& info);
-using AccessorNameGetterCallback =
- void (*)(Local<Name> property, const PropertyCallbackInfo<Value>& info);
-
-using AccessorSetterCallback = void (*)(Local<String> property,
- Local<Value> value,
- const PropertyCallbackInfo<void>& info);
-using AccessorNameSetterCallback =
- void (*)(Local<Name> property, Local<Value> value,
- const PropertyCallbackInfo<void>& info);
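
A minimal getter/setter pair matching the Name-based signatures above (a sketch; the global counter is purely illustrative, where a real embedder would typically keep state in the callback's Data() or an internal field):

#include <v8.h>

static double g_counter = 0;  // Illustrative backing state.

void CounterGetter(v8::Local<v8::Name> property,
                   const v8::PropertyCallbackInfo<v8::Value>& info) {
  info.GetReturnValue().Set(g_counter);
}

void CounterSetter(v8::Local<v8::Name> property, v8::Local<v8::Value> value,
                   const v8::PropertyCallbackInfo<void>& info) {
  if (value->IsNumber()) g_counter = value.As<v8::Number>()->Value();
}

Such a pair can then be installed with Object::SetAccessor, declared further below.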
-
-/**
- * Access control specifications.
- *
- * Some accessors should be accessible across contexts. These
- * accessors have an explicit access control parameter which specifies
- * the kind of cross-context access that should be allowed.
- *
- * TODO(dcarney): Remove PROHIBITS_OVERWRITING as it is now unused.
- */
-enum AccessControl {
- DEFAULT = 0,
- ALL_CAN_READ = 1,
- ALL_CAN_WRITE = 1 << 1,
- PROHIBITS_OVERWRITING = 1 << 2
-};
-
-/**
- * Property filter bits. They can be or'ed to build a composite filter.
- */
-enum PropertyFilter {
- ALL_PROPERTIES = 0,
- ONLY_WRITABLE = 1,
- ONLY_ENUMERABLE = 2,
- ONLY_CONFIGURABLE = 4,
- SKIP_STRINGS = 8,
- SKIP_SYMBOLS = 16
-};
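
For example, a composite filter selecting own, enumerable, non-symbol keys; a sketch against GetOwnPropertyNames, declared further below on Object (helper name illustrative):

#include <v8.h>

v8::MaybeLocal<v8::Array> EnumerableStringKeys(v8::Local<v8::Context> context,
                                               v8::Local<v8::Object> obj) {
  return obj->GetOwnPropertyNames(
      context, static_cast<v8::PropertyFilter>(v8::ONLY_ENUMERABLE |
                                               v8::SKIP_SYMBOLS));
}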
-
-/**
- * Options for marking whether callbacks may trigger JS-observable side effects.
- * Side-effect-free callbacks are allowlisted during debug evaluation with
- * throwOnSideEffect. It applies when calling a Function, FunctionTemplate,
- * or an Accessor callback. For Interceptors, please see
- * PropertyHandlerFlags's kHasNoSideEffect.
- * Callbacks that only cause side effects to the receiver are allowlisted if
- * invoked on receiver objects that are created within the same debug-evaluate
- * call, as these objects are temporary and the side effect does not escape.
- */
-enum class SideEffectType {
- kHasSideEffect,
- kHasNoSideEffect,
- kHasSideEffectToReceiver
-};
-
-/**
- * Keys/Properties filter enums:
- *
- * KeyCollectionMode limits the range of collected properties. kOwnOnly limits
- * the collected properties to the given Object only. kIncludesPrototypes will
- * include all keys of the objects's prototype chain as well.
- */
-enum class KeyCollectionMode { kOwnOnly, kIncludePrototypes };
-
-/**
- * kIncludesIndices allows for integer indices to be collected, while
- * kSkipIndices will exclude integer indices from being collected.
- */
-enum class IndexFilter { kIncludeIndices, kSkipIndices };
-
-/**
- * kConvertToString will convert integer indices to strings.
- * kKeepNumbers will return numbers for integer indices.
- */
-enum class KeyConversionMode { kConvertToString, kKeepNumbers, kNoNumbers };
-
-/**
- * Integrity level for objects.
- */
-enum class IntegrityLevel { kFrozen, kSealed };
-
-/**
- * A JavaScript object (ECMA-262, 4.3.3)
- */
-class V8_EXPORT Object : public Value {
- public:
- /**
- * Set only returns Just(true) or Empty(), so if the call should never
- * fail, use result.Check().
- */
- V8_WARN_UNUSED_RESULT Maybe<bool> Set(Local<Context> context,
- Local<Value> key, Local<Value> value);
-
- V8_WARN_UNUSED_RESULT Maybe<bool> Set(Local<Context> context, uint32_t index,
- Local<Value> value);
-
- // Implements CreateDataProperty (ECMA-262, 7.3.4).
- //
- // Defines a configurable, writable, enumerable property with the given value
- // on the object unless the property already exists and is not configurable
- // or the object is not extensible.
- //
- // Returns true on success.
- V8_WARN_UNUSED_RESULT Maybe<bool> CreateDataProperty(Local<Context> context,
- Local<Name> key,
- Local<Value> value);
- V8_WARN_UNUSED_RESULT Maybe<bool> CreateDataProperty(Local<Context> context,
- uint32_t index,
- Local<Value> value);
-
- // Implements DefineOwnProperty.
- //
- // In general, CreateDataProperty will be faster; however, it does not
- // allow for specifying attributes.
- //
- // Returns true on success.
- V8_WARN_UNUSED_RESULT Maybe<bool> DefineOwnProperty(
- Local<Context> context, Local<Name> key, Local<Value> value,
- PropertyAttribute attributes = None);
-
- // Implements Object.DefineProperty(O, P, Attributes), see Ecma-262 19.1.2.4.
- //
- // The defineProperty function is used to add an own property or
- // update the attributes of an existing own property of an object.
- //
- // Both data and accessor descriptors can be used.
- //
- // In general, CreateDataProperty is faster; however, it does not allow
- // for specifying attributes or an accessor descriptor.
- //
- // The PropertyDescriptor can change when redefining a property.
- //
- // Returns true on success.
- V8_WARN_UNUSED_RESULT Maybe<bool> DefineProperty(
- Local<Context> context, Local<Name> key, PropertyDescriptor& descriptor);
-
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> Get(Local<Context> context,
- Local<Value> key);
-
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> Get(Local<Context> context,
- uint32_t index);
-
- /**
- * Gets the property attributes of a property which can be None or
- * any combination of ReadOnly, DontEnum and DontDelete. Returns
- * None when the property doesn't exist.
- */
- V8_WARN_UNUSED_RESULT Maybe<PropertyAttribute> GetPropertyAttributes(
- Local<Context> context, Local<Value> key);
-
- /**
- * Returns Object.getOwnPropertyDescriptor as per ES2016 section 19.1.2.6.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetOwnPropertyDescriptor(
- Local<Context> context, Local<Name> key);
-
- /**
- * Object::Has() calls the abstract operation HasProperty(O, P) described
- * in ECMA-262, 7.3.10. Has() returns
- * true if the object has the property, either own or on the prototype chain.
- * Interceptors, i.e., PropertyQueryCallbacks, are called if present.
- *
- * Has() has the same side effects as JavaScript's `variable in object`.
- * For example, calling Has() on a revoked proxy will throw an exception.
- *
- * \note Has() converts the key to a name, which possibly calls back into
- * JavaScript.
- *
- * See also v8::Object::HasOwnProperty() and
- * v8::Object::HasRealNamedProperty().
- */
- V8_WARN_UNUSED_RESULT Maybe<bool> Has(Local<Context> context,
- Local<Value> key);
-
- V8_WARN_UNUSED_RESULT Maybe<bool> Delete(Local<Context> context,
- Local<Value> key);
-
- V8_WARN_UNUSED_RESULT Maybe<bool> Has(Local<Context> context, uint32_t index);
-
- V8_WARN_UNUSED_RESULT Maybe<bool> Delete(Local<Context> context,
- uint32_t index);
-
- /**
- * Note: SideEffectType affects the getter only, not the setter.
- */
- V8_WARN_UNUSED_RESULT Maybe<bool> SetAccessor(
- Local<Context> context, Local<Name> name,
- AccessorNameGetterCallback getter,
- AccessorNameSetterCallback setter = nullptr,
- MaybeLocal<Value> data = MaybeLocal<Value>(),
- AccessControl settings = DEFAULT, PropertyAttribute attribute = None,
- SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
- SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
-
- void SetAccessorProperty(Local<Name> name, Local<Function> getter,
- Local<Function> setter = Local<Function>(),
- PropertyAttribute attribute = None,
- AccessControl settings = DEFAULT);
-
- /**
- * Sets a native data property like Template::SetNativeDataProperty, but
- * this method sets on this object directly.
- */
- V8_WARN_UNUSED_RESULT Maybe<bool> SetNativeDataProperty(
- Local<Context> context, Local<Name> name,
- AccessorNameGetterCallback getter,
- AccessorNameSetterCallback setter = nullptr,
- Local<Value> data = Local<Value>(), PropertyAttribute attributes = None,
- SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
- SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
-
- /**
- * Attempts to create a property with the given name which behaves like a data
- * property, except that the provided getter is invoked (and provided with the
- * data value) to supply its value the first time it is read. After the
- * property is accessed once, it is replaced with an ordinary data property.
- *
- * Analogous to Template::SetLazyDataProperty.
- */
- V8_WARN_UNUSED_RESULT Maybe<bool> SetLazyDataProperty(
- Local<Context> context, Local<Name> name,
- AccessorNameGetterCallback getter, Local<Value> data = Local<Value>(),
- PropertyAttribute attributes = None,
- SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
- SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
-
- /**
- * Functionality for private properties.
- * This is an experimental feature, use at your own risk.
- * Note: Private properties are not inherited. Do not rely on this, since it
- * may change.
- */
- Maybe<bool> HasPrivate(Local<Context> context, Local<Private> key);
- Maybe<bool> SetPrivate(Local<Context> context, Local<Private> key,
- Local<Value> value);
- Maybe<bool> DeletePrivate(Local<Context> context, Local<Private> key);
- MaybeLocal<Value> GetPrivate(Local<Context> context, Local<Private> key);
-
- /**
- * Returns an array containing the names of the enumerable properties
- * of this object, including properties from prototype objects. The
- * array returned by this method contains the same values as would
- * be enumerated by a for-in statement over this object.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetPropertyNames(
- Local<Context> context);
- V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetPropertyNames(
- Local<Context> context, KeyCollectionMode mode,
- PropertyFilter property_filter, IndexFilter index_filter,
- KeyConversionMode key_conversion = KeyConversionMode::kKeepNumbers);
-
- /**
- * This function has the same functionality as GetPropertyNames but
- * the returned array doesn't contain the names of properties from
- * prototype objects.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetOwnPropertyNames(
- Local<Context> context);
-
- /**
- * Returns an array containing the names of this object's own properties
- * that match the given filter. Unlike GetPropertyNames, properties from
- * prototype objects are not included.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Array> GetOwnPropertyNames(
- Local<Context> context, PropertyFilter filter,
- KeyConversionMode key_conversion = KeyConversionMode::kKeepNumbers);
-
- /**
- * Get the prototype object. This does not skip objects marked to
- * be skipped by __proto__ and it does not consult the security
- * handler.
- */
- Local<Value> GetPrototype();
-
- /**
- * Set the prototype object. This does not skip objects marked to
- * be skipped by __proto__ and it does not consult the security
- * handler.
- */
- V8_WARN_UNUSED_RESULT Maybe<bool> SetPrototype(Local<Context> context,
- Local<Value> prototype);
-
- /**
- * Finds an instance of the given function template in the prototype
- * chain.
- */
- Local<Object> FindInstanceInPrototypeChain(Local<FunctionTemplate> tmpl);
-
- /**
- * Calls the builtin Object.prototype.toString on this object.
- * Unlike Value::ToString(), this never calls a user-defined
- * toString function.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<String> ObjectProtoToString(
- Local<Context> context);
-
- /**
- * Returns the name of the function invoked as a constructor for this object.
- */
- Local<String> GetConstructorName();
-
- /**
- * Sets the integrity level of the object.
- */
- Maybe<bool> SetIntegrityLevel(Local<Context> context, IntegrityLevel level);
-
- /** Gets the number of internal fields for this Object. */
- int InternalFieldCount() const;
-
- /** Same as above, but works for PersistentBase. */
- V8_INLINE static int InternalFieldCount(
- const PersistentBase<Object>& object) {
- return object.val_->InternalFieldCount();
- }
-
- /** Same as above, but works for BasicTracedReference. */
- V8_INLINE static int InternalFieldCount(
- const BasicTracedReference<Object>& object) {
- return object->InternalFieldCount();
- }
-
- /** Gets the value from an internal field. */
- V8_INLINE Local<Value> GetInternalField(int index);
-
- /** Sets the value in an internal field. */
- void SetInternalField(int index, Local<Value> value);
-
- /**
- * Gets a 2-byte-aligned native pointer from an internal field. This field
- * must have been set by SetAlignedPointerInInternalField; everything else
- * leads to undefined behavior.
- */
- V8_INLINE void* GetAlignedPointerFromInternalField(int index);
-
- /** Same as above, but works for PersistentBase. */
- V8_INLINE static void* GetAlignedPointerFromInternalField(
- const PersistentBase<Object>& object, int index) {
- return object.val_->GetAlignedPointerFromInternalField(index);
- }
-
- /** Same as above, but works for BasicTracedReference. */
- V8_INLINE static void* GetAlignedPointerFromInternalField(
- const BasicTracedReference<Object>& object, int index) {
- return object->GetAlignedPointerFromInternalField(index);
- }
-
- /**
- * Sets a 2-byte-aligned native pointer in an internal field. To retrieve such
- * a field, GetAlignedPointerFromInternalField must be used; everything else
- * leads to undefined behavior.
- */
- void SetAlignedPointerInInternalField(int index, void* value);
- void SetAlignedPointerInInternalFields(int argc, int indices[],
- void* values[]);
-
- /**
- * HasOwnProperty() is like JavaScript's Object.prototype.hasOwnProperty().
- *
- * See also v8::Object::Has() and v8::Object::HasRealNamedProperty().
- */
- V8_WARN_UNUSED_RESULT Maybe<bool> HasOwnProperty(Local<Context> context,
- Local<Name> key);
- V8_WARN_UNUSED_RESULT Maybe<bool> HasOwnProperty(Local<Context> context,
- uint32_t index);
- /**
- * Use HasRealNamedProperty() if you want to check if an object has an own
- * property without causing side effects, i.e., without calling interceptors.
- *
- * This function is similar to v8::Object::HasOwnProperty(), but it does not
- * call interceptors.
- *
- * \note Consider using non-masking interceptors, i.e., the interceptors are
- * not called if the receiver has the real named property. See
- * `v8::PropertyHandlerFlags::kNonMasking`.
- *
- * See also v8::Object::Has().
- */
- V8_WARN_UNUSED_RESULT Maybe<bool> HasRealNamedProperty(Local<Context> context,
- Local<Name> key);
- V8_WARN_UNUSED_RESULT Maybe<bool> HasRealIndexedProperty(
- Local<Context> context, uint32_t index);
- V8_WARN_UNUSED_RESULT Maybe<bool> HasRealNamedCallbackProperty(
- Local<Context> context, Local<Name> key);
-
- /**
- * If result.IsEmpty() no real property was located in the prototype chain.
- * This means interceptors in the prototype chain are not called.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetRealNamedPropertyInPrototypeChain(
- Local<Context> context, Local<Name> key);
-
- /**
- * Gets the property attributes of a real property in the prototype chain,
- * which can be None or any combination of ReadOnly, DontEnum and DontDelete.
- * Interceptors in the prototype chain are not called.
- */
- V8_WARN_UNUSED_RESULT Maybe<PropertyAttribute>
- GetRealNamedPropertyAttributesInPrototypeChain(Local<Context> context,
- Local<Name> key);
-
- /**
- * If result.IsEmpty() no real property was located on the object or
- * in the prototype chain.
- * This means interceptors in the prototype chain are not called.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> GetRealNamedProperty(
- Local<Context> context, Local<Name> key);
-
- /**
- * Gets the property attributes of a real property which can be
- * None or any combination of ReadOnly, DontEnum and DontDelete.
- * Interceptors in the prototype chain are not called.
- */
- V8_WARN_UNUSED_RESULT Maybe<PropertyAttribute> GetRealNamedPropertyAttributes(
- Local<Context> context, Local<Name> key);
-
- /** Tests for a named lookup interceptor.*/
- bool HasNamedLookupInterceptor() const;
-
- /** Tests for an index lookup interceptor.*/
- bool HasIndexedLookupInterceptor() const;
-
- /**
- * Returns the identity hash for this object. The current implementation
- * uses a hidden property on the object to store the identity hash.
- *
- * The return value will never be 0. Also, it is not guaranteed to be
- * unique.
- */
- int GetIdentityHash();
-
- /**
- * Clone this object with a fast but shallow copy. The clone's properties
- * point to the same values as the original object's.
- */
- // TODO(dcarney): take an isolate and optionally bail out?
- Local<Object> Clone();
-
- /**
- * Returns the context in which the object was created.
- */
- // TODO(chromium:1166077): Mark as deprecated once users are updated.
- V8_DEPRECATE_SOON("Use MaybeLocal<Context> GetCreationContext()")
- Local<Context> CreationContext();
- MaybeLocal<Context> GetCreationContext();
-
- /** Same as above, but works for Persistents */
- // TODO(chromium:1166077): Mark as deprecated once users are updated.
- V8_DEPRECATE_SOON(
- "Use MaybeLocal<Context> GetCreationContext(const "
- "PersistentBase<Object>& object)")
- static Local<Context> CreationContext(const PersistentBase<Object>& object);
- V8_INLINE static MaybeLocal<Context> GetCreationContext(
- const PersistentBase<Object>& object) {
- return object.val_->GetCreationContext();
- }
-
- /**
- * Checks whether a callback is set by the
- * ObjectTemplate::SetCallAsFunctionHandler method.
- * When an Object is callable this method returns true.
- */
- bool IsCallable() const;
-
- /**
- * True if this object is a constructor.
- */
- bool IsConstructor() const;
-
- /**
- * True if this object can carry information relevant to the embedder in its
- * embedder fields, false otherwise. This is generally true for objects
- * constructed through function templates but also holds for other types where
- * V8 automatically adds internal fields at compile time, such as
- * v8::ArrayBuffer.
- */
- bool IsApiWrapper() const;
-
- /**
- * True if this object was created from an object template which was marked
- * as undetectable. See v8::ObjectTemplate::MarkAsUndetectable for more
- * information.
- */
- bool IsUndetectable() const;
-
- /**
- * Call an Object as a function if a callback is set by the
- * ObjectTemplate::SetCallAsFunctionHandler method.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> CallAsFunction(Local<Context> context,
- Local<Value> recv,
- int argc,
- Local<Value> argv[]);
-
- /**
- * Call an Object as a constructor if a callback is set by the
- * ObjectTemplate::SetCallAsFunctionHandler method.
- * Note: This method behaves like the Function::NewInstance method.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> CallAsConstructor(
- Local<Context> context, int argc, Local<Value> argv[]);
-
- /**
- * Returns the isolate to which the Object belongs.
- */
- Isolate* GetIsolate();
-
- /**
- * If this object is a Set, Map, WeakSet or WeakMap, this returns a
- * representation of the elements of this object as an array.
- * If this object is a SetIterator or MapIterator, this returns all
- * elements of the underlying collection, starting at the iterator's current
- * position.
- * For other types, this will return an empty MaybeLocal<Array> (without
- * scheduling an exception).
- */
- MaybeLocal<Array> PreviewEntries(bool* is_key_value);
-
- static Local<Object> New(Isolate* isolate);
-
- /**
- * Creates a JavaScript object with the given properties, and
- * the given prototype_or_null (which can be any JavaScript
- * value, and if it's null, the newly created object won't have
- * a prototype at all). This is similar to Object.create().
- * All properties will be created as enumerable, configurable
- * and writable properties.
- */
- static Local<Object> New(Isolate* isolate, Local<Value> prototype_or_null,
- Local<Name>* names, Local<Value>* values,
- size_t length);
-
- V8_INLINE static Object* Cast(Value* obj);
-
- /**
- * Support for TC39 "dynamic code brand checks" proposal.
- *
- * This API allows querying whether an object was constructed from a
- * "code like" ObjectTemplate.
- *
- * See also: v8::ObjectTemplate::SetCodeLike
- */
- bool IsCodeLike(Isolate* isolate) const;
-
- private:
- Object();
- static void CheckCast(Value* obj);
- Local<Value> SlowGetInternalField(int index);
- void* SlowGetAlignedPointerFromInternalField(int index);
-};
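
Because nearly every Object operation above can be intercepted by user JavaScript (proxies, accessors, interceptors), the Maybe/MaybeLocal return values have to be checked rather than assumed. A hedged end-to-end sketch of the Set/Get pattern (names illustrative):

#include <v8.h>

// Writes then reads obj.greeting, propagating failure as an empty
// MaybeLocal instead of crashing on a thrown exception.
v8::MaybeLocal<v8::Value> SetAndGetGreeting(v8::Isolate* isolate,
                                            v8::Local<v8::Context> context,
                                            v8::Local<v8::Object> obj) {
  v8::Local<v8::String> key =
      v8::String::NewFromUtf8Literal(isolate, "greeting");
  v8::Local<v8::String> value =
      v8::String::NewFromUtf8Literal(isolate, "hello");
  if (obj->Set(context, key, value).IsNothing()) {
    return {};  // The setter threw (e.g. a proxy trap); an exception is pending.
  }
  return obj->Get(context, key);
}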
-
-
-/**
- * An instance of the built-in array constructor (ECMA-262, 15.4.2).
- */
-class V8_EXPORT Array : public Object {
- public:
- uint32_t Length() const;
-
- /**
- * Creates a JavaScript array with the given length. If the length
- * is negative the returned array will have length 0.
- */
- static Local<Array> New(Isolate* isolate, int length = 0);
-
- /**
- * Creates a JavaScript array out of a Local<Value> array in C++
- * with a known length.
- */
- static Local<Array> New(Isolate* isolate, Local<Value>* elements,
- size_t length);
- V8_INLINE static Array* Cast(Value* obj);
-
- private:
- Array();
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * An instance of the built-in Map constructor (ECMA-262, 6th Edition, 23.1.1).
- */
-class V8_EXPORT Map : public Object {
- public:
- size_t Size() const;
- void Clear();
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> Get(Local<Context> context,
- Local<Value> key);
- V8_WARN_UNUSED_RESULT MaybeLocal<Map> Set(Local<Context> context,
- Local<Value> key,
- Local<Value> value);
- V8_WARN_UNUSED_RESULT Maybe<bool> Has(Local<Context> context,
- Local<Value> key);
- V8_WARN_UNUSED_RESULT Maybe<bool> Delete(Local<Context> context,
- Local<Value> key);
-
- /**
- * Returns an array of length Size() * 2, where index 2 * N is the Nth key
- * and index 2 * N + 1 is the Nth value.
- */
- Local<Array> AsArray() const;
-
- /**
- * Creates a new empty Map.
- */
- static Local<Map> New(Isolate* isolate);
-
- V8_INLINE static Map* Cast(Value* obj);
-
- private:
- Map();
- static void CheckCast(Value* obj);
-};
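
The flattened AsArray() layout is easiest to see iterated in pairs; a sketch (ToLocalChecked() is reasonable here only because the indices stay in range):

#include <v8.h>

// For a map {a: 1, b: 2}, AsArray() yields [a, 1, b, 2].
void DumpMapPairs(v8::Local<v8::Context> context, v8::Local<v8::Map> map) {
  v8::Local<v8::Array> flat = map->AsArray();
  for (uint32_t i = 0; i + 1 < flat->Length(); i += 2) {
    v8::Local<v8::Value> key = flat->Get(context, i).ToLocalChecked();
    v8::Local<v8::Value> value = flat->Get(context, i + 1).ToLocalChecked();
    // ... use key and value ...
    (void)key;
    (void)value;
  }
}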
-
-
-/**
- * An instance of the built-in Set constructor (ECMA-262, 6th Edition, 23.2.1).
- */
-class V8_EXPORT Set : public Object {
- public:
- size_t Size() const;
- void Clear();
- V8_WARN_UNUSED_RESULT MaybeLocal<Set> Add(Local<Context> context,
- Local<Value> key);
- V8_WARN_UNUSED_RESULT Maybe<bool> Has(Local<Context> context,
- Local<Value> key);
- V8_WARN_UNUSED_RESULT Maybe<bool> Delete(Local<Context> context,
- Local<Value> key);
-
- /**
- * Returns an array of the keys in this Set.
- */
- Local<Array> AsArray() const;
-
- /**
- * Creates a new empty Set.
- */
- static Local<Set> New(Isolate* isolate);
-
- V8_INLINE static Set* Cast(Value* obj);
-
- private:
- Set();
- static void CheckCast(Value* obj);
-};
-
-
-template<typename T>
-class ReturnValue {
- public:
- template <class S> V8_INLINE ReturnValue(const ReturnValue<S>& that)
- : value_(that.value_) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- }
- // Local setters
- template <typename S>
- V8_INLINE void Set(const Global<S>& handle);
- template <typename S>
- V8_INLINE void Set(const BasicTracedReference<S>& handle);
- template <typename S>
- V8_INLINE void Set(const Local<S> handle);
- // Fast primitive setters
- V8_INLINE void Set(bool value);
- V8_INLINE void Set(double i);
- V8_INLINE void Set(int32_t i);
- V8_INLINE void Set(uint32_t i);
- // Fast JS primitive setters
- V8_INLINE void SetNull();
- V8_INLINE void SetUndefined();
- V8_INLINE void SetEmptyString();
- // Convenience getter for Isolate
- V8_INLINE Isolate* GetIsolate() const;
-
- // Pointer setter: Uncompilable to prevent inadvertent misuse.
- template <typename S>
- V8_INLINE void Set(S* whatever);
-
- // Getter. Creates a new Local<> so it comes with a certain performance
- // hit. If the ReturnValue was not yet set, this will return the undefined
- // value.
- V8_INLINE Local<Value> Get() const;
-
- private:
- template<class F> friend class ReturnValue;
- template<class F> friend class FunctionCallbackInfo;
- template<class F> friend class PropertyCallbackInfo;
- template <class F, class G, class H>
- friend class PersistentValueMapBase;
- V8_INLINE void SetInternal(internal::Address value) { *value_ = value; }
- V8_INLINE internal::Address GetDefaultValue();
- V8_INLINE explicit ReturnValue(internal::Address* slot);
- internal::Address* value_;
-};
-
-
-/**
- * The argument information given to function call callbacks. This
- * class provides access to information about the context of the call,
- * including the receiver, the number and values of arguments, and
- * the holder of the function.
- */
-template<typename T>
-class FunctionCallbackInfo {
- public:
- /** The number of available arguments. */
- V8_INLINE int Length() const;
- /**
- * Accessor for the available arguments. Returns `undefined` if the index
- * is out of bounds.
- */
- V8_INLINE Local<Value> operator[](int i) const;
- /** Returns the receiver. This corresponds to the "this" value. */
- V8_INLINE Local<Object> This() const;
- /**
- * If the callback was created without a Signature, this is the same
- * value as This(). If there is a signature, and the signature didn't match
- * This() but one of its hidden prototypes, this will be the respective
- * hidden prototype.
- *
- * Note that this is not the prototype of This() on which the accessor
- * referencing this callback was found (which in V8 internally is often
- * referred to as holder [sic]).
- */
- V8_INLINE Local<Object> Holder() const;
- /** For construct calls, this returns the "new.target" value. */
- V8_INLINE Local<Value> NewTarget() const;
- /** Indicates whether this is a regular call or a construct call. */
- V8_INLINE bool IsConstructCall() const;
- /** The data argument specified when creating the callback. */
- V8_INLINE Local<Value> Data() const;
- /** The current Isolate. */
- V8_INLINE Isolate* GetIsolate() const;
- /** The ReturnValue for the call. */
- V8_INLINE ReturnValue<T> GetReturnValue() const;
- // This shouldn't be public, but the arm compiler needs it.
- static const int kArgsLength = 6;
-
- protected:
- friend class internal::FunctionCallbackArguments;
- friend class internal::CustomArguments<FunctionCallbackInfo>;
- friend class debug::ConsoleCallArguments;
- static const int kHolderIndex = 0;
- static const int kIsolateIndex = 1;
- static const int kReturnValueDefaultValueIndex = 2;
- static const int kReturnValueIndex = 3;
- static const int kDataIndex = 4;
- static const int kNewTargetIndex = 5;
-
- V8_INLINE FunctionCallbackInfo(internal::Address* implicit_args,
- internal::Address* values, int length);
- internal::Address* implicit_args_;
- internal::Address* values_;
- int length_;
-};
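
A small callback with this shape (a sketch), summing the numeric arguments via Length(), operator[], and GetReturnValue(); it matches the FunctionCallback signature defined further below:

#include <v8.h>

// Sums all numeric arguments and returns the total; non-numbers are
// skipped rather than coerced.
void Sum(const v8::FunctionCallbackInfo<v8::Value>& info) {
  double total = 0;
  for (int i = 0; i < info.Length(); ++i) {
    if (info[i]->IsNumber()) total += info[i].As<v8::Number>()->Value();
  }
  info.GetReturnValue().Set(total);
}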
-
-
-/**
- * The information passed to a property callback about the context
- * of the property access.
- */
-template<typename T>
-class PropertyCallbackInfo {
- public:
- /**
- * \return The isolate of the property access.
- */
- V8_INLINE Isolate* GetIsolate() const;
-
- /**
- * \return The data set in the configuration, i.e., in
- * `NamedPropertyHandlerConfiguration` or
- * `IndexedPropertyHandlerConfiguration.`
- */
- V8_INLINE Local<Value> Data() const;
-
- /**
- * \return The receiver. In many cases, this is the object on which the
- * property access was intercepted. When using
- * `Reflect.get`, `Function.prototype.call`, or similar functions, it is the
- * object passed in as receiver or thisArg.
- *
- * \code
- * void GetterCallback(Local<Name> name,
- * const v8::PropertyCallbackInfo<v8::Value>& info) {
- * auto context = info.GetIsolate()->GetCurrentContext();
- *
- * v8::Local<v8::Value> a_this =
- * info.This()
- * ->GetRealNamedProperty(context, v8_str("a"))
- * .ToLocalChecked();
- * v8::Local<v8::Value> a_holder =
- * info.Holder()
- * ->GetRealNamedProperty(context, v8_str("a"))
- * .ToLocalChecked();
- *
- * CHECK(v8_str("r")->Equals(context, a_this).FromJust());
- * CHECK(v8_str("obj")->Equals(context, a_holder).FromJust());
- *
- * info.GetReturnValue().Set(name);
- * }
- *
- * v8::Local<v8::FunctionTemplate> templ =
- * v8::FunctionTemplate::New(isolate);
- * templ->InstanceTemplate()->SetHandler(
- * v8::NamedPropertyHandlerConfiguration(GetterCallback));
- * LocalContext env;
- * env->Global()
- * ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
- * .ToLocalChecked()
- * ->NewInstance(env.local())
- * .ToLocalChecked())
- * .FromJust();
- *
- * CompileRun("obj.a = 'obj'; var r = {a: 'r'}; Reflect.get(obj, 'x', r)");
- * \endcode
- */
- V8_INLINE Local<Object> This() const;
-
- /**
- * \return The object in the prototype chain of the receiver that has the
- * interceptor. Suppose you have `x` and its prototype is `y`, and `y`
- * has an interceptor. Then `info.This()` is `x` and `info.Holder()` is `y`.
- * The Holder() could be a hidden object (the global object, rather
- * than the global proxy).
- *
- * \note For security reasons, do not pass the object back into the runtime.
- */
- V8_INLINE Local<Object> Holder() const;
-
- /**
- * \return The return value of the callback.
- * Can be changed by calling Set().
- * \code
- * info.GetReturnValue().Set(...)
- * \endcode
- *
- */
- V8_INLINE ReturnValue<T> GetReturnValue() const;
-
- /**
- * \return True if the intercepted function should throw if an error occurs.
- * Usually, `true` corresponds to `'use strict'`.
- *
- * \note Always `false` when intercepting `Reflect.set()`
- * independent of the language mode.
- */
- V8_INLINE bool ShouldThrowOnError() const;
-
- // This shouldn't be public, but the arm compiler needs it.
- static const int kArgsLength = 7;
-
- protected:
- friend class MacroAssembler;
- friend class internal::PropertyCallbackArguments;
- friend class internal::CustomArguments<PropertyCallbackInfo>;
- static const int kShouldThrowOnErrorIndex = 0;
- static const int kHolderIndex = 1;
- static const int kIsolateIndex = 2;
- static const int kReturnValueDefaultValueIndex = 3;
- static const int kReturnValueIndex = 4;
- static const int kDataIndex = 5;
- static const int kThisIndex = 6;
-
- V8_INLINE PropertyCallbackInfo(internal::Address* args) : args_(args) {}
- internal::Address* args_;
-};
-
-using FunctionCallback = void (*)(const FunctionCallbackInfo<Value>& info);
-
-enum class ConstructorBehavior { kThrow, kAllow };
-
-/**
- * A JavaScript function object (ECMA-262, 15.3).
- */
-class V8_EXPORT Function : public Object {
- public:
- /**
- * Create a function in the current execution context
- * for a given FunctionCallback.
- */
- static MaybeLocal<Function> New(
- Local<Context> context, FunctionCallback callback,
- Local<Value> data = Local<Value>(), int length = 0,
- ConstructorBehavior behavior = ConstructorBehavior::kAllow,
- SideEffectType side_effect_type = SideEffectType::kHasSideEffect);
-
- V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstance(
- Local<Context> context, int argc, Local<Value> argv[]) const;
-
- V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstance(
- Local<Context> context) const {
- return NewInstance(context, 0, nullptr);
- }
-
- /**
- * When side effect checks are enabled, passing kHasNoSideEffect allows the
- * constructor to be invoked without throwing. Calls made within the
- * constructor are still checked.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstanceWithSideEffectType(
- Local<Context> context, int argc, Local<Value> argv[],
- SideEffectType side_effect_type = SideEffectType::kHasSideEffect) const;
-
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> Call(Local<Context> context,
- Local<Value> recv, int argc,
- Local<Value> argv[]);
-
- void SetName(Local<String> name);
- Local<Value> GetName() const;
-
- /**
- * Name inferred from variable or property assignment of this function.
- * Used to facilitate debugging and profiling of JavaScript code written
- * in an OO style, where many functions are anonymous but are assigned
- * to object properties.
- */
- Local<Value> GetInferredName() const;
-
- /**
- * displayName if it is set, otherwise name if it is configured, otherwise
- * function name, otherwise inferred name.
- */
- Local<Value> GetDebugName() const;
-
- /**
- * Returns the zero-based line number of the function body, or
- * kLineOffsetNotFound if no information is available.
- */
- int GetScriptLineNumber() const;
- /**
- * Returns the zero-based column number of the function body, or
- * kLineOffsetNotFound if no information is available.
- */
- int GetScriptColumnNumber() const;
-
- /**
- * Returns scriptId.
- */
- int ScriptId() const;
-
- /**
- * Returns the original function if this function is bound, else returns
- * v8::Undefined.
- */
- Local<Value> GetBoundFunction() const;
-
- /**
- * Calls builtin Function.prototype.toString on this function.
- * This is different from Value::ToString(), which may call a user-defined
- * toString() function, and from Object::ObjectProtoToString(), which
- * always serializes "[object Function]".
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<String> FunctionProtoToString(
- Local<Context> context);
-
- ScriptOrigin GetScriptOrigin() const;
- V8_INLINE static Function* Cast(Value* obj);
- static const int kLineOffsetNotFound;
-
- private:
- Function();
- static void CheckCast(Value* obj);
-};
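
Creating and invoking such a function from C++ might look like the following sketch, reusing the Sum callback from the FunctionCallbackInfo example above (helper name illustrative):

#include <v8.h>

// Builds a Function from the Sum callback and calls it as sum(1, 2).
v8::MaybeLocal<v8::Value> CallSum(v8::Isolate* isolate,
                                  v8::Local<v8::Context> context) {
  v8::Local<v8::Function> fn;
  if (!v8::Function::New(context, Sum).ToLocal(&fn)) return {};
  v8::Local<v8::Value> args[] = {v8::Number::New(isolate, 1),
                                 v8::Number::New(isolate, 2)};
  return fn->Call(context, context->Global(), 2, args);
}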
-
-#ifndef V8_PROMISE_INTERNAL_FIELD_COUNT
- // The number of required internal fields can be defined by the embedder.
-#define V8_PROMISE_INTERNAL_FIELD_COUNT 0
-#endif
-
-/**
- * An instance of the built-in Promise constructor (ES6 draft).
- */
-class V8_EXPORT Promise : public Object {
- public:
- /**
- * State of the promise. Each value corresponds to one of the possible values
- * of the [[PromiseState]] field.
- */
- enum PromiseState { kPending, kFulfilled, kRejected };
-
- class V8_EXPORT Resolver : public Object {
- public:
- /**
- * Create a new resolver, along with an associated promise in pending state.
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<Resolver> New(
- Local<Context> context);
-
- /**
- * Extract the associated promise.
- */
- Local<Promise> GetPromise();
-
- /**
- * Resolve/reject the associated promise with a given value.
- * Ignored if the promise is no longer pending.
- */
- V8_WARN_UNUSED_RESULT Maybe<bool> Resolve(Local<Context> context,
- Local<Value> value);
-
- V8_WARN_UNUSED_RESULT Maybe<bool> Reject(Local<Context> context,
- Local<Value> value);
-
- V8_INLINE static Resolver* Cast(Value* obj);
-
- private:
- Resolver();
- static void CheckCast(Value* obj);
- };
-
- /**
- * Register a resolution/rejection handler with a promise.
- * The handler is given the respective resolution/rejection value as
- * an argument. If the promise is already resolved/rejected, the handler is
- * invoked at the end of the turn.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Catch(Local<Context> context,
- Local<Function> handler);
-
- V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Then(Local<Context> context,
- Local<Function> handler);
-
- V8_WARN_UNUSED_RESULT MaybeLocal<Promise> Then(Local<Context> context,
- Local<Function> on_fulfilled,
- Local<Function> on_rejected);
-
- /**
- * Returns true if the promise has at least one derived promise, and
- * therefore resolve/reject handlers (including default handler).
- */
- bool HasHandler() const;
-
- /**
- * Returns the content of the [[PromiseResult]] field. The Promise must not
- * be pending.
- */
- Local<Value> Result();
-
- /**
- * Returns the value of the [[PromiseState]] field.
- */
- PromiseState State();
-
- /**
- * Marks this promise as handled to avoid reporting unhandled rejections.
- */
- void MarkAsHandled();
-
- /**
- * Marks this promise as silent to prevent pausing the debugger when the
- * promise is rejected.
- */
- void MarkAsSilent();
-
- V8_INLINE static Promise* Cast(Value* obj);
-
- static const int kEmbedderFieldCount = V8_PROMISE_INTERNAL_FIELD_COUNT;
-
- private:
- Promise();
- static void CheckCast(Value* obj);
-};
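
The nested Resolver is the C++ analogue of `new Promise((resolve, reject) => ...)`. A hedged sketch; note the embedder still has to run microtasks for any Then() handlers to fire:

#include <v8.h>

// Creates a pending promise, then resolves it with 42.
v8::MaybeLocal<v8::Promise> MakeResolvedPromise(
    v8::Isolate* isolate, v8::Local<v8::Context> context) {
  v8::Local<v8::Promise::Resolver> resolver;
  if (!v8::Promise::Resolver::New(context).ToLocal(&resolver)) return {};
  v8::Local<v8::Promise> promise = resolver->GetPromise();
  if (resolver->Resolve(context, v8::Number::New(isolate, 42)).IsNothing()) {
    return {};
  }
  return promise;
}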
-
-/**
- * An instance of a Property Descriptor, see Ecma-262 6.2.4.
- *
- * Properties in a descriptor are present or absent. If you do not set
- * `enumerable`, `configurable`, and `writable`, they are absent. If `value`,
- * `get`, or `set` should be absent but the constructor requires them, pass
- * empty handles.
- *
- * Accessors `get` and `set` must be callable or undefined if they are present.
- *
- * \note Only query properties if they are present, i.e., call `x()` only if
- * `has_x()` returns true.
- *
- * \code
- * // var desc = {writable: false}
- * v8::PropertyDescriptor d(Local<Value>(), false);
- * d.value(); // error, value not set
- * if (d.has_writable()) {
- * d.writable(); // false
- * }
- *
- * // var desc = {value: undefined}
- * v8::PropertyDescriptor d(v8::Undefined(isolate));
- *
- * // var desc = {get: undefined}
- * v8::PropertyDescriptor d(v8::Undefined(isolate), Local<Value>());
- * \endcode
- */
-class V8_EXPORT PropertyDescriptor {
- public:
- // GenericDescriptor
- PropertyDescriptor();
-
- // DataDescriptor
- explicit PropertyDescriptor(Local<Value> value);
-
- // DataDescriptor with writable property
- PropertyDescriptor(Local<Value> value, bool writable);
-
- // AccessorDescriptor
- PropertyDescriptor(Local<Value> get, Local<Value> set);
-
- ~PropertyDescriptor();
-
- Local<Value> value() const;
- bool has_value() const;
-
- Local<Value> get() const;
- bool has_get() const;
- Local<Value> set() const;
- bool has_set() const;
-
- void set_enumerable(bool enumerable);
- bool enumerable() const;
- bool has_enumerable() const;
-
- void set_configurable(bool configurable);
- bool configurable() const;
- bool has_configurable() const;
-
- bool writable() const;
- bool has_writable() const;
-
- struct PrivateData;
- PrivateData* get_private() const { return private_; }
-
- PropertyDescriptor(const PropertyDescriptor&) = delete;
- void operator=(const PropertyDescriptor&) = delete;
-
- private:
- PrivateData* private_;
-};
-
-/**
- * An instance of the built-in Proxy constructor (ECMA-262, 6th Edition,
- * 26.2.1).
- */
-class V8_EXPORT Proxy : public Object {
- public:
- Local<Value> GetTarget();
- Local<Value> GetHandler();
- bool IsRevoked() const;
- void Revoke();
-
- /**
- * Creates a new Proxy for the target object.
- */
- static MaybeLocal<Proxy> New(Local<Context> context,
- Local<Object> local_target,
- Local<Object> local_handler);
-
- V8_INLINE static Proxy* Cast(Value* obj);
-
- private:
- Proxy();
- static void CheckCast(Value* obj);
-};
-
-/**
- * Points to an unowned contiguous buffer holding a known number of elements.
- *
- * This is similar to std::span (under consideration for C++20), but does not
- * require advanced C++ support. In the (far) future, this may be replaced with
- * or aliased to std::span.
- *
- * To facilitate future migration, this class exposes a subset of the interface
- * implemented by std::span.
- */
-template <typename T>
-class V8_EXPORT MemorySpan {
- public:
- /** The default constructor creates an empty span. */
- constexpr MemorySpan() = default;
-
- constexpr MemorySpan(T* data, size_t size) : data_(data), size_(size) {}
-
- /** Returns a pointer to the beginning of the buffer. */
- constexpr T* data() const { return data_; }
- /** Returns the number of elements that the buffer holds. */
- constexpr size_t size() const { return size_; }
-
- private:
- T* data_ = nullptr;
- size_t size_ = 0;
-};
-
-/**
- * An owned byte buffer with associated size.
- */
-struct OwnedBuffer {
- std::unique_ptr<const uint8_t[]> buffer;
- size_t size = 0;
- OwnedBuffer(std::unique_ptr<const uint8_t[]> buffer, size_t size)
- : buffer(std::move(buffer)), size(size) {}
- OwnedBuffer() = default;
-};
-
-// Wrapper around a compiled WebAssembly module, which is potentially shared by
-// different WasmModuleObjects.
-class V8_EXPORT CompiledWasmModule {
- public:
- /**
- * Serialize the compiled module. The serialized data does not include the
- * wire bytes.
- */
- OwnedBuffer Serialize();
-
- /**
- * Get the (wasm-encoded) wire bytes that were used to compile this module.
- */
- MemorySpan<const uint8_t> GetWireBytesRef();
-
- const std::string& source_url() const { return source_url_; }
-
- private:
- friend class WasmModuleObject;
- friend class WasmStreaming;
-
- explicit CompiledWasmModule(std::shared_ptr<internal::wasm::NativeModule>,
- const char* source_url, size_t url_length);
-
- const std::shared_ptr<internal::wasm::NativeModule> native_module_;
- const std::string source_url_;
-};
-
-// An instance of WebAssembly.Memory.
-class V8_EXPORT WasmMemoryObject : public Object {
- public:
- WasmMemoryObject() = delete;
-
- /**
- * Returns underlying ArrayBuffer.
- */
- Local<ArrayBuffer> Buffer();
-
- V8_INLINE static WasmMemoryObject* Cast(Value* obj);
-
- private:
- static void CheckCast(Value* object);
-};
-
-// An instance of WebAssembly.Module.
-class V8_EXPORT WasmModuleObject : public Object {
- public:
- WasmModuleObject() = delete;
-
- /**
- * Efficiently re-create a WasmModuleObject, without recompiling, from
- * a CompiledWasmModule.
- */
- static MaybeLocal<WasmModuleObject> FromCompiledModule(
- Isolate* isolate, const CompiledWasmModule&);
-
- /**
- * Get the compiled module for this module object. The compiled module can be
- * shared by several module objects.
- */
- CompiledWasmModule GetCompiledModule();
-
- V8_INLINE static WasmModuleObject* Cast(Value* obj);
-
- private:
- static void CheckCast(Value* obj);
-};
-
-/**
- * The V8 interface for WebAssembly streaming compilation. When streaming
- * compilation is initiated, V8 passes a {WasmStreaming} object to the embedder
- * such that the embedder can pass the input bytes for streaming compilation to
- * V8.
- */
-class V8_EXPORT WasmStreaming final {
- public:
- class WasmStreamingImpl;
-
- /**
- * Client to receive streaming event notifications.
- */
- class Client {
- public:
- virtual ~Client() = default;
- /**
- * Passes the fully compiled module to the client. This can be used to
- * implement code caching.
- */
- virtual void OnModuleCompiled(CompiledWasmModule compiled_module) = 0;
- };
-
- explicit WasmStreaming(std::unique_ptr<WasmStreamingImpl> impl);
-
- ~WasmStreaming();
-
- /**
- * Pass a new chunk of bytes to WebAssembly streaming compilation.
- * The buffer passed into {OnBytesReceived} is owned by the caller.
- */
- void OnBytesReceived(const uint8_t* bytes, size_t size);
-
- /**
- * {Finish} should be called after all received bytes were passed to
- * {OnBytesReceived} to tell V8 that there will be no more bytes. {Finish}
- * does not have to be called after {Abort} has been called already.
- */
- void Finish();
-
- /**
- * Abort streaming compilation. If {exception} has a value, then the promise
- * associated with streaming compilation is rejected with that value. If
- * {exception} does not have a value, the promise does not get rejected.
- */
- void Abort(MaybeLocal<Value> exception);
-
- /**
- * Passes previously compiled module bytes. This must be called before
- * {OnBytesReceived}, {Finish}, or {Abort}. Returns true if the module bytes
- * can be used, false otherwise. The buffer passed via {bytes} and {size}
- * is owned by the caller. If {SetCompiledModuleBytes} returns true, the
- * buffer must remain valid until either {Finish} or {Abort} completes.
- */
- bool SetCompiledModuleBytes(const uint8_t* bytes, size_t size);
-
- /**
- * Sets the client object that will receive streaming event notifications.
- * This must be called before {OnBytesReceived}, {Finish}, or {Abort}.
- */
- void SetClient(std::shared_ptr<Client> client);
-
- /**
- * Sets the UTF-8 encoded source URL for the {Script} object. This must be
- * called before {Finish}.
- */
- void SetUrl(const char* url, size_t length);
-
- /**
- * Unpacks a {WasmStreaming} object wrapped in a {Managed} for the embedder.
- * Since the embedder is on the other side of the API, it cannot unpack the
- * {Managed} itself.
- */
- static std::shared_ptr<WasmStreaming> Unpack(Isolate* isolate,
- Local<Value> value);
-
- private:
- std::unique_ptr<WasmStreamingImpl> impl_;
-};
-
-// TODO(mtrofin): when streaming compilation is done, we can rename this
-// to simply WasmModuleObjectBuilder
-class V8_EXPORT WasmModuleObjectBuilderStreaming final {
- public:
- explicit WasmModuleObjectBuilderStreaming(Isolate* isolate);
- /**
- * The buffer passed into OnBytesReceived is owned by the caller.
- */
- void OnBytesReceived(const uint8_t*, size_t size);
- void Finish();
- /**
- * Abort streaming compilation. If {exception} has a value, then the promise
- * associated with streaming compilation is rejected with that value. If
- * {exception} does not have a value, the promise does not get rejected.
- */
- void Abort(MaybeLocal<Value> exception);
- Local<Promise> GetPromise();
-
- ~WasmModuleObjectBuilderStreaming() = default;
-
- private:
- WasmModuleObjectBuilderStreaming(const WasmModuleObjectBuilderStreaming&) =
- delete;
- WasmModuleObjectBuilderStreaming(WasmModuleObjectBuilderStreaming&&) =
- default;
- WasmModuleObjectBuilderStreaming& operator=(
- const WasmModuleObjectBuilderStreaming&) = delete;
- WasmModuleObjectBuilderStreaming& operator=(
- WasmModuleObjectBuilderStreaming&&) = default;
- Isolate* isolate_ = nullptr;
-
-#if V8_CC_MSVC
- /**
- * We don't need the static Copy API, so the default
- * NonCopyablePersistentTraits would be sufficient; however,
- * MSVC eagerly instantiates the Copy.
- * We ensure we don't use Copy, however, by compiling with the
- * defaults everywhere else.
- */
- Persistent<Promise, CopyablePersistentTraits<Promise>> promise_;
-#else
- Persistent<Promise> promise_;
-#endif
- std::shared_ptr<internal::wasm::StreamingDecoder> streaming_decoder_;
-};
-
-#ifndef V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT
- // The number of required internal fields can be defined by the embedder.
-#define V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT 2
-#endif
-
-
-enum class ArrayBufferCreationMode { kInternalized, kExternalized };
-
-/**
- * A wrapper around the backing store (i.e. the raw memory) of an array buffer.
- * See a document linked in http://crbug.com/v8/9908 for more information.
- *
- * The allocation and destruction of backing stores is generally managed by
- * V8. Clients should always use standard C++ memory ownership types (i.e.
- * std::unique_ptr and std::shared_ptr) to manage lifetimes of backing stores
- * properly, since V8 internal objects may alias backing stores.
- *
- * This object does not keep the underlying |ArrayBuffer::Allocator| alive by
- * default. Use Isolate::CreateParams::array_buffer_allocator_shared when
- * creating the Isolate to make it hold a reference to the allocator itself.
- */
-class V8_EXPORT BackingStore : public v8::internal::BackingStoreBase {
- public:
- ~BackingStore();
-
- /**
- * Return a pointer to the beginning of the memory block for this backing
- * store. The pointer is only valid as long as this backing store object
- * lives.
- */
- void* Data() const;
-
- /**
- * The length (in bytes) of this backing store.
- */
- size_t ByteLength() const;
-
- /**
- * Indicates whether the backing store was created for an ArrayBuffer or
- * a SharedArrayBuffer.
- */
- bool IsShared() const;
-
- /**
- * Prevent implicit instantiation of operator delete with size_t argument.
- * The size_t argument would be incorrect because ptr points to the
- * internal BackingStore object.
- */
- void operator delete(void* ptr) { ::operator delete(ptr); }
-
- /**
- * Wrapper around ArrayBuffer::Allocator::Reallocate that preserves IsShared.
- * Assumes that the backing_store was allocated by the ArrayBuffer allocator
- * of the given isolate.
- */
- static std::unique_ptr<BackingStore> Reallocate(
- v8::Isolate* isolate, std::unique_ptr<BackingStore> backing_store,
- size_t byte_length);
-
- /**
- * This callback is used only if the memory block for a BackingStore cannot be
- * allocated with an ArrayBuffer::Allocator. In such cases the destructor of
- * the BackingStore invokes the callback to free the memory block.
- */
- using DeleterCallback = void (*)(void* data, size_t length,
- void* deleter_data);
-
- /**
- * If the memory block of a BackingStore is static or is managed manually,
- * then this empty deleter along with nullptr deleter_data can be passed to
- * ArrayBuffer::NewBackingStore to indicate that.
- *
- * The manually managed case should be used with caution and only when it
- * is guaranteed that the memory block freeing happens after detaching its
- * ArrayBuffer.
- */
- static void EmptyDeleter(void* data, size_t length, void* deleter_data);
-
- private:
- /**
- * See [Shared]ArrayBuffer::GetBackingStore and
- * [Shared]ArrayBuffer::NewBackingStore.
- */
- BackingStore();
-};
-
-#if !defined(V8_IMMINENT_DEPRECATION_WARNINGS)
-// Use v8::BackingStore::DeleterCallback instead.
-using BackingStoreDeleterCallback = void (*)(void* data, size_t length,
- void* deleter_data);
-
-#endif
-
-/**
- * An instance of the built-in ArrayBuffer constructor (ES6 draft 15.13.5).
- */
-class V8_EXPORT ArrayBuffer : public Object {
- public:
- /**
- * A thread-safe allocator that V8 uses to allocate |ArrayBuffer|'s memory.
- * The allocator is a global V8 setting. It has to be set via
- * Isolate::CreateParams.
- *
- * Memory allocated through this allocator by V8 is accounted for as external
- * memory by V8. Note that V8 keeps track of the memory for all internalized
- * |ArrayBuffer|s. Responsibility for tracking external memory (using
- * Isolate::AdjustAmountOfExternalAllocatedMemory) is handed over to the
- * embedder upon externalization and taken over upon internalization (creating
- * an internalized buffer from an existing buffer).
- *
- * Note that it is unsafe to call back into V8 from any of the allocator
- * functions.
- */
- class V8_EXPORT Allocator {
- public:
- virtual ~Allocator() = default;
-
- /**
- * Allocate |length| bytes. Return nullptr if allocation is not successful.
- * Memory should be initialized to zeroes.
- */
- virtual void* Allocate(size_t length) = 0;
-
- /**
- * Allocate |length| bytes. Return nullptr if allocation is not successful.
- * Memory does not have to be initialized.
- */
- virtual void* AllocateUninitialized(size_t length) = 0;
-
- /**
- * Free the memory block of size |length|, pointed to by |data|.
- * That memory is guaranteed to be previously allocated by |Allocate|.
- */
- virtual void Free(void* data, size_t length) = 0;
-
- /**
- * Reallocate the memory block of size |old_length| to a memory block of
- * size |new_length| by expanding, contracting, or copying the existing
- * memory block. If |new_length| > |old_length|, then the new part of
- * the memory must be initialized to zeros. Return nullptr if reallocation
- * is not successful.
- *
- * The caller guarantees that the memory block was previously allocated
- * using Allocate or AllocateUninitialized.
- *
- * The default implementation allocates a new block and copies data.
- */
- virtual void* Reallocate(void* data, size_t old_length, size_t new_length);
-
- /**
- * ArrayBuffer allocation mode. kNormal is a malloc/free style allocation,
- * while kReservation is for larger allocations with the ability to set
- * access permissions.
- */
- enum class AllocationMode { kNormal, kReservation };
-
- /**
- * malloc/free based convenience allocator.
- *
- * Caller takes ownership, i.e. the returned object needs to be freed using
- * |delete allocator| once it is no longer in use.
- */
- static Allocator* NewDefaultAllocator();
- };
-
- /**
- * Data length in bytes.
- */
- size_t ByteLength() const;
-
- /**
- * Create a new ArrayBuffer. Allocate |byte_length| bytes.
- * Allocated memory will be owned by a created ArrayBuffer and
- * will be deallocated when it is garbage-collected,
- * unless the object is externalized.
- */
- static Local<ArrayBuffer> New(Isolate* isolate, size_t byte_length);
-
- /**
- * Create a new ArrayBuffer with an existing backing store.
- * The created array keeps a reference to the backing store until the array
- * is garbage collected. Note that the IsExternal bit does not affect this
- * reference from the array to the backing store.
- *
- * In the future the IsExternal bit will be removed. Until then the bit is set as
- * follows. If the backing store does not own the underlying buffer, then
- * the array is created in externalized state. Otherwise, the array is created
- * in internalized state. In the latter case the array can be transitioned
- * to the externalized state using Externalize(backing_store).
- */
- static Local<ArrayBuffer> New(Isolate* isolate,
- std::shared_ptr<BackingStore> backing_store);
-
- /**
- * Returns a new standalone BackingStore that is allocated using the array
- * buffer allocator of the isolate. The result can be later passed to
- * ArrayBuffer::New.
- *
- * If the allocator returns nullptr, then the function may cause GCs in the
- * given isolate and re-try the allocation. If GCs do not help, then the
- * function will crash with an out-of-memory error.
- */
- static std::unique_ptr<BackingStore> NewBackingStore(Isolate* isolate,
- size_t byte_length);
- /**
- * Returns a new standalone BackingStore that takes over the ownership of
- * the given buffer. The destructor of the BackingStore invokes the given
- * deleter callback.
- *
- * The result can be later passed to ArrayBuffer::New. The raw pointer
- * to the buffer must not be passed again to any V8 API function.
- */
- static std::unique_ptr<BackingStore> NewBackingStore(
- void* data, size_t byte_length, v8::BackingStore::DeleterCallback deleter,
- void* deleter_data);
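-
- /**
- * A minimal usage sketch for the deleter-based overload, assuming
- * |isolate| is the current isolate and the embedder owns the buffer:
- *
- * \code
- * void* data = malloc(1024);
- * std::unique_ptr<v8::BackingStore> store =
- *     v8::ArrayBuffer::NewBackingStore(
- *         data, 1024,
- *         [](void* data, size_t, void*) { free(data); },  // deleter
- *         nullptr);                                       // deleter_data
- * v8::Local<v8::ArrayBuffer> buffer =
- *     v8::ArrayBuffer::New(isolate, std::move(store));
- * \endcode
- */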
-
- /**
- * Returns true if this ArrayBuffer may be detached.
- */
- bool IsDetachable() const;
-
- /**
- * Detaches this ArrayBuffer and all its views (typed arrays).
- * Detaching sets the byte length of the buffer and all typed arrays to zero,
- * preventing JavaScript from ever accessing the underlying backing store.
- * The ArrayBuffer should have been externalized and must be detachable.
- */
- void Detach();
-
- /**
- * Get a shared pointer to the backing store of this array buffer. This
- * pointer coordinates the lifetime management of the internal storage
- * with any live ArrayBuffers on the heap, even across isolates. The embedder
- * should not attempt to manage lifetime of the storage through other means.
- */
- std::shared_ptr<BackingStore> GetBackingStore();
-
- V8_INLINE static ArrayBuffer* Cast(Value* obj);
-
- static const int kInternalFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT;
- static const int kEmbedderFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT;
-
- private:
- ArrayBuffer();
- static void CheckCast(Value* obj);
-};
-
-
-#ifndef V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT
-// The number of required internal fields can be defined by embedder.
-#define V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT 2
-#endif
-
-
-/**
- * A base class for an instance of one of the "views" over ArrayBuffer,
- * including TypedArrays and DataView (ES6 draft 15.13).
- */
-class V8_EXPORT ArrayBufferView : public Object {
- public:
- /**
- * Returns underlying ArrayBuffer.
- */
- Local<ArrayBuffer> Buffer();
- /**
- * Byte offset in |Buffer|.
- */
- size_t ByteOffset();
- /**
- * Size of a view in bytes.
- */
- size_t ByteLength();
-
- /**
- * Copy the contents of the ArrayBufferView's buffer to an embedder defined
- * memory without additional overhead that calling ArrayBufferView::Buffer
- * might incur.
- *
- * Will write at most min(|byte_length|, ByteLength) bytes starting at
- * ByteOffset of the underlying buffer to the memory starting at |dest|.
- * Returns the number of bytes actually written.
- */
- size_t CopyContents(void* dest, size_t byte_length);
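-
- /**
- * A minimal usage sketch, assuming |view| is a Local<ArrayBufferView>:
- *
- * \code
- * uint8_t scratch[64];
- * size_t written = view->CopyContents(scratch, sizeof(scratch));
- * // |written| == min(sizeof(scratch), view->ByteLength()).
- * \endcode
- */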
-
- /**
- * Returns true if ArrayBufferView's backing ArrayBuffer has already been
- * allocated.
- */
- bool HasBuffer() const;
-
- V8_INLINE static ArrayBufferView* Cast(Value* obj);
-
- static const int kInternalFieldCount =
- V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT;
- static const int kEmbedderFieldCount =
- V8_ARRAY_BUFFER_VIEW_INTERNAL_FIELD_COUNT;
-
- private:
- ArrayBufferView();
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * A base class for an instance of the TypedArray series of constructors
- * (ES6 draft 15.13.6).
- */
-class V8_EXPORT TypedArray : public ArrayBufferView {
- public:
- /*
- * The largest typed array size that can be constructed using New.
- */
- static constexpr size_t kMaxLength =
- internal::kApiSystemPointerSize == 4
- ? internal::kSmiMaxValue
- : static_cast<size_t>(uint64_t{1} << 32);
-
- /**
- * Number of elements in this typed array
- * (e.g. for Int16Array, |ByteLength|/2).
- */
- size_t Length();
-
- V8_INLINE static TypedArray* Cast(Value* obj);
-
- private:
- TypedArray();
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * An instance of Uint8Array constructor (ES6 draft 15.13.6).
- */
-class V8_EXPORT Uint8Array : public TypedArray {
- public:
- static Local<Uint8Array> New(Local<ArrayBuffer> array_buffer,
- size_t byte_offset, size_t length);
- static Local<Uint8Array> New(Local<SharedArrayBuffer> shared_array_buffer,
- size_t byte_offset, size_t length);
- V8_INLINE static Uint8Array* Cast(Value* obj);
-
- private:
- Uint8Array();
- static void CheckCast(Value* obj);
-};
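-
-/**
- * A minimal sketch of creating a typed-array view, assuming |isolate| is
- * the current isolate; the view covers 16 elements starting at byte 8:
- *
- * \code
- * v8::Local<v8::ArrayBuffer> buffer = v8::ArrayBuffer::New(isolate, 32);
- * v8::Local<v8::Uint8Array> array = v8::Uint8Array::New(buffer, 8, 16);
- * \endcode
- */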
-
-
-/**
- * An instance of Uint8ClampedArray constructor (ES6 draft 15.13.6).
- */
-class V8_EXPORT Uint8ClampedArray : public TypedArray {
- public:
- static Local<Uint8ClampedArray> New(Local<ArrayBuffer> array_buffer,
- size_t byte_offset, size_t length);
- static Local<Uint8ClampedArray> New(
- Local<SharedArrayBuffer> shared_array_buffer, size_t byte_offset,
- size_t length);
- V8_INLINE static Uint8ClampedArray* Cast(Value* obj);
-
- private:
- Uint8ClampedArray();
- static void CheckCast(Value* obj);
-};
-
-/**
- * An instance of Int8Array constructor (ES6 draft 15.13.6).
- */
-class V8_EXPORT Int8Array : public TypedArray {
- public:
- static Local<Int8Array> New(Local<ArrayBuffer> array_buffer,
- size_t byte_offset, size_t length);
- static Local<Int8Array> New(Local<SharedArrayBuffer> shared_array_buffer,
- size_t byte_offset, size_t length);
- V8_INLINE static Int8Array* Cast(Value* obj);
-
- private:
- Int8Array();
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * An instance of Uint16Array constructor (ES6 draft 15.13.6).
- */
-class V8_EXPORT Uint16Array : public TypedArray {
- public:
- static Local<Uint16Array> New(Local<ArrayBuffer> array_buffer,
- size_t byte_offset, size_t length);
- static Local<Uint16Array> New(Local<SharedArrayBuffer> shared_array_buffer,
- size_t byte_offset, size_t length);
- V8_INLINE static Uint16Array* Cast(Value* obj);
-
- private:
- Uint16Array();
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * An instance of Int16Array constructor (ES6 draft 15.13.6).
- */
-class V8_EXPORT Int16Array : public TypedArray {
- public:
- static Local<Int16Array> New(Local<ArrayBuffer> array_buffer,
- size_t byte_offset, size_t length);
- static Local<Int16Array> New(Local<SharedArrayBuffer> shared_array_buffer,
- size_t byte_offset, size_t length);
- V8_INLINE static Int16Array* Cast(Value* obj);
-
- private:
- Int16Array();
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * An instance of Uint32Array constructor (ES6 draft 15.13.6).
- */
-class V8_EXPORT Uint32Array : public TypedArray {
- public:
- static Local<Uint32Array> New(Local<ArrayBuffer> array_buffer,
- size_t byte_offset, size_t length);
- static Local<Uint32Array> New(Local<SharedArrayBuffer> shared_array_buffer,
- size_t byte_offset, size_t length);
- V8_INLINE static Uint32Array* Cast(Value* obj);
-
- private:
- Uint32Array();
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * An instance of Int32Array constructor (ES6 draft 15.13.6).
- */
-class V8_EXPORT Int32Array : public TypedArray {
- public:
- static Local<Int32Array> New(Local<ArrayBuffer> array_buffer,
- size_t byte_offset, size_t length);
- static Local<Int32Array> New(Local<SharedArrayBuffer> shared_array_buffer,
- size_t byte_offset, size_t length);
- V8_INLINE static Int32Array* Cast(Value* obj);
-
- private:
- Int32Array();
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * An instance of Float32Array constructor (ES6 draft 15.13.6).
- */
-class V8_EXPORT Float32Array : public TypedArray {
- public:
- static Local<Float32Array> New(Local<ArrayBuffer> array_buffer,
- size_t byte_offset, size_t length);
- static Local<Float32Array> New(Local<SharedArrayBuffer> shared_array_buffer,
- size_t byte_offset, size_t length);
- V8_INLINE static Float32Array* Cast(Value* obj);
-
- private:
- Float32Array();
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * An instance of Float64Array constructor (ES6 draft 15.13.6).
- */
-class V8_EXPORT Float64Array : public TypedArray {
- public:
- static Local<Float64Array> New(Local<ArrayBuffer> array_buffer,
- size_t byte_offset, size_t length);
- static Local<Float64Array> New(Local<SharedArrayBuffer> shared_array_buffer,
- size_t byte_offset, size_t length);
- V8_INLINE static Float64Array* Cast(Value* obj);
-
- private:
- Float64Array();
- static void CheckCast(Value* obj);
-};
-
-/**
- * An instance of BigInt64Array constructor.
- */
-class V8_EXPORT BigInt64Array : public TypedArray {
- public:
- static Local<BigInt64Array> New(Local<ArrayBuffer> array_buffer,
- size_t byte_offset, size_t length);
- static Local<BigInt64Array> New(Local<SharedArrayBuffer> shared_array_buffer,
- size_t byte_offset, size_t length);
- V8_INLINE static BigInt64Array* Cast(Value* obj);
-
- private:
- BigInt64Array();
- static void CheckCast(Value* obj);
-};
-
-/**
- * An instance of BigUint64Array constructor.
- */
-class V8_EXPORT BigUint64Array : public TypedArray {
- public:
- static Local<BigUint64Array> New(Local<ArrayBuffer> array_buffer,
- size_t byte_offset, size_t length);
- static Local<BigUint64Array> New(Local<SharedArrayBuffer> shared_array_buffer,
- size_t byte_offset, size_t length);
- V8_INLINE static BigUint64Array* Cast(Value* obj);
-
- private:
- BigUint64Array();
- static void CheckCast(Value* obj);
-};
-
-/**
- * An instance of DataView constructor (ES6 draft 15.13.7).
- */
-class V8_EXPORT DataView : public ArrayBufferView {
- public:
- static Local<DataView> New(Local<ArrayBuffer> array_buffer,
- size_t byte_offset, size_t length);
- static Local<DataView> New(Local<SharedArrayBuffer> shared_array_buffer,
- size_t byte_offset, size_t length);
- V8_INLINE static DataView* Cast(Value* obj);
-
- private:
- DataView();
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * An instance of the built-in SharedArrayBuffer constructor.
- */
-class V8_EXPORT SharedArrayBuffer : public Object {
- public:
- /**
- * Data length in bytes.
- */
- size_t ByteLength() const;
-
- /**
- * Create a new SharedArrayBuffer. Allocate |byte_length| bytes.
- * Allocated memory will be owned by a created SharedArrayBuffer and
- * will be deallocated when it is garbage-collected,
- * unless the object is externalized.
- */
- static Local<SharedArrayBuffer> New(Isolate* isolate, size_t byte_length);
-
- /**
- * Create a new SharedArrayBuffer with an existing backing store.
- * The created array keeps a reference to the backing store until the array
- * is garbage collected. Note that the IsExternal bit does not affect this
- * reference from the array to the backing store.
- *
- * In the future the IsExternal bit will be removed. Until then the bit is
- * set as follows. If the backing store does not own the underlying buffer,
- * then the array is created in the externalized state. Otherwise, the array
- * is created in the internalized state. In the latter case the array can be
- * transitioned to the externalized state using Externalize(backing_store).
- */
- static Local<SharedArrayBuffer> New(
- Isolate* isolate, std::shared_ptr<BackingStore> backing_store);
-
- /**
- * Returns a new standalone BackingStore that is allocated using the array
- * buffer allocator of the isolate. The result can be later passed to
- * SharedArrayBuffer::New.
- *
- * If the allocator returns nullptr, then the function may cause GCs in the
- * given isolate and re-try the allocation. If GCs do not help, then the
- * function will crash with an out-of-memory error.
- */
- static std::unique_ptr<BackingStore> NewBackingStore(Isolate* isolate,
- size_t byte_length);
- /**
- * Returns a new standalone BackingStore that takes over the ownership of
- * the given buffer. The destructor of the BackingStore invokes the given
- * deleter callback.
- *
- * The result can be later passed to SharedArrayBuffer::New. The raw pointer
- * to the buffer must not be passed again to any V8 functions.
- */
- static std::unique_ptr<BackingStore> NewBackingStore(
- void* data, size_t byte_length, v8::BackingStore::DeleterCallback deleter,
- void* deleter_data);
-
- /**
- * Get a shared pointer to the backing store of this array buffer. This
- * pointer coordinates the lifetime management of the internal storage
- * with any live ArrayBuffers on the heap, even across isolates. The embedder
- * should not attempt to manage lifetime of the storage through other means.
- */
- std::shared_ptr<BackingStore> GetBackingStore();
-
- V8_INLINE static SharedArrayBuffer* Cast(Value* obj);
-
- static const int kInternalFieldCount = V8_ARRAY_BUFFER_INTERNAL_FIELD_COUNT;
-
- private:
- SharedArrayBuffer();
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * An instance of the built-in Date constructor (ECMA-262, 15.9).
- */
-class V8_EXPORT Date : public Object {
- public:
- static V8_WARN_UNUSED_RESULT MaybeLocal<Value> New(Local<Context> context,
- double time);
-
- /**
- * A specialization of Value::NumberValue that is more efficient
- * because we know the structure of this object.
- */
- double ValueOf() const;
-
- V8_INLINE static Date* Cast(Value* obj);
-
- private:
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * A Number object (ECMA-262, 4.3.21).
- */
-class V8_EXPORT NumberObject : public Object {
- public:
- static Local<Value> New(Isolate* isolate, double value);
-
- double ValueOf() const;
-
- V8_INLINE static NumberObject* Cast(Value* obj);
-
- private:
- static void CheckCast(Value* obj);
-};
-
-/**
- * A BigInt object (https://tc39.github.io/proposal-bigint)
- */
-class V8_EXPORT BigIntObject : public Object {
- public:
- static Local<Value> New(Isolate* isolate, int64_t value);
-
- Local<BigInt> ValueOf() const;
-
- V8_INLINE static BigIntObject* Cast(Value* obj);
-
- private:
- static void CheckCast(Value* obj);
-};
-
-/**
- * A Boolean object (ECMA-262, 4.3.15).
- */
-class V8_EXPORT BooleanObject : public Object {
- public:
- static Local<Value> New(Isolate* isolate, bool value);
-
- bool ValueOf() const;
-
- V8_INLINE static BooleanObject* Cast(Value* obj);
-
- private:
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * A String object (ECMA-262, 4.3.18).
- */
-class V8_EXPORT StringObject : public Object {
- public:
- static Local<Value> New(Isolate* isolate, Local<String> value);
-
- Local<String> ValueOf() const;
-
- V8_INLINE static StringObject* Cast(Value* obj);
-
- private:
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * A Symbol object (ECMA-262 edition 6).
- */
-class V8_EXPORT SymbolObject : public Object {
- public:
- static Local<Value> New(Isolate* isolate, Local<Symbol> value);
-
- Local<Symbol> ValueOf() const;
-
- V8_INLINE static SymbolObject* Cast(Value* obj);
-
- private:
- static void CheckCast(Value* obj);
-};
-
-
-/**
- * An instance of the built-in RegExp constructor (ECMA-262, 15.10).
- */
-class V8_EXPORT RegExp : public Object {
- public:
- /**
- * Regular expression flag bits. They can be or'ed to enable a set
- * of flags.
- * The kLinear value ('l') is experimental and can only be used with
- * --enable-experimental-regexp-engine. RegExps with the kLinear flag are
- * guaranteed to execute in asymptotically linear time with respect to the
- * length of the subject string.
- */
- enum Flags {
- kNone = 0,
- kGlobal = 1 << 0,
- kIgnoreCase = 1 << 1,
- kMultiline = 1 << 2,
- kSticky = 1 << 3,
- kUnicode = 1 << 4,
- kDotAll = 1 << 5,
- kLinear = 1 << 6,
- kHasIndices = 1 << 7,
- };
-
- static constexpr int kFlagCount = 8;
-
- /**
- * Creates a regular expression from the given pattern string and
- * the flags bit field. May throw a JavaScript exception as
- * described in ECMA-262, 15.10.4.1.
- *
- * For example,
- * RegExp::New(v8::String::New("foo"),
- * static_cast<RegExp::Flags>(kGlobal | kMultiline))
- * is equivalent to evaluating "/foo/gm".
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<RegExp> New(Local<Context> context,
- Local<String> pattern,
- Flags flags);
-
- /**
- * Like New, but additionally specifies a backtrack limit. If the number of
- * backtracks done in one Exec call hits the limit, a match failure is
- * immediately returned.
- */
- static V8_WARN_UNUSED_RESULT MaybeLocal<RegExp> NewWithBacktrackLimit(
- Local<Context> context, Local<String> pattern, Flags flags,
- uint32_t backtrack_limit);
-
- /**
- * Executes the current RegExp instance on the given subject string.
- * Equivalent to RegExp.prototype.exec as described in
- *
- * https://tc39.es/ecma262/#sec-regexp.prototype.exec
- *
- * On success, an Array containing the matched strings is returned. On
- * failure, returns Null.
- *
- * Note: modifies global context state, accessible e.g. through RegExp.input.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Object> Exec(Local<Context> context,
- Local<String> subject);
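-
- /**
- * A minimal usage sketch, assuming |context|, |pattern| and |subject| are
- * in scope:
- *
- * \code
- * v8::Local<v8::RegExp> re;
- * v8::Local<v8::Object> match;
- * if (v8::RegExp::New(context, pattern, v8::RegExp::kGlobal)
- *         .ToLocal(&re) &&
- *     re->Exec(context, subject).ToLocal(&match) &&
- *     !match->IsNull()) {
- *   // |match| is the result array; index 0 holds the full match.
- * }
- * \endcode
- */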
-
- /**
- * Returns the value of the source property: a string representing
- * the regular expression.
- */
- Local<String> GetSource() const;
-
- /**
- * Returns the flags bit field.
- */
- Flags GetFlags() const;
-
- V8_INLINE static RegExp* Cast(Value* obj);
-
- private:
- static void CheckCast(Value* obj);
-};
-
-/**
- * A JavaScript value that wraps a C++ void*. This type of value is mainly used
- * to associate C++ data structures with JavaScript objects.
- */
-class V8_EXPORT External : public Value {
- public:
- static Local<External> New(Isolate* isolate, void* value);
- V8_INLINE static External* Cast(Value* obj);
- void* Value() const;
- private:
- static void CheckCast(v8::Value* obj);
-};
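-
-/**
- * A minimal sketch of round-tripping a C++ pointer through an External,
- * assuming MyData is an embedder-defined type that outlives the handle:
- *
- * \code
- * MyData* data = new MyData();
- * v8::Local<v8::External> ext = v8::External::New(isolate, data);
- * // ... later, e.g. when the value is passed back by a callback:
- * MyData* back = static_cast<MyData*>(ext->Value());
- * \endcode
- */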
-
-#define V8_INTRINSICS_LIST(F) \
- F(ArrayProto_entries, array_entries_iterator) \
- F(ArrayProto_forEach, array_for_each_iterator) \
- F(ArrayProto_keys, array_keys_iterator) \
- F(ArrayProto_values, array_values_iterator) \
- F(AsyncIteratorPrototype, initial_async_iterator_prototype) \
- F(ErrorPrototype, initial_error_prototype) \
- F(IteratorPrototype, initial_iterator_prototype) \
- F(ObjProto_valueOf, object_value_of_function)
-
-enum Intrinsic {
-#define V8_DECL_INTRINSIC(name, iname) k##name,
- V8_INTRINSICS_LIST(V8_DECL_INTRINSIC)
-#undef V8_DECL_INTRINSIC
-};
-
-
-// --- Templates ---
-
-
-/**
- * The superclass of object and function templates.
- */
-class V8_EXPORT Template : public Data {
- public:
- /**
- * Adds a property to each instance created by this template.
- *
- * The property must be defined either as a primitive value, or a template.
- */
- void Set(Local<Name> name, Local<Data> value,
- PropertyAttribute attributes = None);
- void SetPrivate(Local<Private> name, Local<Data> value,
- PropertyAttribute attributes = None);
- V8_INLINE void Set(Isolate* isolate, const char* name, Local<Data> value,
- PropertyAttribute attributes = None);
-
- void SetAccessorProperty(
- Local<Name> name,
- Local<FunctionTemplate> getter = Local<FunctionTemplate>(),
- Local<FunctionTemplate> setter = Local<FunctionTemplate>(),
- PropertyAttribute attribute = None,
- AccessControl settings = DEFAULT);
-
- /**
- * Whenever the property with the given name is accessed on objects
- * created from this Template the getter and setter callbacks
- * are called instead of getting and setting the property directly
- * on the JavaScript object.
- *
- * \param name The name of the property for which an accessor is added.
- * \param getter The callback to invoke when getting the property.
- * \param setter The callback to invoke when setting the property.
- * \param data A piece of data that will be passed to the getter and setter
- * callbacks whenever they are invoked.
- * \param settings Access control settings for the accessor. This is a bit
- * field consisting of one or more of
- * DEFAULT = 0, ALL_CAN_READ = 1, or ALL_CAN_WRITE = 2.
- * The default is to not allow cross-context access.
- * ALL_CAN_READ means that all cross-context reads are allowed.
- * ALL_CAN_WRITE means that all cross-context writes are allowed.
- * The combination ALL_CAN_READ | ALL_CAN_WRITE can be used to allow all
- * cross-context access.
- * \param attribute The attributes of the property for which an accessor
- * is added.
- * \param signature The signature describes valid receivers for the accessor
- * and is used to perform implicit instance checks against them. If the
- * receiver is incompatible (i.e. is not an instance of the constructor as
- * defined by FunctionTemplate::HasInstance()), an implicit TypeError is
- * thrown and no callback is invoked.
- */
- void SetNativeDataProperty(
- Local<String> name, AccessorGetterCallback getter,
- AccessorSetterCallback setter = nullptr,
- Local<Value> data = Local<Value>(), PropertyAttribute attribute = None,
- Local<AccessorSignature> signature = Local<AccessorSignature>(),
- AccessControl settings = DEFAULT,
- SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
- SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
- void SetNativeDataProperty(
- Local<Name> name, AccessorNameGetterCallback getter,
- AccessorNameSetterCallback setter = nullptr,
- Local<Value> data = Local<Value>(), PropertyAttribute attribute = None,
- Local<AccessorSignature> signature = Local<AccessorSignature>(),
- AccessControl settings = DEFAULT,
- SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
- SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
-
- /**
- * Like SetNativeDataProperty, but V8 will replace the native data property
- * with a real data property on first access.
- */
- void SetLazyDataProperty(
- Local<Name> name, AccessorNameGetterCallback getter,
- Local<Value> data = Local<Value>(), PropertyAttribute attribute = None,
- SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
- SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
-
- /**
- * During template instantiation, sets the value with the intrinsic property
- * from the correct context.
- */
- void SetIntrinsicDataProperty(Local<Name> name, Intrinsic intrinsic,
- PropertyAttribute attribute = None);
-
- private:
- Template();
-
- friend class ObjectTemplate;
- friend class FunctionTemplate;
-};
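-
-/**
- * A minimal usage sketch for Template::Set, assuming |isolate| is in
- * scope; the overload taking a C-string name is the convenience variant
- * declared above:
- *
- * \code
- * v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
- * templ->Set(isolate, "answer", v8::Number::New(isolate, 42),
- *            v8::ReadOnly);
- * \endcode
- */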
-
-// TODO(dcarney): Replace GenericNamedPropertyFooCallback with just
-// NamedPropertyFooCallback.
-
-/**
- * Interceptor for get requests on an object.
- *
- * Use `info.GetReturnValue().Set()` to set the return value of the
- * intercepted get request.
- *
- * \param property The name of the property for which the request was
- * intercepted.
- * \param info Information about the intercepted request, such as
- * isolate, receiver, return value, or whether running in `'use strict'` mode.
- * See `PropertyCallbackInfo`.
- *
- * \code
- * void GetterCallback(
- * Local<Name> name,
- * const v8::PropertyCallbackInfo<v8::Value>& info) {
- * info.GetReturnValue().Set(v8_num(42));
- * }
- *
- * v8::Local<v8::FunctionTemplate> templ =
- * v8::FunctionTemplate::New(isolate);
- * templ->InstanceTemplate()->SetHandler(
- * v8::NamedPropertyHandlerConfiguration(GetterCallback));
- * LocalContext env;
- * env->Global()
- * ->Set(env.local(), v8_str("obj"), templ->GetFunction(env.local())
- * .ToLocalChecked()
- * ->NewInstance(env.local())
- * .ToLocalChecked())
- * .FromJust();
- * v8::Local<v8::Value> result = CompileRun("obj.a = 17; obj.a");
- * CHECK(v8_num(42)->Equals(env.local(), result).FromJust());
- * \endcode
- *
- * See also `ObjectTemplate::SetHandler`.
- */
-using GenericNamedPropertyGetterCallback =
- void (*)(Local<Name> property, const PropertyCallbackInfo<Value>& info);
-
-/**
- * Interceptor for set requests on an object.
- *
- * Use `info.GetReturnValue()` to indicate whether the request was intercepted
- * or not. If the setter successfully intercepts the request, i.e., if the
- * request should not be further executed, call
- * `info.GetReturnValue().Set(value)`. If the setter
- * did not intercept the request, i.e., if the request should be handled as
- * if no interceptor is present, do not call `Set()`.
- *
- * \param property The name of the property for which the request was
- * intercepted.
- * \param value The value which the property will have if the request
- * is not intercepted.
- * \param info Information about the intercepted request, such as
- * isolate, receiver, return value, or whether running in `'use strict'` mode.
- * See `PropertyCallbackInfo`.
- *
- * See also
- * `ObjectTemplate::SetHandler`.
- */
-using GenericNamedPropertySetterCallback =
- void (*)(Local<Name> property, Local<Value> value,
- const PropertyCallbackInfo<Value>& info);
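-
-/**
- * A minimal sketch of a setter interceptor that accepts every write,
- * assuming it is installed via NamedPropertyHandlerConfiguration:
- *
- * \code
- * void SetterCallback(v8::Local<v8::Name> property,
- *                     v8::Local<v8::Value> value,
- *                     const v8::PropertyCallbackInfo<v8::Value>& info) {
- *   // Store |value| in embedder state here, then signal interception.
- *   info.GetReturnValue().Set(value);
- * }
- * \endcode
- */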
-
-/**
- * Intercepts all requests that query the attributes of the
- * property, e.g., getOwnPropertyDescriptor(), propertyIsEnumerable(), and
- * defineProperty().
- *
- * Use `info.GetReturnValue().Set(value)` to set the property attributes. The
- * value is an integer encoding a `v8::PropertyAttribute`.
- *
- * \param property The name of the property for which the request was
- * intercepted.
- * \param info Information about the intercepted request, such as
- * isolate, receiver, return value, or whether running in `'use strict'` mode.
- * See `PropertyCallbackInfo`.
- *
- * \note Some functions query the property attributes internally, even though
- * they do not return the attributes. For example, `hasOwnProperty()` can
- * trigger this interceptor depending on the state of the object.
- *
- * See also
- * `ObjectTemplate::SetHandler`.
- */
-using GenericNamedPropertyQueryCallback =
- void (*)(Local<Name> property, const PropertyCallbackInfo<Integer>& info);
-
-/**
- * Interceptor for delete requests on an object.
- *
- * Use `info.GetReturnValue()` to indicate whether the request was intercepted
- * or not. If the deleter successfully intercepts the request, i.e., if the
- * request should not be further executed, call
- * `info.GetReturnValue().Set(value)` with a boolean `value`. The `value` is
- * used as the return value of `delete`.
- *
- * \param property The name of the property for which the request was
- * intercepted.
- * \param info Information about the intercepted request, such as
- * isolate, receiver, return value, or whether running in `'use strict'` mode.
- * See `PropertyCallbackInfo`.
- *
- * \note If you need to mimic the behavior of `delete`, i.e., throw in strict
- * mode instead of returning false, use `info.ShouldThrowOnError()` to determine
- * if you are in strict mode.
- *
- * See also `ObjectTemplate::SetHandler`.
- */
-using GenericNamedPropertyDeleterCallback =
- void (*)(Local<Name> property, const PropertyCallbackInfo<Boolean>& info);
-
-/**
- * Returns an array containing the names of the properties the named
- * property getter intercepts.
- *
- * Note: The values in the array must be of type v8::Name.
- */
-using GenericNamedPropertyEnumeratorCallback =
- void (*)(const PropertyCallbackInfo<Array>& info);
-
-/**
- * Interceptor for defineProperty requests on an object.
- *
- * Use `info.GetReturnValue()` to indicate whether the request was intercepted
- * or not. If the definer successfully intercepts the request, i.e., if the
- * request should not be further executed, call
- * `info.GetReturnValue().Set(value)`. If the definer
- * did not intercept the request, i.e., if the request should be handled as
- * if no interceptor is present, do not call `Set()`.
- *
- * \param property The name of the property for which the request was
- * intercepted.
- * \param desc The property descriptor which is used to define the
- * property if the request is not intercepted.
- * \param info Information about the intercepted request, such as
- * isolate, receiver, return value, or whether running in `'use strict'` mode.
- * See `PropertyCallbackInfo`.
- *
- * See also `ObjectTemplate::SetHandler`.
- */
-using GenericNamedPropertyDefinerCallback =
- void (*)(Local<Name> property, const PropertyDescriptor& desc,
- const PropertyCallbackInfo<Value>& info);
-
-/**
- * Interceptor for getOwnPropertyDescriptor requests on an object.
- *
- * Use `info.GetReturnValue().Set()` to set the return value of the
- * intercepted request. The return value must be an object that
- * can be converted to a PropertyDescriptor, e.g., a `v8::Value` returned from
- * `v8::Object::getOwnPropertyDescriptor`.
- *
- * \param property The name of the property for which the request was
- * intercepted.
- * \param info Information about the intercepted request, such as
- * isolate, receiver, return value, or whether running in `'use strict'` mode.
- * See `PropertyCallbackInfo`.
- *
- * \note If GetOwnPropertyDescriptor is intercepted, it will
- * always return true, i.e., indicate that the property was found.
- *
- * See also `ObjectTemplate::SetHandler`.
- */
-using GenericNamedPropertyDescriptorCallback =
- void (*)(Local<Name> property, const PropertyCallbackInfo<Value>& info);
-
-/**
- * See `v8::GenericNamedPropertyGetterCallback`.
- */
-using IndexedPropertyGetterCallback =
- void (*)(uint32_t index, const PropertyCallbackInfo<Value>& info);
-
-/**
- * See `v8::GenericNamedPropertySetterCallback`.
- */
-using IndexedPropertySetterCallback =
- void (*)(uint32_t index, Local<Value> value,
- const PropertyCallbackInfo<Value>& info);
-
-/**
- * See `v8::GenericNamedPropertyQueryCallback`.
- */
-using IndexedPropertyQueryCallback =
- void (*)(uint32_t index, const PropertyCallbackInfo<Integer>& info);
-
-/**
- * See `v8::GenericNamedPropertyDeleterCallback`.
- */
-using IndexedPropertyDeleterCallback =
- void (*)(uint32_t index, const PropertyCallbackInfo<Boolean>& info);
-
-/**
- * Returns an array containing the indices of the properties the indexed
- * property getter intercepts.
- *
- * Note: The values in the array must be uint32_t.
- */
-using IndexedPropertyEnumeratorCallback =
- void (*)(const PropertyCallbackInfo<Array>& info);
-
-/**
- * See `v8::GenericNamedPropertyDefinerCallback`.
- */
-using IndexedPropertyDefinerCallback =
- void (*)(uint32_t index, const PropertyDescriptor& desc,
- const PropertyCallbackInfo<Value>& info);
-
-/**
- * See `v8::GenericNamedPropertyDescriptorCallback`.
- */
-using IndexedPropertyDescriptorCallback =
- void (*)(uint32_t index, const PropertyCallbackInfo<Value>& info);
-
-/**
- * Access type specification.
- */
-enum AccessType {
- ACCESS_GET,
- ACCESS_SET,
- ACCESS_HAS,
- ACCESS_DELETE,
- ACCESS_KEYS
-};
-
-
-/**
- * Returns true if the given context should be allowed to access the given
- * object.
- */
-using AccessCheckCallback = bool (*)(Local<Context> accessing_context,
- Local<Object> accessed_object,
- Local<Value> data);
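-
-/**
- * A minimal sketch that only permits same-context access; real embedders
- * typically compare security tokens instead:
- *
- * \code
- * bool AccessCheck(v8::Local<v8::Context> accessing_context,
- *                  v8::Local<v8::Object> accessed_object,
- *                  v8::Local<v8::Value> data) {
- *   v8::Local<v8::Context> creation_context;
- *   return accessed_object->GetCreationContext()
- *              .ToLocal(&creation_context) &&
- *          creation_context == accessing_context;
- * }
- * \endcode
- */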
-
-/**
- * A FunctionTemplate is used to create functions at runtime. There
- * can only be one function created from a FunctionTemplate in a
- * context. The lifetime of the created function is equal to the
- * lifetime of the context. So in case the embedder needs to create
- * temporary functions that can be collected, using Scripts is
- * preferred.
- *
- * Any modification of a FunctionTemplate after first instantiation will trigger
- * a crash.
- *
- * A FunctionTemplate can have properties, these properties are added to the
- * function object when it is created.
- *
- * A FunctionTemplate has a corresponding instance template which is
- * used to create object instances when the function is used as a
- * constructor. Properties added to the instance template are added to
- * each object instance.
- *
- * A FunctionTemplate can have a prototype template. The prototype template
- * is used to create the prototype object of the function.
- *
- * The following example shows how to use a FunctionTemplate:
- *
- * \code
- * v8::Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
- * t->Set(isolate, "func_property", v8::Number::New(isolate, 1));
- *
- * v8::Local<v8::Template> proto_t = t->PrototypeTemplate();
- * proto_t->Set(isolate,
- * "proto_method",
- * v8::FunctionTemplate::New(isolate, InvokeCallback));
- * proto_t->Set(isolate, "proto_const", v8::Number::New(isolate, 2));
- *
- * v8::Local<v8::ObjectTemplate> instance_t = t->InstanceTemplate();
- * instance_t->SetAccessor(
- *     String::NewFromUtf8Literal(isolate, "instance_accessor"),
- * InstanceAccessorCallback);
- * instance_t->SetHandler(
- * NamedPropertyHandlerConfiguration(PropertyHandlerCallback));
- * instance_t->Set(String::NewFromUtf8Literal(isolate, "instance_property"),
- * Number::New(isolate, 3));
- *
- * v8::Local<v8::Function> function = t->GetFunction();
- * v8::Local<v8::Object> instance = function->NewInstance();
- * \endcode
- *
- * Let's use "function" as the JS variable name of the function object
- * and "instance" for the instance object created above. The function
- * and the instance will have the following properties:
- *
- * \code
- * func_property in function == true;
- * function.func_property == 1;
- *
- * function.prototype.proto_method() invokes 'InvokeCallback'
- * function.prototype.proto_const == 2;
- *
- * instance instanceof function == true;
- * instance.instance_accessor calls 'InstanceAccessorCallback'
- * instance.instance_property == 3;
- * \endcode
- *
- * A FunctionTemplate can inherit from another one by calling the
- * FunctionTemplate::Inherit method. The following graph illustrates
- * the semantics of inheritance:
- *
- * \code
- * FunctionTemplate Parent -> Parent() . prototype -> { }
- * ^ ^
- * | Inherit(Parent) | .__proto__
- * | |
- * FunctionTemplate Child -> Child() . prototype -> { }
- * \endcode
- *
- * When a FunctionTemplate 'Child' inherits from 'Parent', the prototype
- * object of the Child() function has __proto__ pointing to the
- * Parent() function's prototype object. An instance of the Child
- * function has all properties of Parent's instance template.
- *
- * Let Parent be the FunctionTemplate initialized in the previous
- * section and create a Child FunctionTemplate by:
- *
- * \code
- * Local<FunctionTemplate> parent = t;
- * Local<FunctionTemplate> child = FunctionTemplate::New();
- * child->Inherit(parent);
- *
- * Local<Function> child_function = child->GetFunction();
- * Local<Object> child_instance = child_function->NewInstance();
- * \endcode
- *
- * The Child function and Child instance will have the following
- * properties:
- *
- * \code
- * child_func.prototype.__proto__ == function.prototype;
- * child_instance.instance_accessor calls 'InstanceAccessorCallback'
- * child_instance.instance_property == 3;
- * \endcode
- *
- * The additional 'c_function' parameter refers to a fast API call, which
- * must not trigger GC or JavaScript execution, or call into V8 in other
- * ways. For more information on how to define them, see
- * include/v8-fast-api-calls.h. Please note that this feature is still
- * experimental.
- */
-class V8_EXPORT FunctionTemplate : public Template {
- public:
- /** Creates a function template.*/
- static Local<FunctionTemplate> New(
- Isolate* isolate, FunctionCallback callback = nullptr,
- Local<Value> data = Local<Value>(),
- Local<Signature> signature = Local<Signature>(), int length = 0,
- ConstructorBehavior behavior = ConstructorBehavior::kAllow,
- SideEffectType side_effect_type = SideEffectType::kHasSideEffect,
- const CFunction* c_function = nullptr, uint16_t instance_type = 0,
- uint16_t allowed_receiver_instance_type_range_start = 0,
- uint16_t allowed_receiver_instance_type_range_end = 0);
-
- /** Creates a function template for multiple overloaded fast API calls.*/
- static Local<FunctionTemplate> NewWithCFunctionOverloads(
- Isolate* isolate, FunctionCallback callback = nullptr,
- Local<Value> data = Local<Value>(),
- Local<Signature> signature = Local<Signature>(), int length = 0,
- ConstructorBehavior behavior = ConstructorBehavior::kAllow,
- SideEffectType side_effect_type = SideEffectType::kHasSideEffect,
- const MemorySpan<const CFunction>& c_function_overloads = {});
-
- /**
- * Creates a function template backed/cached by a private property.
- */
- static Local<FunctionTemplate> NewWithCache(
- Isolate* isolate, FunctionCallback callback,
- Local<Private> cache_property, Local<Value> data = Local<Value>(),
- Local<Signature> signature = Local<Signature>(), int length = 0,
- SideEffectType side_effect_type = SideEffectType::kHasSideEffect);
-
- /** Returns the unique function instance in the current execution context.*/
- V8_WARN_UNUSED_RESULT MaybeLocal<Function> GetFunction(
- Local<Context> context);
-
- /**
- * Similar to Context::NewRemoteContext, this creates an instance that
- * isn't backed by an actual object.
- *
- * The InstanceTemplate of this FunctionTemplate must have access checks with
- * handlers installed.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewRemoteInstance();
-
- /**
- * Set the call-handler callback for a FunctionTemplate. This
- * callback is called whenever the function created from this
- * FunctionTemplate is called. The 'c_function' represents a fast
- * API call, see the comment above the class declaration.
- */
- void SetCallHandler(
- FunctionCallback callback, Local<Value> data = Local<Value>(),
- SideEffectType side_effect_type = SideEffectType::kHasSideEffect,
- const MemorySpan<const CFunction>& c_function_overloads = {});
-
- /** Set the predefined length property for the FunctionTemplate. */
- void SetLength(int length);
-
- /** Get the InstanceTemplate. */
- Local<ObjectTemplate> InstanceTemplate();
-
- /**
- * Causes the function template to inherit from a parent function template.
- * This means the function's prototype.__proto__ is set to the parent
- * function's prototype.
- **/
- void Inherit(Local<FunctionTemplate> parent);
-
- /**
- * A PrototypeTemplate is the template used to create the prototype object
- * of the function created by this template.
- */
- Local<ObjectTemplate> PrototypeTemplate();
-
- /**
- * A PrototypeProviderTemplate is another function template whose prototype
- * property is used for this template. This is mutually exclusive with setting
- * a prototype template indirectly by calling PrototypeTemplate() or using
- * Inherit().
- **/
- void SetPrototypeProviderTemplate(Local<FunctionTemplate> prototype_provider);
-
- /**
- * Set the class name of the FunctionTemplate. This is used for
- * printing objects created with the function created from the
- * FunctionTemplate as its constructor.
- */
- void SetClassName(Local<String> name);
-
-
- /**
- * When set to true, no access check will be performed on the receiver of a
- * function call. Currently defaults to true, but this is subject to change.
- */
- void SetAcceptAnyReceiver(bool value);
-
- /**
- * Sets the ReadOnly flag in the attributes of the 'prototype' property
- * of functions created from this FunctionTemplate to true.
- */
- void ReadOnlyPrototype();
-
- /**
- * Removes the prototype property from functions created from this
- * FunctionTemplate.
- */
- void RemovePrototype();
-
- /**
- * Returns true if the given object is an instance of this function
- * template.
- */
- bool HasInstance(Local<Value> object);
-
- /**
- * Returns true if the given value is an API object that was constructed by an
- * instance of this function template (without checking for inheriting
- * function templates).
- *
- * This is an experimental feature and may still change significantly.
- */
- bool IsLeafTemplateForApiObject(v8::Local<v8::Value> value) const;
-
- V8_INLINE static FunctionTemplate* Cast(Data* data);
-
- private:
- FunctionTemplate();
-
- static void CheckCast(Data* that);
- friend class Context;
- friend class ObjectTemplate;
-};
-
-/**
- * Configuration flags for v8::NamedPropertyHandlerConfiguration or
- * v8::IndexedPropertyHandlerConfiguration.
- */
-enum class PropertyHandlerFlags {
- /**
- * None.
- */
- kNone = 0,
-
- /**
- * See ALL_CAN_READ above.
- */
- kAllCanRead = 1,
-
- /**
- * Will not call into interceptor for properties on the receiver or
- * prototype chain, i.e., only call into interceptor for properties that
- * do not exist. Currently only valid for named interceptors.
- */
- kNonMasking = 1 << 1,
-
- /**
- * Will not call into interceptor for symbol lookup. Only meaningful for
- * named interceptors.
- */
- kOnlyInterceptStrings = 1 << 2,
-
- /**
- * The getter, query, enumerator callbacks do not produce side effects.
- */
- kHasNoSideEffect = 1 << 3,
-};
-
-struct NamedPropertyHandlerConfiguration {
- NamedPropertyHandlerConfiguration(
- GenericNamedPropertyGetterCallback getter,
- GenericNamedPropertySetterCallback setter,
- GenericNamedPropertyQueryCallback query,
- GenericNamedPropertyDeleterCallback deleter,
- GenericNamedPropertyEnumeratorCallback enumerator,
- GenericNamedPropertyDefinerCallback definer,
- GenericNamedPropertyDescriptorCallback descriptor,
- Local<Value> data = Local<Value>(),
- PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
- : getter(getter),
- setter(setter),
- query(query),
- deleter(deleter),
- enumerator(enumerator),
- definer(definer),
- descriptor(descriptor),
- data(data),
- flags(flags) {}
-
- NamedPropertyHandlerConfiguration(
- /** Note: getter is required */
- GenericNamedPropertyGetterCallback getter = nullptr,
- GenericNamedPropertySetterCallback setter = nullptr,
- GenericNamedPropertyQueryCallback query = nullptr,
- GenericNamedPropertyDeleterCallback deleter = nullptr,
- GenericNamedPropertyEnumeratorCallback enumerator = nullptr,
- Local<Value> data = Local<Value>(),
- PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
- : getter(getter),
- setter(setter),
- query(query),
- deleter(deleter),
- enumerator(enumerator),
- definer(nullptr),
- descriptor(nullptr),
- data(data),
- flags(flags) {}
-
- NamedPropertyHandlerConfiguration(
- GenericNamedPropertyGetterCallback getter,
- GenericNamedPropertySetterCallback setter,
- GenericNamedPropertyDescriptorCallback descriptor,
- GenericNamedPropertyDeleterCallback deleter,
- GenericNamedPropertyEnumeratorCallback enumerator,
- GenericNamedPropertyDefinerCallback definer,
- Local<Value> data = Local<Value>(),
- PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
- : getter(getter),
- setter(setter),
- query(nullptr),
- deleter(deleter),
- enumerator(enumerator),
- definer(definer),
- descriptor(descriptor),
- data(data),
- flags(flags) {}
-
- GenericNamedPropertyGetterCallback getter;
- GenericNamedPropertySetterCallback setter;
- GenericNamedPropertyQueryCallback query;
- GenericNamedPropertyDeleterCallback deleter;
- GenericNamedPropertyEnumeratorCallback enumerator;
- GenericNamedPropertyDefinerCallback definer;
- GenericNamedPropertyDescriptorCallback descriptor;
- Local<Value> data;
- PropertyHandlerFlags flags;
-};
-
-
-struct IndexedPropertyHandlerConfiguration {
- IndexedPropertyHandlerConfiguration(
- IndexedPropertyGetterCallback getter,
- IndexedPropertySetterCallback setter, IndexedPropertyQueryCallback query,
- IndexedPropertyDeleterCallback deleter,
- IndexedPropertyEnumeratorCallback enumerator,
- IndexedPropertyDefinerCallback definer,
- IndexedPropertyDescriptorCallback descriptor,
- Local<Value> data = Local<Value>(),
- PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
- : getter(getter),
- setter(setter),
- query(query),
- deleter(deleter),
- enumerator(enumerator),
- definer(definer),
- descriptor(descriptor),
- data(data),
- flags(flags) {}
-
- IndexedPropertyHandlerConfiguration(
- /** Note: getter is required */
- IndexedPropertyGetterCallback getter = nullptr,
- IndexedPropertySetterCallback setter = nullptr,
- IndexedPropertyQueryCallback query = nullptr,
- IndexedPropertyDeleterCallback deleter = nullptr,
- IndexedPropertyEnumeratorCallback enumerator = nullptr,
- Local<Value> data = Local<Value>(),
- PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
- : getter(getter),
- setter(setter),
- query(query),
- deleter(deleter),
- enumerator(enumerator),
- definer(nullptr),
- descriptor(nullptr),
- data(data),
- flags(flags) {}
-
- IndexedPropertyHandlerConfiguration(
- IndexedPropertyGetterCallback getter,
- IndexedPropertySetterCallback setter,
- IndexedPropertyDescriptorCallback descriptor,
- IndexedPropertyDeleterCallback deleter,
- IndexedPropertyEnumeratorCallback enumerator,
- IndexedPropertyDefinerCallback definer,
- Local<Value> data = Local<Value>(),
- PropertyHandlerFlags flags = PropertyHandlerFlags::kNone)
- : getter(getter),
- setter(setter),
- query(nullptr),
- deleter(deleter),
- enumerator(enumerator),
- definer(definer),
- descriptor(descriptor),
- data(data),
- flags(flags) {}
-
- IndexedPropertyGetterCallback getter;
- IndexedPropertySetterCallback setter;
- IndexedPropertyQueryCallback query;
- IndexedPropertyDeleterCallback deleter;
- IndexedPropertyEnumeratorCallback enumerator;
- IndexedPropertyDefinerCallback definer;
- IndexedPropertyDescriptorCallback descriptor;
- Local<Value> data;
- PropertyHandlerFlags flags;
-};
-
-
-/**
- * An ObjectTemplate is used to create objects at runtime.
- *
- * Properties added to an ObjectTemplate are added to each object
- * created from the ObjectTemplate.
- */
-class V8_EXPORT ObjectTemplate : public Template {
- public:
- /** Creates an ObjectTemplate. */
- static Local<ObjectTemplate> New(
- Isolate* isolate,
- Local<FunctionTemplate> constructor = Local<FunctionTemplate>());
-
- /** Creates a new instance of this template.*/
- V8_WARN_UNUSED_RESULT MaybeLocal<Object> NewInstance(Local<Context> context);
-
- /**
- * Sets an accessor on the object template.
- *
- * Whenever the property with the given name is accessed on objects
- * created from this ObjectTemplate the getter and setter callbacks
- * are called instead of getting and setting the property directly
- * on the JavaScript object.
- *
- * \param name The name of the property for which an accessor is added.
- * \param getter The callback to invoke when getting the property.
- * \param setter The callback to invoke when setting the property.
- * \param data A piece of data that will be passed to the getter and setter
- * callbacks whenever they are invoked.
- * \param settings Access control settings for the accessor. This is a bit
- * field consisting of one or more of
- * DEFAULT = 0, ALL_CAN_READ = 1, or ALL_CAN_WRITE = 2.
- * The default is to not allow cross-context access.
- * ALL_CAN_READ means that all cross-context reads are allowed.
- * ALL_CAN_WRITE means that all cross-context writes are allowed.
- * The combination ALL_CAN_READ | ALL_CAN_WRITE can be used to allow all
- * cross-context access.
- * \param attribute The attributes of the property for which an accessor
- * is added.
- * \param signature The signature describes valid receivers for the accessor
- * and is used to perform implicit instance checks against them. If the
- * receiver is incompatible (i.e. is not an instance of the constructor as
- * defined by FunctionTemplate::HasInstance()), an implicit TypeError is
- * thrown and no callback is invoked.
- */
- void SetAccessor(
- Local<String> name, AccessorGetterCallback getter,
- AccessorSetterCallback setter = nullptr,
- Local<Value> data = Local<Value>(), AccessControl settings = DEFAULT,
- PropertyAttribute attribute = None,
- Local<AccessorSignature> signature = Local<AccessorSignature>(),
- SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
- SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
- void SetAccessor(
- Local<Name> name, AccessorNameGetterCallback getter,
- AccessorNameSetterCallback setter = nullptr,
- Local<Value> data = Local<Value>(), AccessControl settings = DEFAULT,
- PropertyAttribute attribute = None,
- Local<AccessorSignature> signature = Local<AccessorSignature>(),
- SideEffectType getter_side_effect_type = SideEffectType::kHasSideEffect,
- SideEffectType setter_side_effect_type = SideEffectType::kHasSideEffect);
-
- /**
- * Sets a named property handler on the object template.
- *
- * Whenever a property whose name is a string or a symbol is accessed on
- * objects created from this object template, the provided callback is
- * invoked instead of accessing the property directly on the JavaScript
- * object.
- *
- * @param configuration The NamedPropertyHandlerConfiguration that defines the
- * callbacks to invoke when accessing a property.
- */
- void SetHandler(const NamedPropertyHandlerConfiguration& configuration);
-
- /**
- * Sets an indexed property handler on the object template.
- *
- * Whenever an indexed property is accessed on objects created from
- * this object template, the provided callback is invoked instead of
- * accessing the property directly on the JavaScript object.
- *
- * \param getter The callback to invoke when getting a property.
- * \param setter The callback to invoke when setting a property.
- * \param query The callback to invoke to check if an object has a property.
- * \param deleter The callback to invoke when deleting a property.
- * \param enumerator The callback to invoke to enumerate all the indexed
- * properties of an object.
- * \param data A piece of data that will be passed to the callbacks
- * whenever they are invoked.
- */
- // TODO(dcarney): deprecate
- void SetIndexedPropertyHandler(
- IndexedPropertyGetterCallback getter,
- IndexedPropertySetterCallback setter = nullptr,
- IndexedPropertyQueryCallback query = nullptr,
- IndexedPropertyDeleterCallback deleter = nullptr,
- IndexedPropertyEnumeratorCallback enumerator = nullptr,
- Local<Value> data = Local<Value>()) {
- SetHandler(IndexedPropertyHandlerConfiguration(getter, setter, query,
- deleter, enumerator, data));
- }
-
- /**
- * Sets an indexed property handler on the object template.
- *
- * Whenever an indexed property is accessed on objects created from
- * this object template, the provided callback is invoked instead of
- * accessing the property directly on the JavaScript object.
- *
- * @param configuration The IndexedPropertyHandlerConfiguration that defines
- * the callbacks to invoke when accessing a property.
- */
- void SetHandler(const IndexedPropertyHandlerConfiguration& configuration);
-
- /**
- * Sets the callback to be used when calling instances created from
- * this template as a function. If no callback is set, instances
- * behave like normal JavaScript objects that cannot be called as a
- * function.
- */
- void SetCallAsFunctionHandler(FunctionCallback callback,
- Local<Value> data = Local<Value>());
-
- /**
- * Mark object instances of the template as undetectable.
- *
- * In many ways, undetectable objects behave as though they are not
- * there. They behave like 'undefined' in conditionals and when
- * printed. However, properties can be accessed and called as on
- * normal objects.
- */
- void MarkAsUndetectable();
-
- /**
- * Sets access check callback on the object template and enables access
- * checks.
- *
- * When accessing properties on instances of this object template,
- * the access check callback will be called to determine whether or
- * not to allow cross-context access to the properties.
- */
- void SetAccessCheckCallback(AccessCheckCallback callback,
- Local<Value> data = Local<Value>());
-
- /**
- * Like SetAccessCheckCallback but invokes an interceptor on failed access
- * checks instead of looking up all-can-read properties. You can only use
- * either this method or SetAccessCheckCallback, but not both at the same
- * time.
- */
- void SetAccessCheckCallbackAndHandler(
- AccessCheckCallback callback,
- const NamedPropertyHandlerConfiguration& named_handler,
- const IndexedPropertyHandlerConfiguration& indexed_handler,
- Local<Value> data = Local<Value>());
-
- /**
- * Gets the number of internal fields for objects generated from
- * this template.
- */
- int InternalFieldCount() const;
-
- /**
- * Sets the number of internal fields for objects generated from
- * this template.
- */
- void SetInternalFieldCount(int value);
-
- /**
- * Returns true if the object will be an immutable prototype exotic object.
- */
- bool IsImmutableProto() const;
-
- /**
- * Makes the ObjectTemplate for an immutable prototype exotic object, with an
- * immutable __proto__.
- */
- void SetImmutableProto();
-
- /**
- * Support for TC39 "dynamic code brand checks" proposal.
- *
- * This API allows embedders to mark (and query) objects as "code like",
- * which causes them to be treated like Strings in the context of eval and
- * the Function constructor.
- *
- * Reference: https://github.com/tc39/proposal-dynamic-code-brand-checks
- */
- void SetCodeLike();
- bool IsCodeLike() const;
-
- V8_INLINE static ObjectTemplate* Cast(Data* data);
-
- private:
- ObjectTemplate();
- static Local<ObjectTemplate> New(internal::Isolate* isolate,
- Local<FunctionTemplate> constructor);
- static void CheckCast(Data* that);
- friend class FunctionTemplate;
-};
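-
-/**
- * A minimal end-to-end sketch, assuming |isolate| and |context| are in
- * scope and |embedder_data| is an embedder-owned pointer; one internal
- * field is reserved for it:
- *
- * \code
- * v8::Local<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
- * templ->SetInternalFieldCount(1);
- * v8::Local<v8::Object> obj;
- * if (templ->NewInstance(context).ToLocal(&obj)) {
- *   obj->SetInternalField(0, v8::External::New(isolate, embedder_data));
- * }
- * \endcode
- */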
-
-/**
- * A Signature specifies which receiver is valid for a function.
- *
- * A receiver matches a given signature if the receiver (or any of its
- * hidden prototypes) was created from the signature's FunctionTemplate, or
- * from a FunctionTemplate that inherits directly or indirectly from the
- * signature's FunctionTemplate.
- */
-class V8_EXPORT Signature : public Data {
- public:
- static Local<Signature> New(
- Isolate* isolate,
- Local<FunctionTemplate> receiver = Local<FunctionTemplate>());
-
- V8_INLINE static Signature* Cast(Data* data);
-
- private:
- Signature();
-
- static void CheckCast(Data* that);
-};
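-
-/**
- * A minimal sketch of restricting a method's receiver, assuming |parent|
- * is a Local<FunctionTemplate> and MethodCallback is an embedder-defined
- * FunctionCallback; calls on incompatible receivers throw a TypeError:
- *
- * \code
- * v8::Local<v8::Signature> sig = v8::Signature::New(isolate, parent);
- * v8::Local<v8::FunctionTemplate> method = v8::FunctionTemplate::New(
- *     isolate, MethodCallback, v8::Local<v8::Value>(), sig);
- * parent->PrototypeTemplate()->Set(isolate, "method", method);
- * \endcode
- */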
-
-
-/**
- * An AccessorSignature specifies which receivers are valid parameters
- * to an accessor callback.
- */
-class V8_EXPORT AccessorSignature : public Data {
- public:
- static Local<AccessorSignature> New(
- Isolate* isolate,
- Local<FunctionTemplate> receiver = Local<FunctionTemplate>());
-
- V8_INLINE static AccessorSignature* Cast(Data* data);
-
- private:
- AccessorSignature();
-
- static void CheckCast(Data* that);
-};
-
-
-// --- Extensions ---
-
-/**
- * A registered extension: named JavaScript source, with optional
- * dependencies, that can be enabled for contexts (see RegisterExtension).
- */
-class V8_EXPORT Extension {
- public:
- // Note that the strings passed into this constructor must live as long
- // as the Extension itself.
- Extension(const char* name, const char* source = nullptr, int dep_count = 0,
- const char** deps = nullptr, int source_length = -1);
- virtual ~Extension() { delete source_; }
- virtual Local<FunctionTemplate> GetNativeFunctionTemplate(
- Isolate* isolate, Local<String> name) {
- return Local<FunctionTemplate>();
- }
-
- const char* name() const { return name_; }
- size_t source_length() const { return source_length_; }
- const String::ExternalOneByteStringResource* source() const {
- return source_;
- }
- int dependency_count() const { return dep_count_; }
- const char** dependencies() const { return deps_; }
- void set_auto_enable(bool value) { auto_enable_ = value; }
- bool auto_enable() { return auto_enable_; }
-
- // Disallow copying and assigning.
- Extension(const Extension&) = delete;
- void operator=(const Extension&) = delete;
-
- private:
- const char* name_;
- size_t source_length_; // expected to initialize before source_
- String::ExternalOneByteStringResource* source_;
- int dep_count_;
- const char** deps_;
- bool auto_enable_;
-};
-
-void V8_EXPORT RegisterExtension(std::unique_ptr<Extension>);
-
-// --- Statics ---
-
-V8_INLINE Local<Primitive> Undefined(Isolate* isolate);
-V8_INLINE Local<Primitive> Null(Isolate* isolate);
-V8_INLINE Local<Boolean> True(Isolate* isolate);
-V8_INLINE Local<Boolean> False(Isolate* isolate);
-
-/**
- * A set of constraints that specifies the limits of the runtime's memory use.
- * You must set the heap size before initializing the VM - the size cannot be
- * adjusted after the VM is initialized.
- *
- * If you are using threads then you should hold the V8::Locker lock while
- * setting the stack limit and you must set a non-default stack limit separately
- * for each thread.
- *
- * The setters on this class (e.g. set_max_old_generation_size_in_bytes,
- * set_code_range_size_in_bytes) specify limits in bytes.
- */
-class V8_EXPORT ResourceConstraints {
- public:
- /**
- * Configures the constraints with reasonable default values based on the
- * provided heap size limit. The heap size includes both the young and
- * the old generation.
- *
- * \param initial_heap_size_in_bytes The initial heap size or zero.
- * By default V8 starts with a small heap and dynamically grows it to
- * match the set of live objects. This may lead to ineffective
- * garbage collections at startup if the live set is large.
- * Setting the initial heap size avoids such garbage collections.
- * Note that this does not affect young generation garbage collections.
- *
- * \param maximum_heap_size_in_bytes The hard limit for the heap size.
- * When the heap size approaches this limit, V8 will perform a series of
- * garbage collections and invoke the NearHeapLimitCallback. If the garbage
- * collections do not help and the callback does not increase the limit,
- * then V8 will crash with V8::FatalProcessOutOfMemory.
- */
- void ConfigureDefaultsFromHeapSize(size_t initial_heap_size_in_bytes,
- size_t maximum_heap_size_in_bytes);
-
- /**
- * Configures the constraints with reasonable default values based on the
- * capabilities of the current device the VM is running on.
- *
- * \param physical_memory The total amount of physical memory on the current
- * device, in bytes.
- * \param virtual_memory_limit The amount of virtual memory on the current
- * device, in bytes, or zero, if there is no limit.
- */
- void ConfigureDefaults(uint64_t physical_memory,
- uint64_t virtual_memory_limit);
-
- /**
- * The address beyond which the VM's stack may not grow.
- */
- uint32_t* stack_limit() const { return stack_limit_; }
- void set_stack_limit(uint32_t* value) { stack_limit_ = value; }
-
- /**
- * The amount of virtual memory reserved for generated code. This is relevant
- * for 64-bit architectures that rely on code range for calls in code.
- *
- * When V8_COMPRESS_POINTERS_IN_SHARED_CAGE is defined, there is a shared
- * process-wide code range that is lazily initialized. This value is used to
- * configure that shared code range when the first Isolate is
- * created. Subsequent Isolates ignore this value.
- */
- size_t code_range_size_in_bytes() const { return code_range_size_; }
- void set_code_range_size_in_bytes(size_t limit) { code_range_size_ = limit; }
-
- /**
- * The maximum size of the old generation.
-   * When the old generation approaches this limit, V8 will perform a series of
- * garbage collections and invoke the NearHeapLimitCallback.
- * If the garbage collections do not help and the callback does not
- * increase the limit, then V8 will crash with V8::FatalProcessOutOfMemory.
- */
- size_t max_old_generation_size_in_bytes() const {
- return max_old_generation_size_;
- }
- void set_max_old_generation_size_in_bytes(size_t limit) {
- max_old_generation_size_ = limit;
- }
-
- /**
- * The maximum size of the young generation, which consists of two semi-spaces
- * and a large object space. This affects frequency of Scavenge garbage
-   * collections and should typically be much smaller than the old generation.
- */
- size_t max_young_generation_size_in_bytes() const {
- return max_young_generation_size_;
- }
- void set_max_young_generation_size_in_bytes(size_t limit) {
- max_young_generation_size_ = limit;
- }
-
- size_t initial_old_generation_size_in_bytes() const {
- return initial_old_generation_size_;
- }
- void set_initial_old_generation_size_in_bytes(size_t initial_size) {
- initial_old_generation_size_ = initial_size;
- }
-
- size_t initial_young_generation_size_in_bytes() const {
- return initial_young_generation_size_;
- }
- void set_initial_young_generation_size_in_bytes(size_t initial_size) {
- initial_young_generation_size_ = initial_size;
- }
-
- private:
- static constexpr size_t kMB = 1048576u;
- size_t code_range_size_ = 0;
- size_t max_old_generation_size_ = 0;
- size_t max_young_generation_size_ = 0;
- size_t initial_old_generation_size_ = 0;
- size_t initial_young_generation_size_ = 0;
- uint32_t* stack_limit_ = nullptr;
-};
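-
-// Usage sketch (illustrative; hypothetical embedder code, not part of the
-// original header): configuring heap limits before creating an isolate.
-// The 512 MB maximum is an arbitrary example value.
-//
-//   v8::Isolate::CreateParams params;  // Allocator setup omitted here.
-//   params.constraints.ConfigureDefaultsFromHeapSize(
-//       /*initial_heap_size_in_bytes=*/0,
-//       /*maximum_heap_size_in_bytes=*/size_t{512} * 1024 * 1024);
-//   v8::Isolate* isolate = v8::Isolate::New(params);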
-
-
-// --- Exceptions ---
-
-using FatalErrorCallback = void (*)(const char* location, const char* message);
-
-using OOMErrorCallback = void (*)(const char* location, bool is_heap_oom);
-
-using DcheckErrorCallback = void (*)(const char* file, int line,
- const char* message);
-
-using MessageCallback = void (*)(Local<Message> message, Local<Value> data);
-
-// --- Tracing ---
-
-enum LogEventStatus : int { kStart = 0, kEnd = 1, kStamp = 2 };
-using LogEventCallback = void (*)(const char* name,
- int /* LogEventStatus */ status);
-
-/**
- * Create new error objects by calling the corresponding error object
- * constructor with the message.
- */
-class V8_EXPORT Exception {
- public:
- static Local<Value> RangeError(Local<String> message);
- static Local<Value> ReferenceError(Local<String> message);
- static Local<Value> SyntaxError(Local<String> message);
- static Local<Value> TypeError(Local<String> message);
- static Local<Value> WasmCompileError(Local<String> message);
- static Local<Value> WasmLinkError(Local<String> message);
- static Local<Value> WasmRuntimeError(Local<String> message);
- static Local<Value> Error(Local<String> message);
-
- /**
- * Creates an error message for the given exception.
- * Will try to reconstruct the original stack trace from the exception value,
- * or capture the current stack trace if not available.
- */
- static Local<Message> CreateMessage(Isolate* isolate, Local<Value> exception);
-
- /**
- * Returns the original stack trace that was captured at the creation time
- * of a given exception, or an empty handle if not available.
- */
- static Local<StackTrace> GetStackTrace(Local<Value> exception);
-};
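-
-// Usage sketch (illustrative; hypothetical embedder callback): creating and
-// throwing a TypeError from a native function.
-//
-//   void Throws(const v8::FunctionCallbackInfo<v8::Value>& info) {
-//     v8::Isolate* isolate = info.GetIsolate();
-//     isolate->ThrowException(v8::Exception::TypeError(
-//         v8::String::NewFromUtf8Literal(isolate, "expected a string")));
-//   }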
-
-
-// --- Counters Callbacks ---
-
-using CounterLookupCallback = int* (*)(const char* name);
-
-using CreateHistogramCallback = void* (*)(const char* name, int min, int max,
- size_t buckets);
-
-using AddHistogramSampleCallback = void (*)(void* histogram, int sample);
-
-// --- Crashkeys Callback ---
-enum class CrashKeyId {
- kIsolateAddress,
- kReadonlySpaceFirstPageAddress,
- kMapSpaceFirstPageAddress,
- kCodeSpaceFirstPageAddress,
- kDumpType,
-};
-
-using AddCrashKeyCallback = void (*)(CrashKeyId id, const std::string& value);
-
-// --- Enter/Leave Script Callback ---
-using BeforeCallEnteredCallback = void (*)(Isolate*);
-using CallCompletedCallback = void (*)(Isolate*);
-
-/**
- * HostImportModuleDynamicallyCallback is called when we require the
- * embedder to load a module. This is used as part of the dynamic
- * import syntax.
- *
- * The referrer contains metadata about the script/module that calls
- * import.
- *
- * The specifier is the name of the module that should be imported.
- *
- * The embedder must compile, instantiate, and evaluate the Module, and
- * obtain its namespace object.
- *
- * The Promise returned from this function is forwarded to userland
- * JavaScript. The embedder must resolve this promise with the module
- * namespace object. In case of an exception, the embedder must reject
- * this promise with the exception. If the promise creation itself
- * fails (e.g. due to stack overflow), the embedder must propagate
- * that exception by returning an empty MaybeLocal.
- */
-using HostImportModuleDynamicallyCallback =
- MaybeLocal<Promise> (*)(Local<Context> context,
- Local<ScriptOrModule> referrer,
- Local<String> specifier);
-
-/**
- * HostImportModuleDynamicallyWithImportAssertionsCallback is called when we
- * require the embedder to load a module. This is used as part of the dynamic
- * import syntax.
- *
- * The referrer contains metadata about the script/module that calls
- * import.
- *
- * The specifier is the name of the module that should be imported.
- *
- * The import_assertions are import assertions for this request in the form:
- * [key1, value1, key2, value2, ...] where the keys and values are of type
- * v8::String. Note, unlike the FixedArray passed to ResolveModuleCallback and
- * returned from ModuleRequest::GetImportAssertions(), this array does not
- * contain the source Locations of the assertions.
- *
- * The embedder must compile, instantiate, and evaluate the Module, and
- * obtain its namespace object.
- *
- * The Promise returned from this function is forwarded to userland
- * JavaScript. The embedder must resolve this promise with the module
- * namespace object. In case of an exception, the embedder must reject
- * this promise with the exception. If the promise creation itself
- * fails (e.g. due to stack overflow), the embedder must propagate
- * that exception by returning an empty MaybeLocal.
- */
-using HostImportModuleDynamicallyWithImportAssertionsCallback =
- MaybeLocal<Promise> (*)(Local<Context> context,
- Local<ScriptOrModule> referrer,
- Local<String> specifier,
- Local<FixedArray> import_assertions);
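-
-// Minimal sketch (illustrative; a real embedder would compile, instantiate
-// and evaluate the requested module and resolve with its namespace object
-// instead of rejecting):
-//
-//   v8::MaybeLocal<v8::Promise> OnDynamicImport(
-//       v8::Local<v8::Context> context,
-//       v8::Local<v8::ScriptOrModule> referrer,
-//       v8::Local<v8::String> specifier,
-//       v8::Local<v8::FixedArray> import_assertions) {
-//     v8::Local<v8::Promise::Resolver> resolver;
-//     if (!v8::Promise::Resolver::New(context).ToLocal(&resolver))
-//       return v8::MaybeLocal<v8::Promise>();  // Propagate the exception.
-//     resolver
-//         ->Reject(context,
-//                  v8::Exception::Error(v8::String::NewFromUtf8Literal(
-//                      context->GetIsolate(), "imports disabled")))
-//         .Check();
-//     return resolver->GetPromise();
-//   }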
-
-/**
- * HostInitializeImportMetaObjectCallback is called the first time import.meta
- * is accessed for a module. Subsequent access will reuse the same value.
- *
- * The method combines two implementation-defined abstract operations into one:
- * HostGetImportMetaProperties and HostFinalizeImportMeta.
- *
- * The embedder should use v8::Object::CreateDataProperty to add properties on
- * the meta object.
- */
-using HostInitializeImportMetaObjectCallback = void (*)(Local<Context> context,
- Local<Module> module,
- Local<Object> meta);
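-
-// Sketch (illustrative; |LookupModuleUrl| is a hypothetical embedder helper
-// that returns the module's URL as a v8::String):
-//
-//   void OnInitImportMeta(v8::Local<v8::Context> context,
-//                         v8::Local<v8::Module> module,
-//                         v8::Local<v8::Object> meta) {
-//     v8::Isolate* isolate = context->GetIsolate();
-//     meta->CreateDataProperty(
-//             context, v8::String::NewFromUtf8Literal(isolate, "url"),
-//             LookupModuleUrl(isolate, module))
-//         .Check();
-//   }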
-
-/**
- * PrepareStackTraceCallback is called when the stack property of an error is
- * first accessed. The return value will be used as the stack value. If this
- * callback is registered, the |Error.prepareStackTrace| API will be disabled.
- * |sites| is an array of call sites, specified in
- * https://v8.dev/docs/stack-trace-api
- */
-using PrepareStackTraceCallback = MaybeLocal<Value> (*)(Local<Context> context,
- Local<Value> error,
- Local<Array> sites);
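-
-// Sketch (illustrative; a real embedder would format |sites| using the
-// call-site accessors described in the stack-trace API documentation):
-//
-//   v8::MaybeLocal<v8::Value> OnPrepareStackTrace(
-//       v8::Local<v8::Context> context, v8::Local<v8::Value> error,
-//       v8::Local<v8::Array> sites) {
-//     return v8::String::NewFromUtf8Literal(context->GetIsolate(),
-//                                           "<stack trace elided>");
-//   }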
-
-/**
- * PromiseHook with type kInit is called when a new promise is
- * created. When a new promise is created as part of the chain in the
- * case of Promise.then or in the intermediate promises created by
- * Promise.{race, all}/AsyncFunctionAwait, we pass the parent promise
- * otherwise we pass undefined.
- *
- * PromiseHook with type kResolve is called at the beginning of
- * resolve or reject function defined by CreateResolvingFunctions.
- *
- * PromiseHook with type kBefore is called at the beginning of the
- * PromiseReactionJob.
- *
- * PromiseHook with type kAfter is called right at the end of the
- * PromiseReactionJob.
- */
-enum class PromiseHookType { kInit, kResolve, kBefore, kAfter };
-
-using PromiseHook = void (*)(PromiseHookType type, Local<Promise> promise,
- Local<Value> parent);
-
-// --- Promise Reject Callback ---
-enum PromiseRejectEvent {
- kPromiseRejectWithNoHandler = 0,
- kPromiseHandlerAddedAfterReject = 1,
- kPromiseRejectAfterResolved = 2,
- kPromiseResolveAfterResolved = 3,
-};
-
-class PromiseRejectMessage {
- public:
- PromiseRejectMessage(Local<Promise> promise, PromiseRejectEvent event,
- Local<Value> value)
- : promise_(promise), event_(event), value_(value) {}
-
- V8_INLINE Local<Promise> GetPromise() const { return promise_; }
- V8_INLINE PromiseRejectEvent GetEvent() const { return event_; }
- V8_INLINE Local<Value> GetValue() const { return value_; }
-
- private:
- Local<Promise> promise_;
- PromiseRejectEvent event_;
- Local<Value> value_;
-};
-
-using PromiseRejectCallback = void (*)(PromiseRejectMessage message);
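-
-// Sketch (illustrative; |TrackUnhandledRejection| is a hypothetical embedder
-// helper), registered via Isolate::SetPromiseRejectCallback:
-//
-//   void OnPromiseReject(v8::PromiseRejectMessage message) {
-//     if (message.GetEvent() == v8::kPromiseRejectWithNoHandler) {
-//       // Remember the promise; report it later unless a handler is
-//       // attached (kPromiseHandlerAddedAfterReject).
-//       TrackUnhandledRejection(message.GetPromise(), message.GetValue());
-//     }
-//   }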
-
-// --- Microtasks Callbacks ---
-using MicrotasksCompletedCallbackWithData = void (*)(Isolate*, void*);
-using MicrotaskCallback = void (*)(void* data);
-
-/**
- * Policy for running microtasks:
- * - explicit: microtasks are invoked with the
- * Isolate::PerformMicrotaskCheckpoint() method;
- * - scoped: microtasks invocation is controlled by MicrotasksScope objects;
- * - auto: microtasks are invoked when the script call depth decrements
- * to zero.
- */
-enum class MicrotasksPolicy { kExplicit, kScoped, kAuto };
-
-/**
- * Represents the microtask queue, where microtasks are stored and processed.
- * https://html.spec.whatwg.org/multipage/webappapis.html#microtask-queue
- * https://html.spec.whatwg.org/multipage/webappapis.html#enqueuejob(queuename,-job,-arguments)
- * https://html.spec.whatwg.org/multipage/webappapis.html#perform-a-microtask-checkpoint
- *
- * A MicrotaskQueue instance may be associated with multiple Contexts by passing
- * it to Context::New(), and they can be detached by Context::DetachGlobal().
- * The embedder must keep the MicrotaskQueue instance alive until all associated
- * Contexts are gone or detached.
- *
- * Use the same instance of MicrotaskQueue for all Contexts that may access each
- * other synchronously. E.g. for Web embedding, use the same instance for all
- * origins that share the same URL scheme and eTLD+1.
- */
-class V8_EXPORT MicrotaskQueue {
- public:
- /**
- * Creates an empty MicrotaskQueue instance.
- */
- static std::unique_ptr<MicrotaskQueue> New(
- Isolate* isolate, MicrotasksPolicy policy = MicrotasksPolicy::kAuto);
-
- virtual ~MicrotaskQueue() = default;
-
- /**
- * Enqueues the callback to the queue.
- */
- virtual void EnqueueMicrotask(Isolate* isolate,
- Local<Function> microtask) = 0;
-
- /**
- * Enqueues the callback to the queue.
- */
- virtual void EnqueueMicrotask(v8::Isolate* isolate,
- MicrotaskCallback callback,
- void* data = nullptr) = 0;
-
- /**
- * Adds a callback to notify the embedder after microtasks were run. The
- * callback is triggered by explicit RunMicrotasks call or automatic
- * microtasks execution (see Isolate::SetMicrotasksPolicy).
- *
-   * The callback will be triggered even if an attempt to run microtasks was
-   * made but the queue was empty and no microtask was actually executed.
- *
- * Executing scripts inside the callback will not re-trigger microtasks and
- * the callback.
- */
- virtual void AddMicrotasksCompletedCallback(
- MicrotasksCompletedCallbackWithData callback, void* data = nullptr) = 0;
-
- /**
- * Removes callback that was installed by AddMicrotasksCompletedCallback.
- */
- virtual void RemoveMicrotasksCompletedCallback(
- MicrotasksCompletedCallbackWithData callback, void* data = nullptr) = 0;
-
- /**
- * Runs microtasks if no microtask is running on this MicrotaskQueue instance.
- */
- virtual void PerformCheckpoint(Isolate* isolate) = 0;
-
- /**
- * Returns true if a microtask is running on this MicrotaskQueue instance.
- */
- virtual bool IsRunningMicrotasks() const = 0;
-
- /**
- * Returns the current depth of nested MicrotasksScope that has
- * kRunMicrotasks.
- */
- virtual int GetMicrotasksScopeDepth() const = 0;
-
- MicrotaskQueue(const MicrotaskQueue&) = delete;
- MicrotaskQueue& operator=(const MicrotaskQueue&) = delete;
-
- private:
- friend class internal::MicrotaskQueue;
- MicrotaskQueue() = default;
-};
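-
-// Usage sketch (illustrative; assumes an initialized |isolate|): a dedicated
-// queue drained explicitly by the embedder.
-//
-//   std::unique_ptr<v8::MicrotaskQueue> queue =
-//       v8::MicrotaskQueue::New(isolate, v8::MicrotasksPolicy::kExplicit);
-//   v8::Local<v8::Context> context = v8::Context::New(
-//       isolate, /*extensions=*/nullptr, /*global_template=*/{},
-//       /*global_object=*/{}, v8::DeserializeInternalFieldsCallback(),
-//       queue.get());
-//   // ... run scripts in |context| ...
-//   queue->PerformCheckpoint(isolate);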
-
-/**
- * This scope is used to control microtasks when MicrotasksPolicy::kScoped
- * is used on Isolate. In this mode every non-primitive call to V8 should be
- * done inside some MicrotasksScope.
- * Microtasks are executed when topmost MicrotasksScope marked as kRunMicrotasks
- * exits.
- * kDoNotRunMicrotasks should be used to annotate calls not intended to trigger
- * microtasks.
- */
-class V8_EXPORT V8_NODISCARD MicrotasksScope {
- public:
- enum Type { kRunMicrotasks, kDoNotRunMicrotasks };
-
- MicrotasksScope(Isolate* isolate, Type type);
- MicrotasksScope(Isolate* isolate, MicrotaskQueue* microtask_queue, Type type);
- ~MicrotasksScope();
-
- /**
- * Runs microtasks if no kRunMicrotasks scope is currently active.
- */
- static void PerformCheckpoint(Isolate* isolate);
-
- /**
- * Returns current depth of nested kRunMicrotasks scopes.
- */
- static int GetCurrentDepth(Isolate* isolate);
-
- /**
- * Returns true while microtasks are being executed.
- */
- static bool IsRunningMicrotasks(Isolate* isolate);
-
- // Prevent copying.
- MicrotasksScope(const MicrotasksScope&) = delete;
- MicrotasksScope& operator=(const MicrotasksScope&) = delete;
-
- private:
- internal::Isolate* const isolate_;
- internal::MicrotaskQueue* const microtask_queue_;
- bool run_;
-};
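-
-// Usage sketch (illustrative), with MicrotasksPolicy::kScoped set on the
-// isolate; microtasks run when the outermost kRunMicrotasks scope exits.
-//
-//   {
-//     v8::MicrotasksScope scope(isolate,
-//                               v8::MicrotasksScope::kRunMicrotasks);
-//     // ... call into V8 ...
-//   }  // Microtask checkpoint is performed here.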
-
-// --- Failed Access Check Callback ---
-using FailedAccessCheckCallback = void (*)(Local<Object> target,
- AccessType type, Local<Value> data);
-
-// --- AllowCodeGenerationFromStrings callbacks ---
-
-/**
- * Callback to check if code generation from strings is allowed. See
- * Context::AllowCodeGenerationFromStrings.
- */
-using AllowCodeGenerationFromStringsCallback = bool (*)(Local<Context> context,
- Local<String> source);
-
-struct ModifyCodeGenerationFromStringsResult {
- // If true, proceed with the codegen algorithm. Otherwise, block it.
- bool codegen_allowed = false;
- // Overwrite the original source with this string, if present.
- // Use the original source if empty.
- // This field is considered only if codegen_allowed is true.
- MaybeLocal<String> modified_source;
-};
-
-/**
- * Callback to check if codegen is allowed from a source object, and convert
- * the source to string if necessary. See: ModifyCodeGenerationFromStrings.
- */
-using ModifyCodeGenerationFromStringsCallback =
- ModifyCodeGenerationFromStringsResult (*)(Local<Context> context,
- Local<Value> source);
-using ModifyCodeGenerationFromStringsCallback2 =
- ModifyCodeGenerationFromStringsResult (*)(Local<Context> context,
- Local<Value> source,
- bool is_code_like);
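-
-// Sketch (illustrative): allow eval()/new Function() only for strings and
-// code-like objects, keeping the source unchanged.
-//
-//   v8::ModifyCodeGenerationFromStringsResult OnCodegen(
-//       v8::Local<v8::Context> context, v8::Local<v8::Value> source,
-//       bool is_code_like) {
-//     v8::ModifyCodeGenerationFromStringsResult result;
-//     result.codegen_allowed = source->IsString() || is_code_like;
-//     return result;
-//   }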
-
-// --- WebAssembly compilation callbacks ---
-using ExtensionCallback = bool (*)(const FunctionCallbackInfo<Value>&);
-
-using AllowWasmCodeGenerationCallback = bool (*)(Local<Context> context,
- Local<String> source);
-
-// --- Callback for APIs defined on v8-supported objects, but implemented
-// by the embedder. Example: WebAssembly.{compile|instantiate}Streaming ---
-using ApiImplementationCallback = void (*)(const FunctionCallbackInfo<Value>&);
-
-// --- Callback for WebAssembly.compileStreaming ---
-using WasmStreamingCallback = void (*)(const FunctionCallbackInfo<Value>&);
-
-// --- Callback for loading source map file for Wasm profiling support
-using WasmLoadSourceMapCallback = Local<String> (*)(Isolate* isolate,
- const char* name);
-
-// --- Callback for checking if WebAssembly Simd is enabled ---
-using WasmSimdEnabledCallback = bool (*)(Local<Context> context);
-
-// --- Callback for checking if WebAssembly exceptions are enabled ---
-using WasmExceptionsEnabledCallback = bool (*)(Local<Context> context);
-
-// --- Callback for checking if the SharedArrayBuffer constructor is enabled ---
-using SharedArrayBufferConstructorEnabledCallback =
- bool (*)(Local<Context> context);
-
-// --- Garbage Collection Callbacks ---
-
-/**
- * Applications can register callback functions which will be called before and
- * after certain garbage collection operations. Allocations are not allowed in
- * the callback functions; you therefore cannot manipulate objects (set or
- * delete properties, for example) since such operations may result in the
- * allocation of objects.
- */
-enum GCType {
- kGCTypeScavenge = 1 << 0,
- kGCTypeMarkSweepCompact = 1 << 1,
- kGCTypeIncrementalMarking = 1 << 2,
- kGCTypeProcessWeakCallbacks = 1 << 3,
- kGCTypeAll = kGCTypeScavenge | kGCTypeMarkSweepCompact |
- kGCTypeIncrementalMarking | kGCTypeProcessWeakCallbacks
-};
-
-/**
- * GCCallbackFlags is used to notify additional information about the GC
- * callback.
- * - kGCCallbackFlagConstructRetainedObjectInfos: The GC callback is for
- * constructing retained object infos.
- * - kGCCallbackFlagForced: The GC callback is for a forced GC for testing.
- * - kGCCallbackFlagSynchronousPhantomCallbackProcessing: The GC callback
- * is called synchronously without getting posted to an idle task.
- * - kGCCallbackFlagCollectAllAvailableGarbage: The GC callback is called
- * in a phase where V8 is trying to collect all available garbage
- * (e.g., handling a low memory notification).
- * - kGCCallbackScheduleIdleGarbageCollection: The GC callback is called to
- * trigger an idle garbage collection.
- */
-enum GCCallbackFlags {
- kNoGCCallbackFlags = 0,
- kGCCallbackFlagConstructRetainedObjectInfos = 1 << 1,
- kGCCallbackFlagForced = 1 << 2,
- kGCCallbackFlagSynchronousPhantomCallbackProcessing = 1 << 3,
- kGCCallbackFlagCollectAllAvailableGarbage = 1 << 4,
- kGCCallbackFlagCollectAllExternalMemory = 1 << 5,
- kGCCallbackScheduleIdleGarbageCollection = 1 << 6,
-};
-
-using GCCallback = void (*)(GCType type, GCCallbackFlags flags);
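-
-// Sketch (illustrative). Note that Isolate::AddGCPrologueCallback takes a
-// variant of this signature with the Isolate as its first argument:
-//
-//   void OnGCPrologue(v8::Isolate* isolate, v8::GCType type,
-//                     v8::GCCallbackFlags flags) {
-//     if (type & v8::kGCTypeMarkSweepCompact) {
-//       // Full GC is starting; do not allocate V8 objects in here.
-//     }
-//   }
-//   // isolate->AddGCPrologueCallback(OnGCPrologue, v8::kGCTypeAll);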
-
-using InterruptCallback = void (*)(Isolate* isolate, void* data);
-
-/**
- * This callback is invoked when the heap size is close to the heap limit and
- * V8 is likely to abort with out-of-memory error.
- * The callback can extend the heap limit by returning a value that is greater
- * than the current_heap_limit. The initial heap limit is the limit that was
- * set after heap setup.
- */
-using NearHeapLimitCallback = size_t (*)(void* data, size_t current_heap_limit,
- size_t initial_heap_limit);
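-
-// Sketch (illustrative): grant 50% more headroom instead of letting V8
-// abort; registered via Isolate::AddNearHeapLimitCallback.
-//
-//   size_t OnNearHeapLimit(void* data, size_t current_heap_limit,
-//                          size_t initial_heap_limit) {
-//     return current_heap_limit + current_heap_limit / 2;
-//   }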
-
-/**
- * Collection of shared per-process V8 memory information.
- *
- * Instances of this class can be passed to
- * v8::V8::GetSharedMemoryStatistics to get shared memory statistics from V8.
- */
-class V8_EXPORT SharedMemoryStatistics {
- public:
- SharedMemoryStatistics();
- size_t read_only_space_size() { return read_only_space_size_; }
- size_t read_only_space_used_size() { return read_only_space_used_size_; }
- size_t read_only_space_physical_size() {
- return read_only_space_physical_size_;
- }
-
- private:
- size_t read_only_space_size_;
- size_t read_only_space_used_size_;
- size_t read_only_space_physical_size_;
-
- friend class V8;
- friend class internal::ReadOnlyHeap;
-};
-
-/**
- * Collection of V8 heap information.
- *
- * Instances of this class can be passed to v8::Isolate::GetHeapStatistics to
- * get heap statistics from V8.
- */
-class V8_EXPORT HeapStatistics {
- public:
- HeapStatistics();
- size_t total_heap_size() { return total_heap_size_; }
- size_t total_heap_size_executable() { return total_heap_size_executable_; }
- size_t total_physical_size() { return total_physical_size_; }
- size_t total_available_size() { return total_available_size_; }
- size_t total_global_handles_size() { return total_global_handles_size_; }
- size_t used_global_handles_size() { return used_global_handles_size_; }
- size_t used_heap_size() { return used_heap_size_; }
- size_t heap_size_limit() { return heap_size_limit_; }
- size_t malloced_memory() { return malloced_memory_; }
- size_t external_memory() { return external_memory_; }
- size_t peak_malloced_memory() { return peak_malloced_memory_; }
- size_t number_of_native_contexts() { return number_of_native_contexts_; }
- size_t number_of_detached_contexts() { return number_of_detached_contexts_; }
-
- /**
-   * Returns a 0/1 boolean, which signifies whether V8 overwrites heap garbage
-   * with a bit pattern.
- */
- size_t does_zap_garbage() { return does_zap_garbage_; }
-
- private:
- size_t total_heap_size_;
- size_t total_heap_size_executable_;
- size_t total_physical_size_;
- size_t total_available_size_;
- size_t used_heap_size_;
- size_t heap_size_limit_;
- size_t malloced_memory_;
- size_t external_memory_;
- size_t peak_malloced_memory_;
- bool does_zap_garbage_;
- size_t number_of_native_contexts_;
- size_t number_of_detached_contexts_;
- size_t total_global_handles_size_;
- size_t used_global_handles_size_;
-
- friend class V8;
- friend class Isolate;
-};
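-
-// Usage sketch (illustrative; assumes an initialized |isolate|):
-//
-//   v8::HeapStatistics stats;
-//   isolate->GetHeapStatistics(&stats);
-//   printf("heap: %zu used of %zu (limit %zu)\n", stats.used_heap_size(),
-//          stats.total_heap_size(), stats.heap_size_limit());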
-
-
-class V8_EXPORT HeapSpaceStatistics {
- public:
- HeapSpaceStatistics();
- const char* space_name() { return space_name_; }
- size_t space_size() { return space_size_; }
- size_t space_used_size() { return space_used_size_; }
- size_t space_available_size() { return space_available_size_; }
- size_t physical_space_size() { return physical_space_size_; }
-
- private:
- const char* space_name_;
- size_t space_size_;
- size_t space_used_size_;
- size_t space_available_size_;
- size_t physical_space_size_;
-
- friend class Isolate;
-};
-
-
-class V8_EXPORT HeapObjectStatistics {
- public:
- HeapObjectStatistics();
- const char* object_type() { return object_type_; }
- const char* object_sub_type() { return object_sub_type_; }
- size_t object_count() { return object_count_; }
- size_t object_size() { return object_size_; }
-
- private:
- const char* object_type_;
- const char* object_sub_type_;
- size_t object_count_;
- size_t object_size_;
-
- friend class Isolate;
-};
-
-class V8_EXPORT HeapCodeStatistics {
- public:
- HeapCodeStatistics();
- size_t code_and_metadata_size() { return code_and_metadata_size_; }
- size_t bytecode_and_metadata_size() { return bytecode_and_metadata_size_; }
- size_t external_script_source_size() { return external_script_source_size_; }
-
- private:
- size_t code_and_metadata_size_;
- size_t bytecode_and_metadata_size_;
- size_t external_script_source_size_;
-
- friend class Isolate;
-};
-
-/**
- * A JIT code event is issued each time code is added, moved or removed.
- *
- * \note removal events are not currently issued.
- */
-struct JitCodeEvent {
- enum EventType {
- CODE_ADDED,
- CODE_MOVED,
- CODE_REMOVED,
- CODE_ADD_LINE_POS_INFO,
- CODE_START_LINE_INFO_RECORDING,
- CODE_END_LINE_INFO_RECORDING
- };
-  // Definition of the code position type. The "POSITION" type means a place
-  // in the source code that is of interest when building stack traces, used
-  // to pinpoint the source location of a stack frame as closely as possible.
- // The "STATEMENT_POSITION" means the place at the beginning of each
- // statement, and is used to indicate possible break locations.
- enum PositionType { POSITION, STATEMENT_POSITION };
-
- // There are two different kinds of JitCodeEvents, one for JIT code generated
- // by the optimizing compiler, and one for byte code generated for the
- // interpreter. For JIT_CODE events, the |code_start| member of the event
- // points to the beginning of jitted assembly code, while for BYTE_CODE
- // events, |code_start| points to the first bytecode of the interpreted
- // function.
- enum CodeType { BYTE_CODE, JIT_CODE };
-
- // Type of event.
- EventType type;
- CodeType code_type;
- // Start of the instructions.
- void* code_start;
- // Size of the instructions.
- size_t code_len;
- // Script info for CODE_ADDED event.
- Local<UnboundScript> script;
-  // User-defined data for *_LINE_INFO_* events. It holds the source code
-  // line information returned from the CODE_START_LINE_INFO_RECORDING event
-  // and is passed on to subsequent CODE_ADD_LINE_POS_INFO and
-  // CODE_END_LINE_INFO_RECORDING events.
- void* user_data;
-
- struct name_t {
-    // Name of the object associated with the code; note that the string is
-    // not zero-terminated.
- const char* str;
- // Number of chars in str.
- size_t len;
- };
-
- struct line_info_t {
- // PC offset
- size_t offset;
- // Code position
- size_t pos;
- // The position type.
- PositionType position_type;
- };
-
- struct wasm_source_info_t {
- // Source file name.
- const char* filename;
- // Length of filename.
- size_t filename_size;
- // Line number table, which maps offsets of JITted code to line numbers of
- // source file.
- const line_info_t* line_number_table;
- // Number of entries in the line number table.
- size_t line_number_table_size;
- };
-
- wasm_source_info_t* wasm_source_info;
-
- union {
- // Only valid for CODE_ADDED.
- struct name_t name;
-
- // Only valid for CODE_ADD_LINE_POS_INFO
- struct line_info_t line_info;
-
- // New location of instructions. Only valid for CODE_MOVED.
- void* new_code_start;
- };
-
- Isolate* isolate;
-};
-
-/**
- * Option flags passed to the SetRAILMode function.
- * See documentation https://developers.google.com/web/tools/chrome-devtools/
- * profile/evaluate-performance/rail
- */
-enum RAILMode : unsigned {
- // Response performance mode: In this mode very low virtual machine latency
- // is provided. V8 will try to avoid JavaScript execution interruptions.
- // Throughput may be throttled.
- PERFORMANCE_RESPONSE,
- // Animation performance mode: In this mode low virtual machine latency is
- // provided. V8 will try to avoid as many JavaScript execution interruptions
- // as possible. Throughput may be throttled. This is the default mode.
- PERFORMANCE_ANIMATION,
- // Idle performance mode: The embedder is idle. V8 can complete deferred work
- // in this mode.
- PERFORMANCE_IDLE,
- // Load performance mode: In this mode high throughput is provided. V8 may
- // turn off latency optimizations.
- PERFORMANCE_LOAD
-};
-
-/**
- * Option flags passed to the SetJitCodeEventHandler function.
- */
-enum JitCodeEventOptions {
- kJitCodeEventDefault = 0,
- // Generate callbacks for already existent code.
- kJitCodeEventEnumExisting = 1
-};
-
-
-/**
- * Callback function passed to SetJitCodeEventHandler.
- *
- * \param event code add, move or removal event.
- */
-using JitCodeEventHandler = void (*)(const JitCodeEvent* event);
-
-/**
- * Callback function passed to SetUnhandledExceptionCallback.
- */
-#if defined(V8_OS_WIN)
-using UnhandledExceptionCallback =
- int (*)(_EXCEPTION_POINTERS* exception_pointers);
-#endif
-
-/**
- * Interface for iterating through all external resources in the heap.
- */
-class V8_EXPORT ExternalResourceVisitor {
- public:
- virtual ~ExternalResourceVisitor() = default;
- virtual void VisitExternalString(Local<String> string) {}
-};
-
-/**
- * Interface for iterating through all the persistent handles in the heap.
- */
-class V8_EXPORT PersistentHandleVisitor {
- public:
- virtual ~PersistentHandleVisitor() = default;
- virtual void VisitPersistentHandle(Persistent<Value>* value,
- uint16_t class_id) {}
-};
-
-/**
- * Memory pressure level for the MemoryPressureNotification.
- * kNone hints V8 that there is no memory pressure.
- * kModerate hints V8 to speed up incremental garbage collection at the cost
- * of higher latency due to garbage collection pauses.
- * kCritical hints V8 to free memory as soon as possible. Garbage collection
- * pauses at this level will be large.
- */
-enum class MemoryPressureLevel { kNone, kModerate, kCritical };
-
-/**
- * Handler for embedder roots on non-unified heap garbage collections.
- */
-class V8_EXPORT EmbedderRootsHandler {
- public:
- virtual ~EmbedderRootsHandler() = default;
-
- /**
- * Returns true if the TracedGlobal handle should be considered as root for
- * the currently running non-tracing garbage collection and false otherwise.
- * The default implementation will keep all TracedGlobal references as roots.
- *
- * If this returns false, then V8 may decide that the object referred to by
- * such a handle can be reclaimed. In that case:
- * - No action is required if handles are used with destructors, i.e., by just
- * using |TracedGlobal|.
- * - When run without destructors, i.e., by using |TracedReference|, V8 calls
- * |ResetRoot|.
- *
- * Note that the |handle| is different from the handle that the embedder holds
- * for retaining the object. The embedder may use |WrapperClassId()| to
- * distinguish handles it wants treated as roots from those it does not.
- */
- virtual bool IsRoot(const v8::TracedReference<v8::Value>& handle) = 0;
- virtual bool IsRoot(const v8::TracedGlobal<v8::Value>& handle) = 0;
-
- /**
- * Used in combination with |IsRoot|. Called by V8 when an
- * object that is backed by a handle is reclaimed by a non-tracing garbage
- * collection. It is up to the embedder to reset the original handle.
- *
- * Note that the |handle| is different from the handle that the embedder holds
- * for retaining the object. It is up to the embedder to find the original
- * handle via the object or class id.
- */
- virtual void ResetRoot(const v8::TracedReference<v8::Value>& handle) = 0;
-};
-
-/**
- * Interface for tracing through the embedder heap. During a V8 garbage
- * collection, V8 collects hidden fields of all potential wrappers, and at the
- * end of its marking phase iterates the collection and asks the embedder to
- * trace through its heap and use reporter to report each JavaScript object
- * reachable from any of the given wrappers.
- */
-class V8_EXPORT EmbedderHeapTracer {
- public:
- using EmbedderStackState = cppgc::EmbedderStackState;
-
- enum TraceFlags : uint64_t {
- kNoFlags = 0,
- kReduceMemory = 1 << 0,
- kForced = 1 << 2,
- };
-
- /**
- * Interface for iterating through TracedGlobal handles.
- */
- class V8_EXPORT TracedGlobalHandleVisitor {
- public:
- virtual ~TracedGlobalHandleVisitor() = default;
- virtual void VisitTracedGlobalHandle(const TracedGlobal<Value>& handle) {}
- virtual void VisitTracedReference(const TracedReference<Value>& handle) {}
- };
-
- /**
- * Summary of a garbage collection cycle. See |TraceEpilogue| on how the
- * summary is reported.
- */
- struct TraceSummary {
- /**
- * Time spent managing the retained memory in milliseconds. This can e.g.
- * include the time tracing through objects in the embedder.
- */
- double time = 0.0;
-
- /**
- * Memory retained by the embedder through the |EmbedderHeapTracer|
- * mechanism in bytes.
- */
- size_t allocated_size = 0;
- };
-
- virtual ~EmbedderHeapTracer() = default;
-
- /**
- * Iterates all TracedGlobal handles created for the v8::Isolate the tracer is
- * attached to.
- */
- void IterateTracedGlobalHandles(TracedGlobalHandleVisitor* visitor);
-
- /**
- * Called by the embedder to set the start of the stack which is e.g. used by
- * V8 to determine whether handles are used from stack or heap.
- */
- void SetStackStart(void* stack_start);
-
- /**
- * Called by the embedder to notify V8 of an empty execution stack.
- */
- V8_DEPRECATE_SOON(
- "This call only optimized internal caches which V8 is able to figure out "
- "on its own now.")
- void NotifyEmptyEmbedderStack();
-
- /**
- * Called by v8 to register internal fields of found wrappers.
- *
- * The embedder is expected to store them somewhere and trace reachable
- * wrappers from them when called through |AdvanceTracing|.
- */
- virtual void RegisterV8References(
- const std::vector<std::pair<void*, void*> >& embedder_fields) = 0;
-
- void RegisterEmbedderReference(const BasicTracedReference<v8::Data>& ref);
-
- /**
- * Called at the beginning of a GC cycle.
- */
- virtual void TracePrologue(TraceFlags flags) {}
-
- /**
- * Called to advance tracing in the embedder.
- *
- * The embedder is expected to trace its heap starting from wrappers reported
- * by RegisterV8References method, and report back all reachable wrappers.
- * Furthermore, the embedder is expected to stop tracing by the given
- * deadline. A deadline of infinity means that tracing should be finished.
- *
- * Returns |true| if tracing is done, and false otherwise.
- */
- virtual bool AdvanceTracing(double deadline_in_ms) = 0;
-
- /*
-   * Returns true if there is no more tracing work to be done (see
-   * AdvanceTracing) and false otherwise.
- */
- virtual bool IsTracingDone() = 0;
-
- /**
- * Called at the end of a GC cycle.
- *
-   * Note that allocation is *not* allowed within |TraceEpilogue|. Can be
-   * overridden to fill a |TraceSummary| that is used by V8 to schedule future
- * garbage collections.
- */
- virtual void TraceEpilogue(TraceSummary* trace_summary) {}
-
- /**
- * Called upon entering the final marking pause. No more incremental marking
- * steps will follow this call.
- */
- virtual void EnterFinalPause(EmbedderStackState stack_state) = 0;
-
- /*
- * Called by the embedder to request immediate finalization of the currently
- * running tracing phase that has been started with TracePrologue and not
- * yet finished with TraceEpilogue.
- *
-   * Will be a no-op when not currently tracing.
- *
- * This is an experimental feature.
- */
- void FinalizeTracing();
-
- /**
- * See documentation on EmbedderRootsHandler.
- */
- virtual bool IsRootForNonTracingGC(
- const v8::TracedReference<v8::Value>& handle);
- virtual bool IsRootForNonTracingGC(const v8::TracedGlobal<v8::Value>& handle);
-
- /**
- * See documentation on EmbedderRootsHandler.
- */
- virtual void ResetHandleInNonTracingGC(
- const v8::TracedReference<v8::Value>& handle);
-
- /*
- * Called by the embedder to immediately perform a full garbage collection.
- *
- * Should only be used in testing code.
- */
- void GarbageCollectionForTesting(EmbedderStackState stack_state);
-
- /*
-   * Called by the embedder to signal newly allocated or freed memory. Not
-   * bound to tracing phases. Embedders should consider how often increments
-   * are reported, as V8 may consult global heuristics on whether to trigger
-   * garbage collection on this change.
- */
- void IncreaseAllocatedSize(size_t bytes);
- void DecreaseAllocatedSize(size_t bytes);
-
- /*
-   * Returns the v8::Isolate this tracer is attached to, or |nullptr| if it
- * is not attached to any v8::Isolate.
- */
- v8::Isolate* isolate() const { return isolate_; }
-
- protected:
- v8::Isolate* isolate_ = nullptr;
-
- friend class internal::LocalEmbedderHeapTracer;
-};
-
-/**
- * Callback and supporting data used in SnapshotCreator to implement embedder
- * logic to serialize internal fields.
- * Internal fields that directly reference V8 objects are serialized without
- * calling this callback. Internal fields that contain aligned pointers are
- * serialized by this callback if it returns a non-zero result; otherwise the
- * field is serialized verbatim.
- */
-struct SerializeInternalFieldsCallback {
- using CallbackFunction = StartupData (*)(Local<Object> holder, int index,
- void* data);
- SerializeInternalFieldsCallback(CallbackFunction function = nullptr,
- void* data_arg = nullptr)
- : callback(function), data(data_arg) {}
- CallbackFunction callback;
- void* data;
-};
-// Note that these fields are called "internal fields" in the API and called
-// "embedder fields" within V8.
-using SerializeEmbedderFieldsCallback = SerializeInternalFieldsCallback;
-
-/**
- * Callback and supporting data used to implement embedder logic to deserialize
- * internal fields.
- */
-struct DeserializeInternalFieldsCallback {
- using CallbackFunction = void (*)(Local<Object> holder, int index,
- StartupData payload, void* data);
- DeserializeInternalFieldsCallback(CallbackFunction function = nullptr,
- void* data_arg = nullptr)
- : callback(function), data(data_arg) {}
-  CallbackFunction callback;
- void* data;
-};
-using DeserializeEmbedderFieldsCallback = DeserializeInternalFieldsCallback;
-
-/**
- * Controls how the default MeasureMemoryDelegate reports the result of
- * the memory measurement to JS. With kSummary only the total size is reported.
- * With kDetailed the result includes the size of each native context.
- */
-enum class MeasureMemoryMode { kSummary, kDetailed };
-
-/**
- * Controls how promptly a memory measurement request is executed.
- * By default the measurement is folded with the next scheduled GC which may
- * happen after a while and is forced after some timeout.
- * The kEager mode starts incremental GC right away and is useful for testing.
- * The kLazy mode does not force GC.
- */
-enum class MeasureMemoryExecution { kDefault, kEager, kLazy };
-
-/**
- * The delegate is used in Isolate::MeasureMemory API.
- *
- * It specifies the contexts that need to be measured and gets called when
- * the measurement is completed to report the results.
- */
-class V8_EXPORT MeasureMemoryDelegate {
- public:
- virtual ~MeasureMemoryDelegate() = default;
-
- /**
- * Returns true if the size of the given context needs to be measured.
- */
- virtual bool ShouldMeasure(Local<Context> context) = 0;
-
- /**
- * This function is called when memory measurement finishes.
- *
- * \param context_sizes_in_bytes a vector of (context, size) pairs that
- * includes each context for which ShouldMeasure returned true and that
- * was not garbage collected while the memory measurement was in progress.
- *
- * \param unattributed_size_in_bytes total size of objects that were not
- * attributed to any context (i.e. are likely shared objects).
- */
- virtual void MeasurementComplete(
- const std::vector<std::pair<Local<Context>, size_t>>&
- context_sizes_in_bytes,
- size_t unattributed_size_in_bytes) = 0;
-
- /**
- * Returns a default delegate that resolves the given promise when
- * the memory measurement completes.
- *
- * \param isolate the current isolate
- * \param context the current context
- * \param promise_resolver the promise resolver that is given the
- * result of the memory measurement.
- * \param mode the detail level of the result.
- */
- static std::unique_ptr<MeasureMemoryDelegate> Default(
- Isolate* isolate, Local<Context> context,
- Local<Promise::Resolver> promise_resolver, MeasureMemoryMode mode);
-};
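-
-// Usage sketch (illustrative; assumes an initialized |isolate| and a current
-// |context|): measuring memory with the default delegate and reporting the
-// result through a promise.
-//
-//   v8::Local<v8::Promise::Resolver> resolver =
-//       v8::Promise::Resolver::New(context).ToLocalChecked();
-//   isolate->MeasureMemory(
-//       v8::MeasureMemoryDelegate::Default(isolate, context, resolver,
-//                                          v8::MeasureMemoryMode::kSummary));
-//   // resolver->GetPromise() resolves once the measurement completes.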
-
-/**
- * Isolate represents an isolated instance of the V8 engine. V8 isolates have
- * completely separate states. Objects from one isolate must not be used in
- * other isolates. The embedder can create multiple isolates and use them in
- * parallel in multiple threads. An isolate can be entered by at most one
- * thread at any given time. The Locker/Unlocker API must be used to
- * synchronize.
- */
-class V8_EXPORT Isolate {
- public:
- /**
- * Initial configuration parameters for a new Isolate.
- */
- struct V8_EXPORT CreateParams {
- CreateParams();
- ~CreateParams();
-
- /**
- * Allows the host application to provide the address of a function that is
- * notified each time code is added, moved or removed.
- */
- JitCodeEventHandler code_event_handler = nullptr;
-
- /**
- * ResourceConstraints to use for the new Isolate.
- */
- ResourceConstraints constraints;
-
- /**
- * Explicitly specify a startup snapshot blob. The embedder owns the blob.
- */
- StartupData* snapshot_blob = nullptr;
-
- /**
- * Enables the host application to provide a mechanism for recording
- * statistics counters.
- */
- CounterLookupCallback counter_lookup_callback = nullptr;
-
- /**
- * Enables the host application to provide a mechanism for recording
- * histograms. The CreateHistogram function returns a
- * histogram which will later be passed to the AddHistogramSample
- * function.
- */
- CreateHistogramCallback create_histogram_callback = nullptr;
- AddHistogramSampleCallback add_histogram_sample_callback = nullptr;
-
- /**
- * The ArrayBuffer::Allocator to use for allocating and freeing the backing
- * store of ArrayBuffers.
- *
- * If the shared_ptr version is used, the Isolate instance and every
- * |BackingStore| allocated using this allocator hold a std::shared_ptr
- * to the allocator, in order to facilitate lifetime
- * management for the allocator instance.
- */
- ArrayBuffer::Allocator* array_buffer_allocator = nullptr;
- std::shared_ptr<ArrayBuffer::Allocator> array_buffer_allocator_shared;
-
- /**
- * Specifies an optional nullptr-terminated array of raw addresses in the
- * embedder that V8 can match against during serialization and use for
- * deserialization. This array and its content must stay valid for the
- * entire lifetime of the isolate.
- */
- const intptr_t* external_references = nullptr;
-
- /**
- * Whether calling Atomics.wait (a function that may block) is allowed in
- * this isolate. This can also be configured via SetAllowAtomicsWait.
- */
- bool allow_atomics_wait = true;
-
- /**
- * Termination is postponed when there is no active SafeForTerminationScope.
- */
- bool only_terminate_in_safe_scope = false;
-
- /**
- * The following parameters describe the offsets for addressing type info
- * for wrapped API objects and are used by the fast C API
- * (for details see v8-fast-api-calls.h).
- */
- int embedder_wrapper_type_index = -1;
- int embedder_wrapper_object_index = -1;
- };
-
- /**
- * Stack-allocated class which sets the isolate for all operations
- * executed within a local scope.
- */
- class V8_EXPORT V8_NODISCARD Scope {
- public:
- explicit Scope(Isolate* isolate) : isolate_(isolate) {
- isolate->Enter();
- }
-
- ~Scope() { isolate_->Exit(); }
-
- // Prevent copying of Scope objects.
- Scope(const Scope&) = delete;
- Scope& operator=(const Scope&) = delete;
-
- private:
- Isolate* const isolate_;
- };
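-
-  // Usage sketch (illustrative): a typical isolate lifecycle.
-  //
-  //   v8::Isolate::CreateParams params;
-  //   params.array_buffer_allocator_shared =
-  //       std::shared_ptr<v8::ArrayBuffer::Allocator>(
-  //           v8::ArrayBuffer::Allocator::NewDefaultAllocator());
-  //   v8::Isolate* isolate = v8::Isolate::New(params);
-  //   {
-  //     v8::Isolate::Scope isolate_scope(isolate);
-  //     v8::HandleScope handle_scope(isolate);
-  //     // ... create contexts and run scripts ...
-  //   }
-  //   isolate->Dispose();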
-
- /**
- * Assert that no Javascript code is invoked.
- */
- class V8_EXPORT V8_NODISCARD DisallowJavascriptExecutionScope {
- public:
- enum OnFailure { CRASH_ON_FAILURE, THROW_ON_FAILURE, DUMP_ON_FAILURE };
-
- DisallowJavascriptExecutionScope(Isolate* isolate, OnFailure on_failure);
- ~DisallowJavascriptExecutionScope();
-
- // Prevent copying of Scope objects.
- DisallowJavascriptExecutionScope(const DisallowJavascriptExecutionScope&) =
- delete;
- DisallowJavascriptExecutionScope& operator=(
- const DisallowJavascriptExecutionScope&) = delete;
-
- private:
- OnFailure on_failure_;
- Isolate* isolate_;
-
- bool was_execution_allowed_assert_;
- bool was_execution_allowed_throws_;
- bool was_execution_allowed_dump_;
- };
-
- /**
- * Introduce exception to DisallowJavascriptExecutionScope.
- */
- class V8_EXPORT V8_NODISCARD AllowJavascriptExecutionScope {
- public:
- explicit AllowJavascriptExecutionScope(Isolate* isolate);
- ~AllowJavascriptExecutionScope();
-
- // Prevent copying of Scope objects.
- AllowJavascriptExecutionScope(const AllowJavascriptExecutionScope&) =
- delete;
- AllowJavascriptExecutionScope& operator=(
- const AllowJavascriptExecutionScope&) = delete;
-
- private:
- Isolate* isolate_;
- bool was_execution_allowed_assert_;
- bool was_execution_allowed_throws_;
- bool was_execution_allowed_dump_;
- };
-
- /**
- * Do not run microtasks while this scope is active, even if microtasks are
- * automatically executed otherwise.
- */
- class V8_EXPORT V8_NODISCARD SuppressMicrotaskExecutionScope {
- public:
- explicit SuppressMicrotaskExecutionScope(
- Isolate* isolate, MicrotaskQueue* microtask_queue = nullptr);
- ~SuppressMicrotaskExecutionScope();
-
- // Prevent copying of Scope objects.
- SuppressMicrotaskExecutionScope(const SuppressMicrotaskExecutionScope&) =
- delete;
- SuppressMicrotaskExecutionScope& operator=(
- const SuppressMicrotaskExecutionScope&) = delete;
-
- private:
- internal::Isolate* const isolate_;
- internal::MicrotaskQueue* const microtask_queue_;
- internal::Address previous_stack_height_;
-
- friend class internal::ThreadLocalTop;
- };
-
- /**
-   * This scope allows terminations inside direct V8 API calls and forbids them
- * inside any recursive API calls without explicit SafeForTerminationScope.
- */
- class V8_EXPORT V8_NODISCARD SafeForTerminationScope {
- public:
- explicit SafeForTerminationScope(v8::Isolate* isolate);
- ~SafeForTerminationScope();
-
- // Prevent copying of Scope objects.
- SafeForTerminationScope(const SafeForTerminationScope&) = delete;
- SafeForTerminationScope& operator=(const SafeForTerminationScope&) = delete;
-
- private:
- internal::Isolate* isolate_;
- bool prev_value_;
- };
-
- /**
- * Types of garbage collections that can be requested via
- * RequestGarbageCollectionForTesting.
- */
- enum GarbageCollectionType {
- kFullGarbageCollection,
- kMinorGarbageCollection
- };
-
- /**
- * Features reported via the SetUseCounterCallback callback. Do not change
- * assigned numbers of existing items; add new features to the end of this
- * list.
- */
- enum UseCounterFeature {
- kUseAsm = 0,
- kBreakIterator = 1,
- kLegacyConst = 2,
- kMarkDequeOverflow = 3,
- kStoreBufferOverflow = 4,
- kSlotsBufferOverflow = 5,
- kObjectObserve = 6,
- kForcedGC = 7,
- kSloppyMode = 8,
- kStrictMode = 9,
- kStrongMode = 10,
- kRegExpPrototypeStickyGetter = 11,
- kRegExpPrototypeToString = 12,
- kRegExpPrototypeUnicodeGetter = 13,
- kIntlV8Parse = 14,
- kIntlPattern = 15,
- kIntlResolved = 16,
- kPromiseChain = 17,
- kPromiseAccept = 18,
- kPromiseDefer = 19,
- kHtmlCommentInExternalScript = 20,
- kHtmlComment = 21,
- kSloppyModeBlockScopedFunctionRedefinition = 22,
- kForInInitializer = 23,
- kArrayProtectorDirtied = 24,
- kArraySpeciesModified = 25,
- kArrayPrototypeConstructorModified = 26,
- kArrayInstanceProtoModified = 27,
- kArrayInstanceConstructorModified = 28,
- kLegacyFunctionDeclaration = 29,
- kRegExpPrototypeSourceGetter = 30, // Unused.
- kRegExpPrototypeOldFlagGetter = 31, // Unused.
- kDecimalWithLeadingZeroInStrictMode = 32,
- kLegacyDateParser = 33,
- kDefineGetterOrSetterWouldThrow = 34,
- kFunctionConstructorReturnedUndefined = 35,
- kAssigmentExpressionLHSIsCallInSloppy = 36,
- kAssigmentExpressionLHSIsCallInStrict = 37,
- kPromiseConstructorReturnedUndefined = 38,
- kConstructorNonUndefinedPrimitiveReturn = 39,
- kLabeledExpressionStatement = 40,
- kLineOrParagraphSeparatorAsLineTerminator = 41,
- kIndexAccessor = 42,
- kErrorCaptureStackTrace = 43,
- kErrorPrepareStackTrace = 44,
- kErrorStackTraceLimit = 45,
- kWebAssemblyInstantiation = 46,
- kDeoptimizerDisableSpeculation = 47,
- kArrayPrototypeSortJSArrayModifiedPrototype = 48,
- kFunctionTokenOffsetTooLongForToString = 49,
- kWasmSharedMemory = 50,
- kWasmThreadOpcodes = 51,
- kAtomicsNotify = 52, // Unused.
- kAtomicsWake = 53, // Unused.
- kCollator = 54,
- kNumberFormat = 55,
- kDateTimeFormat = 56,
- kPluralRules = 57,
- kRelativeTimeFormat = 58,
- kLocale = 59,
- kListFormat = 60,
- kSegmenter = 61,
- kStringLocaleCompare = 62,
- kStringToLocaleUpperCase = 63,
- kStringToLocaleLowerCase = 64,
- kNumberToLocaleString = 65,
- kDateToLocaleString = 66,
- kDateToLocaleDateString = 67,
- kDateToLocaleTimeString = 68,
- kAttemptOverrideReadOnlyOnPrototypeSloppy = 69,
- kAttemptOverrideReadOnlyOnPrototypeStrict = 70,
- kOptimizedFunctionWithOneShotBytecode = 71, // Unused.
- kRegExpMatchIsTrueishOnNonJSRegExp = 72,
- kRegExpMatchIsFalseishOnJSRegExp = 73,
- kDateGetTimezoneOffset = 74, // Unused.
- kStringNormalize = 75,
- kCallSiteAPIGetFunctionSloppyCall = 76,
- kCallSiteAPIGetThisSloppyCall = 77,
- kRegExpMatchAllWithNonGlobalRegExp = 78,
- kRegExpExecCalledOnSlowRegExp = 79,
- kRegExpReplaceCalledOnSlowRegExp = 80,
- kDisplayNames = 81,
- kSharedArrayBufferConstructed = 82,
- kArrayPrototypeHasElements = 83,
- kObjectPrototypeHasElements = 84,
- kNumberFormatStyleUnit = 85,
- kDateTimeFormatRange = 86,
- kDateTimeFormatDateTimeStyle = 87,
- kBreakIteratorTypeWord = 88,
- kBreakIteratorTypeLine = 89,
- kInvalidatedArrayBufferDetachingProtector = 90,
- kInvalidatedArrayConstructorProtector = 91,
- kInvalidatedArrayIteratorLookupChainProtector = 92,
- kInvalidatedArraySpeciesLookupChainProtector = 93,
- kInvalidatedIsConcatSpreadableLookupChainProtector = 94,
- kInvalidatedMapIteratorLookupChainProtector = 95,
- kInvalidatedNoElementsProtector = 96,
- kInvalidatedPromiseHookProtector = 97,
- kInvalidatedPromiseResolveLookupChainProtector = 98,
- kInvalidatedPromiseSpeciesLookupChainProtector = 99,
- kInvalidatedPromiseThenLookupChainProtector = 100,
- kInvalidatedRegExpSpeciesLookupChainProtector = 101,
- kInvalidatedSetIteratorLookupChainProtector = 102,
- kInvalidatedStringIteratorLookupChainProtector = 103,
- kInvalidatedStringLengthOverflowLookupChainProtector = 104,
- kInvalidatedTypedArraySpeciesLookupChainProtector = 105,
- kWasmSimdOpcodes = 106,
- kVarRedeclaredCatchBinding = 107,
- kWasmRefTypes = 108,
- kWasmBulkMemory = 109, // Unused.
- kWasmMultiValue = 110,
- kWasmExceptionHandling = 111,
- kInvalidatedMegaDOMProtector = 112,
-
- // If you add new values here, you'll also need to update Chromium's:
- // web_feature.mojom, use_counter_callback.cc, and enums.xml. V8 changes to
- // this list need to be landed first, then changes on the Chromium side.
- kUseCounterFeatureCount // This enum value must be last.
- };
-
- enum MessageErrorLevel {
- kMessageLog = (1 << 0),
- kMessageDebug = (1 << 1),
- kMessageInfo = (1 << 2),
- kMessageError = (1 << 3),
- kMessageWarning = (1 << 4),
- kMessageAll = kMessageLog | kMessageDebug | kMessageInfo | kMessageError |
- kMessageWarning,
- };
-
- using UseCounterCallback = void (*)(Isolate* isolate,
- UseCounterFeature feature);
-
- /**
- * Allocates a new isolate but does not initialize it. Does not change the
- * currently entered isolate.
- *
- * Only Isolate::GetData() and Isolate::SetData(), which access the
- * embedder-controlled parts of the isolate, are allowed to be called on the
- * uninitialized isolate. To initialize the isolate, call
- * Isolate::Initialize().
- *
- * When an isolate is no longer used its resources should be freed
- * by calling Dispose(). Using the delete operator is not allowed.
- *
- * V8::Initialize() must have run prior to this.
- */
- static Isolate* Allocate();
-
- /**
- * Initialize an Isolate previously allocated by Isolate::Allocate().
- */
- static void Initialize(Isolate* isolate, const CreateParams& params);
-
- /**
- * Creates a new isolate. Does not change the currently entered
- * isolate.
- *
- * When an isolate is no longer used its resources should be freed
- * by calling Dispose(). Using the delete operator is not allowed.
- *
- * V8::Initialize() must have run prior to this.
- */
- static Isolate* New(const CreateParams& params);
-
- /**
- * Returns the entered isolate for the current thread or NULL in
- * case there is no current isolate.
- *
- * This method must not be invoked before V8::Initialize() was invoked.
- */
- static Isolate* GetCurrent();
-
- /**
- * Returns the entered isolate for the current thread or NULL in
- * case there is no current isolate.
- *
- * No checks are performed by this method.
- */
- static Isolate* TryGetCurrent();
-
- /**
- * Clears the set of objects held strongly by the heap. This set of
- * objects are originally built when a WeakRef is created or
- * successfully dereferenced.
- *
- * This is invoked automatically after microtasks are run. See
- * MicrotasksPolicy for when microtasks are run.
- *
- * This needs to be manually invoked only if the embedder is manually running
- * microtasks via a custom MicrotaskQueue class's PerformCheckpoint. In that
- * case, it is the embedder's responsibility to make this call at a time which
- * does not interrupt synchronous ECMAScript code execution.
- */
- void ClearKeptObjects();
-
- /**
- * Custom callback used by embedders to help V8 determine if it should abort
-   * when an exception is thrown and no internal handler is predicted to catch
-   * it. If --abort-on-uncaught-exception is used on the command line,
- * then V8 will abort if either:
- * - no custom callback is set.
- * - the custom callback set returns true.
- * Otherwise, the custom callback will not be called and V8 will not abort.
- */
- using AbortOnUncaughtExceptionCallback = bool (*)(Isolate*);
- void SetAbortOnUncaughtExceptionCallback(
- AbortOnUncaughtExceptionCallback callback);
-
- /**
- * This specifies the callback called by the upcoming dynamic
- * import() language feature to load modules.
- */
- V8_DEPRECATED(
- "Use the version of SetHostImportModuleDynamicallyCallback that takes a "
- "HostImportModuleDynamicallyWithImportAssertionsCallback instead")
- void SetHostImportModuleDynamicallyCallback(
- HostImportModuleDynamicallyCallback callback);
-
- /**
- * This specifies the callback called by the upcoming dynamic
- * import() language feature to load modules.
- */
- void SetHostImportModuleDynamicallyCallback(
- HostImportModuleDynamicallyWithImportAssertionsCallback callback);
-
- /**
- * This specifies the callback called by the upcoming import.meta
- * language feature to retrieve host-defined meta data for a module.
- */
- void SetHostInitializeImportMetaObjectCallback(
- HostInitializeImportMetaObjectCallback callback);
-
- /**
- * This specifies the callback called when the stack property of Error
- * is accessed.
- */
- void SetPrepareStackTraceCallback(PrepareStackTraceCallback callback);
-
- /**
- * Optional notification that the system is running low on memory.
- * V8 uses these notifications to guide heuristics.
- * It is allowed to call this function from another thread while
- * the isolate is executing long running JavaScript code.
- */
- void MemoryPressureNotification(MemoryPressureLevel level);
-
- /**
- * Drop non-essential caches. Should only be called from testing code.
- * The method can potentially block for a long time and does not necessarily
- * trigger GC.
- */
- void ClearCachesForTesting();
-
- /**
- * Methods below this point require holding a lock (using Locker) in
- * a multi-threaded environment.
- */
-
- /**
- * Sets this isolate as the entered one for the current thread.
- * Saves the previously entered one (if any), so that it can be
- * restored when exiting. Re-entering an isolate is allowed.
- */
- void Enter();
-
- /**
- * Exits this isolate by restoring the previously entered one in the
- * current thread. The isolate may still stay the same, if it was
- * entered more than once.
- *
- * Requires: this == Isolate::GetCurrent().
- */
- void Exit();
-
- /**
- * Disposes the isolate. The isolate must not be entered by any
- * thread to be disposable.
- */
- void Dispose();
-
- /**
- * Dumps activated low-level V8 internal stats. This can be used instead
- * of performing a full isolate disposal.
- */
- void DumpAndResetStats();
-
- /**
- * Discards all V8 thread-specific data for the Isolate. Should be used
- * if a thread is terminating and it has used an Isolate that will outlive
- * the thread -- all thread-specific data for an Isolate is discarded when
- * an Isolate is disposed so this call is pointless if an Isolate is about
- * to be Disposed.
- */
- void DiscardThreadSpecificMetadata();
-
- /**
- * Associate embedder-specific data with the isolate. |slot| has to be
- * between 0 and GetNumberOfDataSlots() - 1.
- */
- V8_INLINE void SetData(uint32_t slot, void* data);
-
- /**
- * Retrieve embedder-specific data from the isolate.
- * Returns NULL if SetData has never been called for the given |slot|.
- */
- V8_INLINE void* GetData(uint32_t slot);
-
- /**
- * Returns the maximum number of available embedder data slots. Valid slots
-   * are in the range of 0 to GetNumberOfDataSlots() - 1.
- */
- V8_INLINE static uint32_t GetNumberOfDataSlots();
-
- /**
-   * Returns data that was previously attached to the isolate snapshot via
-   * SnapshotCreator, and removes the reference to it. Repeated calls with
-   * the same index return an empty MaybeLocal.
- */
- template <class T>
- V8_INLINE MaybeLocal<T> GetDataFromSnapshotOnce(size_t index);
-
- /**
- * Get statistics about the heap memory usage.
- */
- void GetHeapStatistics(HeapStatistics* heap_statistics);
-
- /**
- * Returns the number of spaces in the heap.
- */
- size_t NumberOfHeapSpaces();
-
- /**
- * Get the memory usage of a space in the heap.
- *
- * \param space_statistics The HeapSpaceStatistics object to fill in
- * statistics.
- * \param index The index of the space to get statistics from, which ranges
- * from 0 to NumberOfHeapSpaces() - 1.
- * \returns true on success.
- */
- bool GetHeapSpaceStatistics(HeapSpaceStatistics* space_statistics,
- size_t index);
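-
- /**
- * A sketch of reading the statistics above, assuming an initialized
- * |isolate|; the printf formatting is incidental.
- *
- * \code
- *   v8::HeapStatistics heap;
- *   isolate->GetHeapStatistics(&heap);
- *   printf("used %zu of %zu bytes\n", heap.used_heap_size(),
- *          heap.total_heap_size());
- *   for (size_t i = 0; i < isolate->NumberOfHeapSpaces(); ++i) {
- *     v8::HeapSpaceStatistics space;
- *     if (isolate->GetHeapSpaceStatistics(&space, i)) {
- *       printf("%s: %zu bytes used\n", space.space_name(),
- *              space.space_used_size());
- *     }
- *   }
- * \endcode
- */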
-
- /**
- * Returns the number of types of objects tracked in the heap at GC.
- */
- size_t NumberOfTrackedHeapObjectTypes();
-
- /**
- * Get statistics about objects in the heap.
- *
- * \param object_statistics The HeapObjectStatistics object to fill in
- * statistics of objects of given type, which were live in the previous GC.
- * \param type_index The index of the type of object to fill details about,
- * which ranges from 0 to NumberOfTrackedHeapObjectTypes() - 1.
- * \returns true on success.
- */
- bool GetHeapObjectStatisticsAtLastGC(HeapObjectStatistics* object_statistics,
- size_t type_index);
-
- /**
- * Get statistics about code and its metadata in the heap.
- *
- * \param object_statistics The HeapCodeStatistics object to fill in
- * statistics of code, bytecode and their metadata.
- * \returns true on success.
- */
- bool GetHeapCodeAndMetadataStatistics(HeapCodeStatistics* object_statistics);
-
- /**
- * This API is experimental and may change significantly.
- *
- * Enqueues a memory measurement request and invokes the delegate with the
- * results.
- *
- * \param delegate the delegate that defines which contexts to measure and
- * reports the results.
- *
- * \param execution the promptness with which the memory measurement is
- * executed. The kEager value is expected to be used only in tests.
- */
- bool MeasureMemory(
- std::unique_ptr<MeasureMemoryDelegate> delegate,
- MeasureMemoryExecution execution = MeasureMemoryExecution::kDefault);
-
- /**
- * Get a call stack sample from the isolate.
- * \param state Execution state.
- * \param frames Caller allocated buffer to store stack frames.
- * \param frames_limit Maximum number of frames to capture. The buffer must
- * be large enough to hold the number of frames.
- * \param sample_info The sample info is filled in by the function and
- * provides the number of actually captured stack frames and
- * the current VM state.
- * \note GetStackSample should only be called when the JS thread is paused or
- * interrupted. Otherwise the behavior is undefined.
- */
- void GetStackSample(const RegisterState& state, void** frames,
- size_t frames_limit, SampleInfo* sample_info);
-
- /**
- * Adjusts the amount of registered external memory. Used to give V8 an
- * indication of the amount of externally allocated memory that is kept alive
- * by JavaScript objects. V8 uses this to decide when to perform global
- * garbage collections. Registering externally allocated memory will trigger
- * global garbage collections more often than it would otherwise in an attempt
- * to garbage collect the JavaScript objects that keep the externally
- * allocated memory alive.
- *
- * \param change_in_bytes the change in externally allocated memory that is
- * kept alive by JavaScript objects.
- * \returns the adjusted value.
- */
- int64_t AdjustAmountOfExternalAllocatedMemory(int64_t change_in_bytes);
-
- /**
- * Returns the number of phantom handles without callbacks that were reset
- * by the garbage collector since the last call to this function.
- */
- size_t NumberOfPhantomHandleResetsSinceLastCall();
-
- /**
- * Returns the heap profiler for this isolate. Will return NULL until the
- * isolate is initialized.
- */
- HeapProfiler* GetHeapProfiler();
-
- /**
- * Tells the VM whether the embedder is idle or not.
- */
- void SetIdle(bool is_idle);
-
- /** Returns the ArrayBuffer::Allocator used in this isolate. */
- ArrayBuffer::Allocator* GetArrayBufferAllocator();
-
- /** Returns true if this isolate has a current context. */
- bool InContext();
-
- /**
- * Returns the context of the currently running JavaScript, or the context
- * on the top of the stack if no JavaScript is running.
- */
- Local<Context> GetCurrentContext();
-
- /**
- * Returns either the last context entered through V8's C++ API, or the
- * context of the currently running microtask while processing microtasks.
- * If a context is entered while executing a microtask, that context is
- * returned.
- */
- Local<Context> GetEnteredOrMicrotaskContext();
-
- /**
- * Returns the Context that corresponds to the Incumbent realm in HTML spec.
- * https://html.spec.whatwg.org/multipage/webappapis.html#incumbent
- */
- Local<Context> GetIncumbentContext();
-
- /**
- * Schedules a v8::Exception::Error with the given message.
- * See ThrowException for more details. Templatized to provide compile-time
- * errors in case of too long strings (see v8::String::NewFromUtf8Literal).
- */
- template <int N>
- Local<Value> ThrowError(const char (&message)[N]) {
- return ThrowError(String::NewFromUtf8Literal(this, message));
- }
- Local<Value> ThrowError(Local<String> message);
-
- /**
- * Schedules an exception to be thrown when returning to JavaScript. When an
- * exception has been scheduled it is illegal to invoke any JavaScript
- * operation; the caller must return immediately and only after the exception
- * has been handled does it become legal to invoke JavaScript operations.
- */
- Local<Value> ThrowException(Local<Value> exception);
-
- using GCCallback = void (*)(Isolate* isolate, GCType type,
- GCCallbackFlags flags);
- using GCCallbackWithData = void (*)(Isolate* isolate, GCType type,
- GCCallbackFlags flags, void* data);
-
- /**
- * Enables the host application to receive a notification before a
- * garbage collection. Allocations are allowed in the callback function,
- * but the callback is not re-entrant: if an allocation inside it triggers
- * a garbage collection, the callback won't be called again.
- * It is possible to specify a GCType filter for your callback, but it is
- * not possible to register the same callback function twice with
- * different GCType filters.
- */
- void AddGCPrologueCallback(GCCallbackWithData callback, void* data = nullptr,
- GCType gc_type_filter = kGCTypeAll);
- void AddGCPrologueCallback(GCCallback callback,
- GCType gc_type_filter = kGCTypeAll);
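-
- /**
- * A minimal sketch of registering a prologue callback for full GCs only;
- * OnGCStart is a hypothetical embedder function.
- *
- * \code
- *   static void OnGCStart(v8::Isolate* isolate, v8::GCType type,
- *                         v8::GCCallbackFlags flags) {
- *     // e.g. record a timestamp; allocation is allowed here.
- *   }
- *   isolate->AddGCPrologueCallback(OnGCStart, v8::kGCTypeMarkSweepCompact);
- *   // ... later:
- *   isolate->RemoveGCPrologueCallback(OnGCStart);
- * \endcode
- */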
-
- /**
- * This function removes a callback which was installed by the
- * AddGCPrologueCallback function.
- */
- void RemoveGCPrologueCallback(GCCallbackWithData callback, void* data = nullptr);
- void RemoveGCPrologueCallback(GCCallback callback);
-
- /**
- * Sets the embedder heap tracer for the isolate.
- */
- void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
-
- /**
- * Gets the currently active heap tracer for the isolate.
- */
- EmbedderHeapTracer* GetEmbedderHeapTracer();
-
- /**
- * Sets an embedder roots handle that V8 should consider when performing
- * non-unified heap garbage collections.
- *
- * Using only EmbedderHeapTracer automatically sets up a default handler.
- * The intended use case is for setting a custom handler after invoking
- * `AttachCppHeap()`.
- *
- * V8 does not take ownership of the handler.
- */
- void SetEmbedderRootsHandler(EmbedderRootsHandler* handler);
-
- /**
- * Attaches a managed C++ heap as an extension to the JavaScript heap. The
- * embedder maintains ownership of the CppHeap. At most one C++ heap can be
- * attached to V8.
- *
- * This is an experimental feature and may still change significantly.
- */
- void AttachCppHeap(CppHeap*);
-
- /**
- * Detaches a managed C++ heap if one was attached using `AttachCppHeap()`.
- *
- * This is an experimental feature and may still change significantly.
- */
- void DetachCppHeap();
-
- /**
- * This is an experimental feature and may still change significantly.
- *
- * \returns the C++ heap managed by V8. Only available if such a heap has been
- * attached using `AttachCppHeap()`.
- */
- CppHeap* GetCppHeap() const;
-
- /**
- * Use for |AtomicsWaitCallback| to indicate the type of event it receives.
- */
- enum class AtomicsWaitEvent {
- /** Indicates that this call is happening before waiting. */
- kStartWait,
- /** `Atomics.wait()` finished because of an `Atomics.wake()` call. */
- kWokenUp,
- /** `Atomics.wait()` finished because it timed out. */
- kTimedOut,
- /** `Atomics.wait()` was interrupted through |TerminateExecution()|. */
- kTerminatedExecution,
- /** `Atomics.wait()` was stopped through |AtomicsWaitWakeHandle|. */
- kAPIStopped,
- /** `Atomics.wait()` did not wait, as the initial condition was not met. */
- kNotEqual
- };
-
- /**
- * Passed to |AtomicsWaitCallback| as a means of stopping an ongoing
- * `Atomics.wait` call.
- */
- class V8_EXPORT AtomicsWaitWakeHandle {
- public:
- /**
- * Stop this `Atomics.wait()` call and call the |AtomicsWaitCallback|
- * with |kAPIStopped|.
- *
- * This function may be called from another thread. The caller has to ensure
- * through proper synchronization that it is not called after
- * the finishing |AtomicsWaitCallback|.
- *
- * Note that the ECMAScript specification does not plan for the possibility
- * of wakeups that are neither coming from a timeout nor an `Atomics.wake()`
- * call, so this may invalidate assumptions made by existing code.
- * The embedder may accordingly wish to schedule an exception in the
- * finishing |AtomicsWaitCallback|.
- */
- void Wake();
- };
-
- /**
- * Embedder callback for `Atomics.wait()` that can be added through
- * |SetAtomicsWaitCallback|.
- *
- * This will be called just before starting to wait with the |event| value
- * |kStartWait| and after finishing waiting with one of the other
- * values of |AtomicsWaitEvent| inside of an `Atomics.wait()` call.
- *
- * |array_buffer| will refer to the underlying SharedArrayBuffer,
- * |offset_in_bytes| to the location of the waited-on memory address inside
- * the SharedArrayBuffer.
- *
- * |value| and |timeout_in_ms| will be the values passed to
- * the `Atomics.wait()` call. If no timeout was used, |timeout_in_ms|
- * will be `INFINITY`.
- *
- * In the |kStartWait| callback, |stop_handle| will be an object that
- * is only valid until the corresponding finishing callback and that
- * can be used to stop the wait process while it is happening.
- *
- * This callback may schedule exceptions, *unless* |event| is equal to
- * |kTerminatedExecution|.
- */
- using AtomicsWaitCallback = void (*)(AtomicsWaitEvent event,
- Local<SharedArrayBuffer> array_buffer,
- size_t offset_in_bytes, int64_t value,
- double timeout_in_ms,
- AtomicsWaitWakeHandle* stop_handle,
- void* data);
-
- /**
- * Set a new |AtomicsWaitCallback|. This overrides an earlier
- * |AtomicsWaitCallback|, if there was any. If |callback| is nullptr,
- * this unsets the callback. |data| will be passed to the callback
- * as its last parameter.
- */
- void SetAtomicsWaitCallback(AtomicsWaitCallback callback, void* data);
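-
- /**
- * A sketch of a matching callback, with OnAtomicsWait being a hypothetical
- * embedder function; only the |kStartWait| branch is shown.
- *
- * \code
- *   static void OnAtomicsWait(
- *       v8::Isolate::AtomicsWaitEvent event,
- *       v8::Local<v8::SharedArrayBuffer> array_buffer, size_t offset_in_bytes,
- *       int64_t value, double timeout_in_ms,
- *       v8::Isolate::AtomicsWaitWakeHandle* stop_handle, void* data) {
- *     if (event == v8::Isolate::AtomicsWaitEvent::kStartWait) {
- *       // The embedder could stash |stop_handle| here and call Wake() from
- *       // another thread, e.g. during shutdown; it is invalid after the
- *       // finishing callback.
- *     }
- *   }
- *   isolate->SetAtomicsWaitCallback(OnAtomicsWait, nullptr);
- * \endcode
- */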
-
- /**
- * Enables the host application to receive a notification after a
- * garbage collection. Allocations are allowed in the callback function,
- * but the callback is not re-entrant: if an allocation inside it triggers
- * a garbage collection, the callback won't be called again.
- * It is possible to specify a GCType filter for your callback, but it is
- * not possible to register the same callback function twice with
- * different GCType filters.
- */
- void AddGCEpilogueCallback(GCCallbackWithData callback, void* data = nullptr,
- GCType gc_type_filter = kGCTypeAll);
- void AddGCEpilogueCallback(GCCallback callback,
- GCType gc_type_filter = kGCTypeAll);
-
- /**
- * This function removes a callback which was installed by the
- * AddGCEpilogueCallback function.
- */
- void RemoveGCEpilogueCallback(GCCallbackWithData callback,
- void* data = nullptr);
- void RemoveGCEpilogueCallback(GCCallback callback);
-
- using GetExternallyAllocatedMemoryInBytesCallback = size_t (*)();
-
- /**
- * Set the callback that tells V8 how much memory is currently allocated
- * externally of the V8 heap. Ideally this memory is somehow connected to V8
- * objects and may get freed up when the corresponding V8 objects get
- * collected by a V8 garbage collection.
- */
- void SetGetExternallyAllocatedMemoryInBytesCallback(
- GetExternallyAllocatedMemoryInBytesCallback callback);
-
- /**
- * Forcefully terminate the current thread of JavaScript execution
- * in the given isolate.
- *
- * This method can be used by any thread even if that thread has not
- * acquired the V8 lock with a Locker object.
- */
- void TerminateExecution();
-
- /**
- * Is V8 terminating JavaScript execution?
- *
- * Returns true if JavaScript execution is currently terminating
- * because of a call to TerminateExecution. In that case there are
- * still JavaScript frames on the stack and the termination
- * exception is still active.
- */
- bool IsExecutionTerminating();
-
- /**
- * Resume execution capability in the given isolate, whose execution
- * was previously forcefully terminated using TerminateExecution().
- *
- * When execution is forcefully terminated using TerminateExecution(),
- * the isolate cannot resume execution until all JavaScript frames
- * have propagated the uncatchable exception which is generated. This
- * method allows the program embedding the engine to handle the
- * termination event and resume execution capability, even if
- * JavaScript frames remain on the stack.
- *
- * This method can be used by any thread even if that thread has not
- * acquired the V8 lock with a Locker object.
- */
- void CancelTerminateExecution();
-
- /**
- * Request V8 to interrupt long running JavaScript code and invoke
- * the given |callback| passing the given |data| to it. After |callback|
- * returns, control will be returned to the JavaScript code.
- * There may be a number of interrupt requests in flight.
- * This can be called from another thread without acquiring a |Locker|.
- * The registered |callback| must not re-enter the interrupted Isolate.
- */
- void RequestInterrupt(InterruptCallback callback, void* data);
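-
- /**
- * A sketch of requesting an interrupt from a watchdog thread;
- * |interrupt_seen| is a hypothetical flag owned by the embedder and must
- * outlive the request.
- *
- * \code
- *   isolate->RequestInterrupt(
- *       [](v8::Isolate* isolate, void* data) {
- *         // Runs on the JS thread; keep it short and do not re-enter the
- *         // isolate.
- *         *static_cast<bool*>(data) = true;
- *       },
- *       &interrupt_seen);
- * \endcode
- */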
-
- /**
- * Returns true if there is ongoing background work within V8 that will
- * eventually post a foreground task, like asynchronous WebAssembly
- * compilation.
- */
- bool HasPendingBackgroundTasks();
-
- /**
- * Request garbage collection in this Isolate. It is only valid to call this
- * function if --expose_gc was specified.
- *
- * This should only be used for testing purposes and not to enforce a garbage
- * collection schedule. It has a strong negative impact on garbage
- * collection performance. Use IdleNotificationDeadline() or
- * LowMemoryNotification() instead to influence the garbage collection
- * schedule.
- */
- void RequestGarbageCollectionForTesting(GarbageCollectionType type);
-
- /**
- * Set the callback to invoke for logging events.
- */
- void SetEventLogger(LogEventCallback that);
-
- /**
- * Adds a callback to notify the host application right before a script
- * is about to run. If a script re-enters the runtime during execution, the
- * BeforeCallEnteredCallback is invoked for each re-entrance.
- * Executing scripts inside the callback will re-trigger the callback.
- */
- void AddBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
-
- /**
- * Removes callback that was installed by AddBeforeCallEnteredCallback.
- */
- void RemoveBeforeCallEnteredCallback(BeforeCallEnteredCallback callback);
-
- /**
- * Adds a callback to notify the host application when a script finished
- * running. If a script re-enters the runtime during execution, the
- * CallCompletedCallback is only invoked when the outer-most script
- * execution ends. Executing scripts inside the callback does not trigger
- * further callbacks.
- */
- void AddCallCompletedCallback(CallCompletedCallback callback);
-
- /**
- * Removes callback that was installed by AddCallCompletedCallback.
- */
- void RemoveCallCompletedCallback(CallCompletedCallback callback);
-
- /**
- * Set the PromiseHook callback for various promise lifecycle
- * events.
- */
- void SetPromiseHook(PromiseHook hook);
-
- /**
- * Set a callback to be notified about a promise rejection with no handler,
- * or the revocation of such a previous notification once a handler is added.
- */
- void SetPromiseRejectCallback(PromiseRejectCallback callback);
-
- /**
- * Runs the default MicrotaskQueue until it is empty and performs other
- * microtask checkpoint steps, such as calling ClearKeptObjects. Asserts that
- * the MicrotasksPolicy is not kScoped. Any exceptions thrown by microtask
- * callbacks are swallowed.
- */
- void PerformMicrotaskCheckpoint();
-
- /**
- * Enqueues the microtask to the default MicrotaskQueue.
- */
- void EnqueueMicrotask(Local<Function> microtask);
-
- /**
- * Enqueues the callback to the default MicrotaskQueue.
- */
- void EnqueueMicrotask(MicrotaskCallback callback, void* data = nullptr);
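-
- /**
- * A sketch of explicit microtask pumping with the two methods above,
- * assuming the embedder controls checkpoints itself:
- *
- * \code
- *   isolate->SetMicrotasksPolicy(v8::MicrotasksPolicy::kExplicit);
- *   isolate->EnqueueMicrotask(
- *       [](void* data) {
- *         // Runs at the next microtask checkpoint.
- *       },
- *       nullptr);
- *   isolate->PerformMicrotaskCheckpoint();  // drains the default queue
- * \endcode
- */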
-
- /**
- * Controls how Microtasks are invoked. See MicrotasksPolicy for details.
- */
- void SetMicrotasksPolicy(MicrotasksPolicy policy);
-
- /**
- * Returns the policy controlling how Microtasks are invoked.
- */
- MicrotasksPolicy GetMicrotasksPolicy() const;
-
- /**
- * Adds a callback to notify the host application after
- * microtasks were run on the default MicrotaskQueue. The callback is
- * triggered by an explicit PerformMicrotaskCheckpoint call or by automatic
- * microtask execution (see SetMicrotasksPolicy).
- *
- * The callback will trigger even if running microtasks was attempted,
- * but the microtask queue was empty and no single microtask was actually
- * executed.
- *
- * Executing scripts inside the callback will not re-trigger microtasks and
- * the callback.
- */
- void AddMicrotasksCompletedCallback(
- MicrotasksCompletedCallbackWithData callback, void* data = nullptr);
-
- /**
- * Removes callback that was installed by AddMicrotasksCompletedCallback.
- */
- void RemoveMicrotasksCompletedCallback(
- MicrotasksCompletedCallbackWithData callback, void* data = nullptr);
-
- /**
- * Sets a callback for counting the number of times a feature of V8 is used.
- */
- void SetUseCounterCallback(UseCounterCallback callback);
-
- /**
- * Enables the host application to provide a mechanism for recording
- * statistics counters.
- */
- void SetCounterFunction(CounterLookupCallback);
-
- /**
- * Enables the host application to provide a mechanism for recording
- * histograms. The CreateHistogram function returns a
- * histogram which will later be passed to the AddHistogramSample
- * function.
- */
- void SetCreateHistogramFunction(CreateHistogramCallback);
- void SetAddHistogramSampleFunction(AddHistogramSampleCallback);
-
- /**
- * Enables the host application to provide a mechanism for recording
- * event-based metrics. In order to use this interface,
- * include/v8-metrics.h needs to be included and the recorder needs to be
- * derived from the
- * Recorder base class defined there.
- * This method can only be called once per isolate and must happen during
- * isolate initialization before background threads are spawned.
- */
- void SetMetricsRecorder(
- const std::shared_ptr<metrics::Recorder>& metrics_recorder);
-
- /**
- * Enables the host application to provide a mechanism for recording a
- * predefined set of data as crash keys to be used in postmortem debugging in
- * case of a crash.
- */
- void SetAddCrashKeyCallback(AddCrashKeyCallback);
-
- /**
- * Optional notification that the embedder is idle.
- * V8 uses the notification to perform garbage collection.
- * This call can be used repeatedly if the embedder remains idle.
- * Returns true if the embedder should stop calling IdleNotificationDeadline
- * until real work has been done. This indicates that V8 has done
- * as much cleanup as it will be able to do.
- *
- * The deadline_in_seconds argument specifies the deadline V8 has to finish
- * garbage collection work. deadline_in_seconds is compared with
- * MonotonicallyIncreasingTime() and should be based on the same timebase as
- * that function. There is no guarantee that the actual work will be done
- * within the time limit.
- */
- bool IdleNotificationDeadline(double deadline_in_seconds);
-
- /**
- * Optional notification that the system is running low on memory.
- * V8 uses these notifications to attempt to free memory.
- */
- void LowMemoryNotification();
-
- /**
- * Optional notification that a context has been disposed. V8 uses these
- * notifications to guide the GC heuristic and cancel FinalizationRegistry
- * cleanup tasks. Returns the number of context disposals - including this one
- * - since the last time V8 had a chance to clean up.
- *
- * The optional parameter |dependant_context| specifies whether the disposed
- * context was depending on state from other contexts or not.
- */
- int ContextDisposedNotification(bool dependant_context = true);
-
- /**
- * Optional notification that the isolate switched to the foreground.
- * V8 uses these notifications to guide heuristics.
- */
- void IsolateInForegroundNotification();
-
- /**
- * Optional notification that the isolate switched to the background.
- * V8 uses these notifications to guide heuristics.
- */
- void IsolateInBackgroundNotification();
-
- /**
- * Optional notification which will enable the memory savings mode.
- * V8 uses this notification to guide heuristics which may result in a
- * smaller memory footprint at the cost of reduced runtime performance.
- */
- void EnableMemorySavingsMode();
-
- /**
- * Optional notification which will disable the memory savings mode.
- */
- void DisableMemorySavingsMode();
-
- /**
- * Optional notification to tell V8 the current performance requirements
- * of the embedder based on RAIL.
- * V8 uses these notifications to guide heuristics.
- * This is an unfinished experimental feature. Semantics and implementation
- * may change frequently.
- */
- void SetRAILMode(RAILMode rail_mode);
-
- /**
- * Updates the load start time of the RAIL mode.
- */
- void UpdateLoadStartTime();
-
- /**
- * Optional notification to tell V8 the current isolate is used for debugging
- * and requires a higher heap limit.
- */
- void IncreaseHeapLimitForDebugging();
-
- /**
- * Restores the original heap limit after IncreaseHeapLimitForDebugging().
- */
- void RestoreOriginalHeapLimit();
-
- /**
- * Returns true if the heap limit was increased for debugging and the
- * original heap limit was not restored yet.
- */
- bool IsHeapLimitIncreasedForDebugging();
-
- /**
- * Allows the host application to provide the address of a function that is
- * notified each time code is added, moved or removed.
- *
- * \param options options for the JIT code event handler.
- * \param event_handler the JIT code event handler, which will be invoked
- * each time code is added, moved or removed.
- * \note \p event_handler won't get notified of code that already exists.
- * \note since code removal notifications are not currently issued, the
- * \p event_handler may get notifications of code that overlaps earlier
- * code notifications. This happens when code areas are reused, and the
- * earlier overlapping code areas should therefore be discarded.
- * \note the events passed to \p event_handler and the strings they point to
- * are not guaranteed to live past each call. The \p event_handler must
- * copy strings and other parameters it needs to keep around.
- * \note the set of events declared in JitCodeEvent::EventType is expected to
- * grow over time, and the JitCodeEvent structure is expected to accrue
- * new members. The \p event_handler function must ignore event codes
- * it does not recognize to maintain future compatibility.
- * \note Use Isolate::CreateParams to get events for code executed during
- * Isolate setup.
- */
- void SetJitCodeEventHandler(JitCodeEventOptions options,
- JitCodeEventHandler event_handler);
-
- /**
- * Modifies the stack limit for this Isolate.
- *
- * \param stack_limit An address beyond which the VM's stack may not grow.
- *
- * \note If you are using threads then you should hold the V8::Locker lock
- * while setting the stack limit and you must set a non-default stack
- * limit separately for each thread.
- */
- void SetStackLimit(uintptr_t stack_limit);
-
- /**
- * Returns a memory range that can potentially contain jitted code. Code for
- * V8's 'builtins' will not be in this range if embedded builtins are enabled.
- *
- * On Win64, embedders are advised to install function table callbacks for
- * these ranges, as default SEH won't be able to unwind through jitted code.
- * The first page of the code range is reserved for the embedder and is
- * committed, writable, and executable, to be used to store unwind data, as
- * documented in
- * https://docs.microsoft.com/en-us/cpp/build/exception-handling-x64.
- *
- * Might be empty on other platforms.
- *
- * https://code.google.com/p/v8/issues/detail?id=3598
- */
- void GetCodeRange(void** start, size_t* length_in_bytes);
-
- /**
- * As GetCodeRange, but for embedded builtins (these live in a distinct
- * memory region from other V8 Code objects).
- */
- void GetEmbeddedCodeRange(const void** start, size_t* length_in_bytes);
-
- /**
- * Returns the JSEntryStubs necessary for use with the Unwinder API.
- */
- JSEntryStubs GetJSEntryStubs();
-
- static constexpr size_t kMinCodePagesBufferSize = 32;
-
- /**
- * Copies the code heap pages currently in use by V8 into |code_pages_out|.
- * |code_pages_out| must have at least kMinCodePagesBufferSize capacity and
- * must be empty.
- *
- * Signal-safe, does not allocate, does not access the V8 heap.
- * No code on the stack can rely on pages that might be missing.
- *
- * Returns the number of pages available to be copied, which might be greater
- * than |capacity|. In this case, only |capacity| pages will be copied into
- * |code_pages_out|. The caller should provide a bigger buffer on the next
- * call in order to get all available code pages, but this is not required.
- */
- size_t CopyCodePages(size_t capacity, MemoryRange* code_pages_out);
-
- /** Set the callback to invoke in case of fatal errors. */
- void SetFatalErrorHandler(FatalErrorCallback that);
-
- /** Set the callback to invoke in case of OOM errors. */
- void SetOOMErrorHandler(OOMErrorCallback that);
-
- /**
- * Add a callback to invoke in case the heap size is close to the heap limit.
- * If multiple callbacks are added, only the most recently added callback is
- * invoked.
- */
- void AddNearHeapLimitCallback(NearHeapLimitCallback callback, void* data);
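-
- /**
- * A minimal sketch of a near-heap-limit callback; doubling the limit is one
- * common way to give the isolate headroom to unwind or report before OOM.
- *
- * \code
- *   static size_t OnNearHeapLimit(void* data, size_t current_heap_limit,
- *                                 size_t initial_heap_limit) {
- *     return current_heap_limit * 2;  // raise the limit
- *   }
- *   isolate->AddNearHeapLimitCallback(OnNearHeapLimit, nullptr);
- * \endcode
- */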
-
- /**
- * Remove the given callback and restore the heap limit to the
- * given limit. If the given limit is zero, then it is ignored.
- * If the current heap size is greater than the given limit,
- * then the heap limit is restored to the minimal limit that
- * is possible for the current heap size.
- */
- void RemoveNearHeapLimitCallback(NearHeapLimitCallback callback,
- size_t heap_limit);
-
- /**
- * If the heap limit was changed by the NearHeapLimitCallback, then the
- * initial heap limit will be restored once the heap size falls below the
- * given threshold percentage of the initial heap limit.
- * The threshold percentage is a number in the (0.0, 1.0) range.
- */
- void AutomaticallyRestoreInitialHeapLimit(double threshold_percent = 0.5);
-
- /**
- * Set the callback to invoke to check if code generation from
- * strings should be allowed.
- */
- void SetModifyCodeGenerationFromStringsCallback(
- ModifyCodeGenerationFromStringsCallback2 callback);
-
- /**
- * Set the callback to invoke to check if wasm code generation should
- * be allowed.
- */
- void SetAllowWasmCodeGenerationCallback(
- AllowWasmCodeGenerationCallback callback);
-
- /**
- * Embedder override/overload injection points for wasm APIs. The expectation
- * is that the embedder sets them at most once.
- */
- void SetWasmModuleCallback(ExtensionCallback callback);
- void SetWasmInstanceCallback(ExtensionCallback callback);
-
- void SetWasmStreamingCallback(WasmStreamingCallback callback);
-
- void SetWasmLoadSourceMapCallback(WasmLoadSourceMapCallback callback);
-
- void SetWasmSimdEnabledCallback(WasmSimdEnabledCallback callback);
-
- void SetWasmExceptionsEnabledCallback(WasmExceptionsEnabledCallback callback);
-
- void SetSharedArrayBufferConstructorEnabledCallback(
- SharedArrayBufferConstructorEnabledCallback callback);
-
- /**
- * This function can be called by the embedder to signal V8 that the dynamic
- * enabling of features has finished. V8 can now set up dynamically added
- * features.
- */
- void InstallConditionalFeatures(Local<Context> context);
-
- /**
- * Check if V8 is dead and therefore unusable. This is the case after
- * fatal errors such as out-of-memory situations.
- */
- bool IsDead();
-
- /**
- * Adds a message listener (errors only).
- *
- * The same message listener can be added more than once and in that
- * case it will be called more than once for each message.
- *
- * If data is specified, it will be passed to the callback when it is called.
- * Otherwise, the exception object will be passed to the callback instead.
- */
- bool AddMessageListener(MessageCallback that,
- Local<Value> data = Local<Value>());
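-
- /**
- * A sketch of a message listener that prints uncaught-exception messages;
- * OnMessage is a hypothetical embedder function.
- *
- * \code
- *   static void OnMessage(v8::Local<v8::Message> message,
- *                         v8::Local<v8::Value> data) {
- *     v8::String::Utf8Value text(message->GetIsolate(), message->Get());
- *     fprintf(stderr, "uncaught: %s\n", *text);
- *   }
- *   isolate->AddMessageListener(OnMessage);
- * \endcode
- */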
-
- /**
- * Adds a message listener.
- *
- * The same message listener can be added more than once and in that
- * case it will be called more than once for each message.
- *
- * If data is specified, it will be passed to the callback when it is called.
- * Otherwise, the exception object will be passed to the callback instead.
- *
- * A listener can listen for particular error levels by providing a mask.
- */
- bool AddMessageListenerWithErrorLevel(MessageCallback that,
- int message_levels,
- Local<Value> data = Local<Value>());
-
- /**
- * Remove all message listeners from the specified callback function.
- */
- void RemoveMessageListeners(MessageCallback that);
-
- /** Callback function for reporting failed access checks.*/
- void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback);
-
- /**
- * Tells V8 to capture the current stack trace when an uncaught exception
- * occurs and report it to the message listeners. The option is off by
- * default.
- */
- void SetCaptureStackTraceForUncaughtExceptions(
- bool capture, int frame_limit = 10,
- StackTrace::StackTraceOptions options = StackTrace::kOverview);
-
- /**
- * Iterates through all external resources referenced from the current
- * isolate's heap. GC is not invoked prior to iterating; therefore there is
- * no guarantee that visited objects are still alive.
- */
- void VisitExternalResources(ExternalResourceVisitor* visitor);
-
- /**
- * Iterates through all the persistent handles in the current isolate's heap
- * that have class_ids.
- */
- void VisitHandlesWithClassIds(PersistentHandleVisitor* visitor);
-
- /**
- * Iterates through all the persistent handles in the current isolate's heap
- * that have class_ids and are weak, so they can be marked as inactive if
- * there is no pending activity for the handle.
- */
- void VisitWeakHandles(PersistentHandleVisitor* visitor);
-
- /**
- * Check if this isolate is in use.
- * True if at least one thread Enter'ed this isolate.
- */
- bool IsInUse();
-
- /**
- * Set whether calling Atomics.wait (a function that may block) is allowed in
- * this isolate. This can also be configured via
- * CreateParams::allow_atomics_wait.
- */
- void SetAllowAtomicsWait(bool allow);
-
- /**
- * Time zone redetection indicator for
- * DateTimeConfigurationChangeNotification.
- *
- * kSkip indicates to V8 that the notification should not trigger redetecting
- * the host time zone. kRedetect indicates to V8 that the host time zone
- * should be redetected and used to set the default time zone.
- *
- * The host time zone detection may require file system access or similar
- * operations unlikely to be available inside a sandbox. If v8 is run inside
- * a sandbox, the host time zone has to be detected outside the sandbox
- * before calling the DateTimeConfigurationChangeNotification function.
- */
- enum class TimeZoneDetection { kSkip, kRedetect };
-
- /**
- * Notification that the embedder has changed the time zone, daylight savings
- * time or other date / time configuration parameters. V8 keeps a cache of
- * various values used for date / time computation. This notification will
- * reset those cached values for the current context so that date / time
- * configuration changes are reflected.
- *
- * This API should not be called more than needed as it will negatively impact
- * the performance of date operations.
- */
- void DateTimeConfigurationChangeNotification(
- TimeZoneDetection time_zone_detection = TimeZoneDetection::kSkip);
-
- /**
- * Notification that the embedder has changed the locale. V8 keeps a cache of
- * various values used for locale computation. This notification will reset
- * those cached values for the current context so that locale configuration
- * changes are reflected.
- *
- * This API should not be called more than needed as it will negatively impact
- * the performance of locale operations.
- */
- void LocaleConfigurationChangeNotification();
-
- Isolate() = delete;
- ~Isolate() = delete;
- Isolate(const Isolate&) = delete;
- Isolate& operator=(const Isolate&) = delete;
- // Deleting operator new and delete here is allowed as ctor and dtor are
- // also deleted.
- void* operator new(size_t size) = delete;
- void* operator new[](size_t size) = delete;
- void operator delete(void*, size_t) = delete;
- void operator delete[](void*, size_t) = delete;
-
- private:
- template <class K, class V, class Traits>
- friend class PersistentValueMapBase;
-
- internal::Address* GetDataFromSnapshotOnce(size_t index);
- void ReportExternalAllocationLimitReached();
-};
-
-class V8_EXPORT StartupData {
- public:
- /**
- * Whether the data created can be rehashed and the hash seed can be
- * recomputed when deserialized.
- * Only valid for StartupData returned by SnapshotCreator::CreateBlob().
- */
- bool CanBeRehashed() const;
- /**
- * Allows embedders to verify whether the data is valid for the current
- * V8 instance.
- */
- bool IsValid() const;
-
- const char* data;
- int raw_size;
-};
-
-/**
- * EntropySource is used as a callback function when v8 needs a source
- * of entropy.
- */
-using EntropySource = bool (*)(unsigned char* buffer, size_t length);
-
-/**
- * ReturnAddressLocationResolver is used as a callback function when v8 is
- * resolving the location of a return address on the stack. Profilers that
- * change the return address on the stack can use this to resolve the stack
- * location to wherever the profiler stashed the original return address.
- *
- * \param return_addr_location A location on stack where a machine
- * return address resides.
- * \returns Either return_addr_location, or else a pointer to the profiler's
- * copy of the original return address.
- *
- * \note The resolver function must not cause garbage collection.
- */
-using ReturnAddressLocationResolver =
- uintptr_t (*)(uintptr_t return_addr_location);
-
-/**
- * Container class for static utility functions.
- */
-class V8_EXPORT V8 {
- public:
- /**
- * Hand startup data to V8, in case the embedder has chosen to build
- * V8 with external startup data.
- *
- * Note:
- * - By default the startup data is linked into the V8 library, in which
- * case this function is not meaningful.
- * - If this needs to be called, it needs to be called before V8
- * tries to make use of its built-ins.
- * - To avoid unnecessary copies of data, V8 will point directly into the
- * given data blob, so pretty please keep it around until V8 exits.
- * - Compression of the startup blob might be useful, but needs to be
- * handled entirely on the embedder's side.
- * - The call will abort if the data is invalid.
- */
- static void SetSnapshotDataBlob(StartupData* startup_blob);
-
- /** Set the callback to invoke in case of Dcheck failures. */
- static void SetDcheckErrorHandler(DcheckErrorCallback that);
-
- /**
- * Sets V8 flags from a string.
- */
- static void SetFlagsFromString(const char* str);
- static void SetFlagsFromString(const char* str, size_t length);
-
- /**
- * Sets V8 flags from the command line.
- */
- static void SetFlagsFromCommandLine(int* argc,
- char** argv,
- bool remove_flags);
-
- /** Get the version string. */
- static const char* GetVersion();
-
- /**
- * Initializes V8. This function needs to be called before the first Isolate
- * is created. It always returns true.
- */
- V8_INLINE static bool Initialize() {
- const int kBuildConfiguration =
- (internal::PointerCompressionIsEnabled() ? kPointerCompression : 0) |
- (internal::SmiValuesAre31Bits() ? k31BitSmis : 0) |
- (internal::HeapSandboxIsEnabled() ? kHeapSandbox : 0);
- return Initialize(kBuildConfiguration);
- }
-
- /**
- * Allows the host application to provide a callback which can be used
- * as a source of entropy for random number generators.
- */
- static void SetEntropySource(EntropySource source);
-
- /**
- * Allows the host application to provide a callback that allows v8 to
- * cooperate with a profiler that rewrites return addresses on stack.
- */
- static void SetReturnAddressLocationResolver(
- ReturnAddressLocationResolver return_address_resolver);
-
- /**
- * Releases any resources used by v8 and stops any utility threads
- * that may be running. Note that disposing v8 is permanent; it
- * cannot be reinitialized.
- *
- * It should generally not be necessary to dispose v8 before exiting
- * a process; this should happen automatically. It is only necessary
- * if the process needs to reclaim the resources taken up by v8.
- */
- static bool Dispose();
-
- /**
- * Initialize the ICU library bundled with V8. The embedder should only
- * invoke this method when using the bundled ICU. Returns true on success.
- *
- * If V8 was compiled with the ICU data in an external file, the location
- * of the data file has to be provided.
- */
- static bool InitializeICU(const char* icu_data_file = nullptr);
-
- /**
- * Initialize the ICU library bundled with V8. The embedder should only
- * invoke this method when using the bundled ICU. If V8 was compiled with
- * the ICU data in an external file and the default location of that
- * file should be used, a path to the executable must be provided.
- * Returns true on success.
- *
- * The default is a file called icudtl.dat side-by-side with the executable.
- *
- * Optionally, the location of the data file can be provided to override the
- * default.
- */
- static bool InitializeICUDefaultLocation(const char* exec_path,
- const char* icu_data_file = nullptr);
-
- /**
- * Initialize the external startup data. The embedder only needs to
- * invoke this method when external startup data was enabled in a build.
- *
- * If V8 was compiled with the startup data in an external file, then
- * V8 needs to be given those external files during startup. There are
- * three ways to do this:
- * - InitializeExternalStartupData(const char*)
- * This will look in the given directory for the file "snapshot_blob.bin".
- * - InitializeExternalStartupDataFromFile(const char*)
- * As above, but will directly use the given file name.
- * - Call SetSnapshotDataBlob.
- * This will read the blobs from the given data structure and will
- * not perform any file IO.
- */
- static void InitializeExternalStartupData(const char* directory_path);
- static void InitializeExternalStartupDataFromFile(const char* snapshot_blob);
-
- /**
- * Sets the v8::Platform to use. This should be invoked before V8 is
- * initialized.
- */
- static void InitializePlatform(Platform* platform);
-
- /**
- * Clears all references to the v8::Platform. This should be invoked after
- * V8 was disposed.
- */
- static void ShutdownPlatform();
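-
- /**
- * A typical startup/shutdown sequence around the functions above, sketched
- * under the assumption that the embedder uses the default platform from
- * include/libplatform/libplatform.h:
- *
- * \code
- *   std::unique_ptr<v8::Platform> platform =
- *       v8::platform::NewDefaultPlatform();
- *   v8::V8::InitializePlatform(platform.get());
- *   v8::V8::Initialize();
- *   // ... create isolates and run scripts ...
- *   v8::V8::Dispose();
- *   v8::V8::ShutdownPlatform();
- * \endcode
- */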
-
- /**
- * Activate trap-based bounds checking for WebAssembly.
- *
- * \param use_v8_signal_handler Whether V8 should install its own signal
- * handler or rely on the embedder's.
- */
- static bool EnableWebAssemblyTrapHandler(bool use_v8_signal_handler);
-
-#if defined(V8_OS_WIN)
- /**
- * On Win64, by default V8 does not emit unwinding data for jitted code,
- * which means the OS cannot walk the stack frames and the system Structured
- * Exception Handling (SEH) cannot unwind through V8-generated code:
- * https://code.google.com/p/v8/issues/detail?id=3598.
- *
- * This function allows embedders to register a custom exception handler for
- * exceptions in V8-generated code.
- */
- static void SetUnhandledExceptionCallback(
- UnhandledExceptionCallback unhandled_exception_callback);
-#endif
-
- /**
- * Get statistics about the shared memory usage.
- */
- static void GetSharedMemoryStatistics(SharedMemoryStatistics* statistics);
-
- private:
- V8();
-
- enum BuildConfigurationFeatures {
- kPointerCompression = 1 << 0,
- k31BitSmis = 1 << 1,
- kHeapSandbox = 1 << 2,
- };
-
- /**
- * Checks that the embedder build configuration is compatible with
- * the V8 binary and if so initializes V8.
- */
- static bool Initialize(int build_config);
-
- static internal::Address* GlobalizeReference(internal::Isolate* isolate,
- internal::Address* handle);
- static internal::Address* GlobalizeTracedReference(internal::Isolate* isolate,
- internal::Address* handle,
- internal::Address* slot,
- bool has_destructor);
- static void MoveGlobalReference(internal::Address** from,
- internal::Address** to);
- static void MoveTracedGlobalReference(internal::Address** from,
- internal::Address** to);
- static void CopyTracedGlobalReference(const internal::Address* const* from,
- internal::Address** to);
- static internal::Address* CopyGlobalReference(internal::Address* from);
- static void DisposeGlobal(internal::Address* global_handle);
- static void DisposeTracedGlobal(internal::Address* global_handle);
- static void MakeWeak(internal::Address* location, void* data,
- WeakCallbackInfo<void>::Callback weak_callback,
- WeakCallbackType type);
- static void MakeWeak(internal::Address** location_addr);
- static void* ClearWeak(internal::Address* location);
- static void SetFinalizationCallbackTraced(
- internal::Address* location, void* parameter,
- WeakCallbackInfo<void>::Callback callback);
- static void AnnotateStrongRetainer(internal::Address* location,
- const char* label);
- static Value* Eternalize(Isolate* isolate, Value* handle);
-
- template <class K, class V, class T>
- friend class PersistentValueMapBase;
-
- static void FromJustIsNothing();
- static void ToLocalEmpty();
- static void InternalFieldOutOfBounds(int index);
- template <class T>
- friend class BasicTracedReference;
- template <class T>
- friend class Global;
- template <class T> friend class Local;
- template <class T>
- friend class MaybeLocal;
- template <class T>
- friend class Maybe;
- template <class T>
- friend class TracedGlobal;
- friend class TracedReferenceBase;
- template <class T>
- friend class TracedReference;
- template <class T>
- friend class WeakCallbackInfo;
- template <class T> friend class Eternal;
- template <class T> friend class PersistentBase;
- template <class T, class M> friend class Persistent;
- friend class Context;
-};
-
-/**
- * Helper class to create a snapshot data blob.
- *
- * The Isolate used by a SnapshotCreator is owned by it, and will be entered
- * and exited by the constructor and destructor, respectively; the destructor
- * will also destroy the Isolate. Experimental language features, including
- * those available by default, are not available while creating a snapshot.
- */
-class V8_EXPORT SnapshotCreator {
- public:
- enum class FunctionCodeHandling { kClear, kKeep };
-
- /**
- * Initialize and enter an isolate, and set it up for serialization.
- * The isolate is either created from scratch or from an existing snapshot.
- * The caller keeps ownership of the argument snapshot.
- * \param existing_blob existing snapshot from which to create this one.
- * \param external_references a null-terminated array of external references
- * that must be equivalent to CreateParams::external_references.
- */
- SnapshotCreator(Isolate* isolate,
- const intptr_t* external_references = nullptr,
- StartupData* existing_blob = nullptr);
-
- /**
- * Create and enter an isolate, and set it up for serialization.
- * The isolate is either created from scratch or from an existing snapshot.
- * The caller keeps ownership of the argument snapshot.
- * \param existing_blob existing snapshot from which to create this one.
- * \param external_references a null-terminated array of external references
- * that must be equivalent to CreateParams::external_references.
- */
- SnapshotCreator(const intptr_t* external_references = nullptr,
- StartupData* existing_blob = nullptr);
-
- /**
- * Destroy the snapshot creator, and exit and dispose of the Isolate
- * associated with it.
- */
- ~SnapshotCreator();
-
- /**
- * \returns the isolate prepared by the snapshot creator.
- */
- Isolate* GetIsolate();
-
- /**
- * Set the default context to be included in the snapshot blob.
- * The snapshot will not contain the global proxy, and we expect one, or a
- * global object template to create one, to be provided upon deserialization.
- *
- * \param callback optional callback to serialize internal fields.
- */
- void SetDefaultContext(Local<Context> context,
- SerializeInternalFieldsCallback callback =
- SerializeInternalFieldsCallback());
-
- /**
- * Add additional context to be included in the snapshot blob.
- * The snapshot will include the global proxy.
- *
- * \param callback optional callback to serialize internal fields.
- *
- * \returns the index of the context in the snapshot blob.
- */
- size_t AddContext(Local<Context> context,
- SerializeInternalFieldsCallback callback =
- SerializeInternalFieldsCallback());
-
- /**
- * Attach arbitrary V8::Data to the context snapshot, which can be retrieved
- * via Context::GetDataFromSnapshot after deserialization. This data does not
- * survive when a new snapshot is created from an existing snapshot.
- * \returns the index for retrieval.
- */
- template <class T>
- V8_INLINE size_t AddData(Local<Context> context, Local<T> object);
-
- /**
- * Attach arbitrary V8::Data to the isolate snapshot, which can be retrieved
- * via Isolate::GetDataFromSnapshot after deserialization. This data does not
- * survive when a new snapshot is created from an existing snapshot.
- * \returns the index for retrieval.
- */
- template <class T>
- V8_INLINE size_t AddData(Local<T> object);
-
- /**
- * Creates a snapshot data blob.
- * This must not be called from within a handle scope.
- * \param function_code_handling whether to include compiled function code
- * in the snapshot.
- * \returns { nullptr, 0 } on failure, and a startup snapshot on success. The
- * caller acquires ownership of the data array in the return value.
- */
- StartupData CreateBlob(FunctionCodeHandling function_code_handling);
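-
- /**
- * A minimal end-to-end sketch of producing a blob with this class; error
- * handling is omitted and V8 is assumed to be initialized.
- *
- * \code
- *   v8::SnapshotCreator creator;
- *   v8::Isolate* isolate = creator.GetIsolate();
- *   {
- *     v8::HandleScope handle_scope(isolate);
- *     v8::Local<v8::Context> context = v8::Context::New(isolate);
- *     creator.SetDefaultContext(context);
- *   }
- *   v8::StartupData blob =
- *       creator.CreateBlob(v8::SnapshotCreator::FunctionCodeHandling::kClear);
- *   // ... hand |blob| to V8::SetSnapshotDataBlob() in a later run ...
- *   delete[] blob.data;  // the caller owns the data array
- * \endcode
- */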
-
- // Disallow copying and assigning.
- SnapshotCreator(const SnapshotCreator&) = delete;
- void operator=(const SnapshotCreator&) = delete;
-
- private:
- size_t AddData(Local<Context> context, internal::Address object);
- size_t AddData(internal::Address object);
-
- void* data_;
-};
-
-/**
- * A simple Maybe type, representing an object which may or may not have a
- * value, see https://hackage.haskell.org/package/base/docs/Data-Maybe.html.
- *
- * If an API method returns a Maybe<>, the API method can potentially fail
- * either because an exception is thrown, or because an exception is pending,
- * e.g. because a previous API call threw an exception that hasn't been caught
- * yet, or because a TerminateExecution exception was thrown. In that case, a
- * "Nothing" value is returned.
- */
-template <class T>
-class Maybe {
- public:
- V8_INLINE bool IsNothing() const { return !has_value_; }
- V8_INLINE bool IsJust() const { return has_value_; }
-
- /**
- * An alias for |FromJust|. Will crash if the Maybe<> is nothing.
- */
- V8_INLINE T ToChecked() const { return FromJust(); }
-
- /**
- * Short-hand for ToChecked(), which doesn't return a value. To be used where
- * the actual value of the Maybe is not needed, as with Object::Set.
- */
- V8_INLINE void Check() const {
- if (V8_UNLIKELY(!IsJust())) V8::FromJustIsNothing();
- }
-
- /**
- * Converts this Maybe<> to a value of type T. If this Maybe<> is
- * nothing (empty), |false| is returned and |out| is left untouched.
- */
- V8_WARN_UNUSED_RESULT V8_INLINE bool To(T* out) const {
- if (V8_LIKELY(IsJust())) *out = value_;
- return IsJust();
- }
-
- /**
- * Converts this Maybe<> to a value of type T. If this Maybe<> is
- * nothing (empty), V8 will crash the process.
- */
- V8_INLINE T FromJust() const {
- if (V8_UNLIKELY(!IsJust())) V8::FromJustIsNothing();
- return value_;
- }
-
- /**
- * Converts this Maybe<> to a value of type T, using a default value if this
- * Maybe<> is nothing (empty).
- */
- V8_INLINE T FromMaybe(const T& default_value) const {
- return has_value_ ? value_ : default_value;
- }
-
- V8_INLINE bool operator==(const Maybe& other) const {
- return (IsJust() == other.IsJust()) &&
- (!IsJust() || FromJust() == other.FromJust());
- }
-
- V8_INLINE bool operator!=(const Maybe& other) const {
- return !operator==(other);
- }
-
- private:
- Maybe() : has_value_(false) {}
- explicit Maybe(const T& t) : has_value_(true), value_(t) {}
-
- bool has_value_;
- T value_;
-
- template <class U>
- friend Maybe<U> Nothing();
- template <class U>
- friend Maybe<U> Just(const U& u);
-};
-
-template <class T>
-inline Maybe<T> Nothing() {
- return Maybe<T>();
-}
-
-template <class T>
-inline Maybe<T> Just(const T& t) {
- return Maybe<T>(t);
-}
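-
-/**
- * A usage sketch for Maybe<>; |context|, |object|, |key| and |value| are
- * assumed to be live handles, and Object::Set is used as a typical producer
- * of Maybe<bool>.
- *
- * \code
- *   v8::Maybe<bool> result = object->Set(context, key, value);
- *   if (result.IsNothing()) return;  // an exception is pending
- *   bool ok = result.FromJust();     // would crash if the Maybe were nothing
- *   // When the returned value itself is not needed:
- *   object->Set(context, key, value).Check();
- * \endcode
- */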
-
-// A template specialization of Maybe<T> for the case of T = void.
-template <>
-class Maybe<void> {
- public:
- V8_INLINE bool IsNothing() const { return !is_valid_; }
- V8_INLINE bool IsJust() const { return is_valid_; }
-
- V8_INLINE bool operator==(const Maybe& other) const {
- return IsJust() == other.IsJust();
- }
-
- V8_INLINE bool operator!=(const Maybe& other) const {
- return !operator==(other);
- }
-
- private:
- struct JustTag {};
-
- Maybe() : is_valid_(false) {}
- explicit Maybe(JustTag) : is_valid_(true) {}
-
- bool is_valid_;
-
- template <class U>
- friend Maybe<U> Nothing();
- friend Maybe<void> JustVoid();
-};
-
-inline Maybe<void> JustVoid() { return Maybe<void>(Maybe<void>::JustTag()); }
-
-/**
- * An external exception handler.
- */
-class V8_EXPORT TryCatch {
- public:
- /**
- * Creates a new try/catch block and registers it with v8. Note that
- * all TryCatch blocks should be stack allocated because the memory
- * location itself is compared against JavaScript try/catch blocks.
- */
- explicit TryCatch(Isolate* isolate);
-
- /**
- * Unregisters and deletes this try/catch block.
- */
- ~TryCatch();
-
- /**
- * Returns true if an exception has been caught by this try/catch block.
- */
- bool HasCaught() const;
-
- /**
- * For certain types of exceptions, it makes no sense to continue execution.
- *
- * If CanContinue returns false, the correct action is to perform any C++
- * cleanup needed and then return. If CanContinue returns false and
- * HasTerminated returns true, it is possible to call
- * CancelTerminateExecution in order to continue calling into the engine.
- */
- bool CanContinue() const;
-
- /**
- * Returns true if an exception has been caught due to script execution
- * being terminated.
- *
- * There is no JavaScript representation of an execution termination
- * exception. Such exceptions are thrown when the TerminateExecution
- * methods are called to terminate a long-running script.
- *
- * If such an exception has been thrown, HasTerminated will return true,
- * indicating that it is possible to call CancelTerminateExecution in order
- * to continue calling into the engine.
- */
- bool HasTerminated() const;
-
- /**
- * Throws the exception caught by this TryCatch in a way that avoids
- * it being caught again by this same TryCatch. As with ThrowException
- * it is illegal to execute any JavaScript operations after calling
- * ReThrow; the caller must return immediately to where the exception
- * is caught.
- */
- Local<Value> ReThrow();
-
- /**
- * Returns the exception caught by this try/catch block. If no exception has
- * been caught an empty handle is returned.
- */
- Local<Value> Exception() const;
-
- /**
- * Returns the .stack property of an object. If no .stack
- * property is present an empty handle is returned.
- */
- V8_WARN_UNUSED_RESULT static MaybeLocal<Value> StackTrace(
- Local<Context> context, Local<Value> exception);
-
- /**
- * Returns the .stack property of the thrown object. If no .stack property is
- * present or if this try/catch block has not caught an exception, an empty
- * handle is returned.
- */
- V8_WARN_UNUSED_RESULT MaybeLocal<Value> StackTrace(
- Local<Context> context) const;
-
- /**
- * Returns the message associated with this exception. If there is
- * no message associated an empty handle is returned.
- */
- Local<v8::Message> Message() const;
-
- /**
- * Clears any exceptions that may have been caught by this try/catch block.
- * After this method has been called, HasCaught() will return false. It also
- * cancels the scheduled exception if it was caught and ReThrow() has not
- * been called in the meantime.
- *
- * It is not necessary to clear a try/catch block before using it again; if
- * another exception is thrown the previously caught exception will just be
- * overwritten. However, it is often a good idea since it makes it easier
- * to determine which operation threw a given exception.
- */
- void Reset();
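-
- /**
- * A minimal usage sketch, assuming live |isolate| and |context| handles and
- * a compiled |script|; note the stack allocation required above.
- *
- * \code
- *   v8::TryCatch try_catch(isolate);
- *   v8::MaybeLocal<v8::Value> result = script->Run(context);
- *   if (result.IsEmpty()) {
- *     v8::String::Utf8Value error(isolate, try_catch.Exception());
- *     fprintf(stderr, "exception: %s\n", *error);
- *     try_catch.Reset();  // optional; clears HasCaught()
- *   }
- * \endcode
- */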
-
- /**
- * Set verbosity of the external exception handler.
- *
- * By default, exceptions that are caught by an external exception
- * handler are not reported. Call SetVerbose with true on an
- * external exception handler to have exceptions caught by the
- * handler reported as if they were not caught.
- */
- void SetVerbose(bool value);
-
- /**
- * Returns true if verbosity is enabled.
- */
- bool IsVerbose() const;
-
- /**
- * Set whether or not this TryCatch should capture a Message object
- * which holds source information about where the exception
- * occurred. True by default.
- */
- void SetCaptureMessage(bool value);
-
- /**
- * There are cases when the raw address of C++ TryCatch object cannot be
- * used for comparisons with addresses into the JS stack. The cases are:
- * 1) ARM, ARM64 and MIPS simulators which have separate JS stack.
- * 2) Address sanitizer allocates local C++ object in the heap when
- * UseAfterReturn mode is enabled.
- * This method returns address that can be used for comparisons with
- * addresses into the JS stack. When neither simulator nor ASAN's
- * UseAfterReturn is enabled, then the address returned will be the address
- * of the C++ try catch handler itself.
- */
- static void* JSStackComparableAddress(TryCatch* handler) {
- if (handler == nullptr) return nullptr;
- return handler->js_stack_comparable_address_;
- }
-
- TryCatch(const TryCatch&) = delete;
- void operator=(const TryCatch&) = delete;
-
- private:
- // Declaring operator new and delete as deleted is not spec compliant.
- // Therefore declare them private instead to disable dynamic allocation.
- void* operator new(size_t size);
- void* operator new[](size_t size);
- void operator delete(void*, size_t);
- void operator delete[](void*, size_t);
-
- void ResetInternal();
-
- internal::Isolate* isolate_;
- TryCatch* next_;
- void* exception_;
- void* message_obj_;
- void* js_stack_comparable_address_;
- bool is_verbose_ : 1;
- bool can_continue_ : 1;
- bool capture_message_ : 1;
- bool rethrow_ : 1;
- bool has_terminated_ : 1;
-
- friend class internal::Isolate;
-};
-
-
-// --- Context ---
-
-
-/**
- * A container for extension names.
- */
-class V8_EXPORT ExtensionConfiguration {
- public:
- ExtensionConfiguration() : name_count_(0), names_(nullptr) {}
- ExtensionConfiguration(int name_count, const char* names[])
- : name_count_(name_count), names_(names) { }
-
- const char** begin() const { return &names_[0]; }
- const char** end() const { return &names_[name_count_]; }
-
- private:
- const int name_count_;
- const char** names_;
-};
-
-/**
- * A sandboxed execution context with its own set of built-in objects
- * and functions.
- */
-class V8_EXPORT Context : public Data {
- public:
- /**
- * Returns the global proxy object.
- *
- * The global proxy object is a thin wrapper whose prototype points to the
- * actual context's global object with properties like Object, etc. This is
- * done for security reasons (for more details see
- * https://wiki.mozilla.org/Gecko:SplitWindow).
- *
- * Please note that changes to the global proxy object's prototype will most
- * likely break the VM; v8 expects only the global object as the prototype of
- * the global proxy object.
- */
- Local<Object> Global();
-
- /**
- * Detaches the global object from its context so that
- * the global object can be reused to create a new context.
- */
- void DetachGlobal();
-
- /**
- * Creates a new context and returns a handle to the newly allocated
- * context.
- *
- * \param isolate The isolate in which to create the context.
- *
- * \param extensions An optional extension configuration containing
- * the extensions to be installed in the newly created context.
- *
- * \param global_template An optional object template from which the
- * global object for the newly created context will be created.
- *
- * \param global_object An optional global object to be reused for
- * the newly created context. This global object must have been
- * created by a previous call to Context::New with the same global
- * template. The state of the global object will be completely reset
- * and only object identity will remain.
- */
- static Local<Context> New(
- Isolate* isolate, ExtensionConfiguration* extensions = nullptr,
- MaybeLocal<ObjectTemplate> global_template = MaybeLocal<ObjectTemplate>(),
- MaybeLocal<Value> global_object = MaybeLocal<Value>(),
- DeserializeInternalFieldsCallback internal_fields_deserializer =
- DeserializeInternalFieldsCallback(),
- MicrotaskQueue* microtask_queue = nullptr);
-
- /**
- * Create a new context from a (non-default) context snapshot. There
- * is no way to provide a global object template since we do not create
- * a new global object from a template, but we can reuse a global object.
- *
- * \param isolate See v8::Context::New.
- *
- * \param context_snapshot_index The index of the context snapshot to
- * deserialize from. Use v8::Context::New for the default snapshot.
- *
- * \param embedder_fields_deserializer Optional callback to deserialize
- * internal fields. It should match the SerializeInternalFieldCallback used
- * to serialize.
- *
- * \param extensions See v8::Context::New.
- *
- * \param global_object See v8::Context::New.
- */
- static MaybeLocal<Context> FromSnapshot(
- Isolate* isolate, size_t context_snapshot_index,
- DeserializeInternalFieldsCallback embedder_fields_deserializer =
- DeserializeInternalFieldsCallback(),
- ExtensionConfiguration* extensions = nullptr,
- MaybeLocal<Value> global_object = MaybeLocal<Value>(),
- MicrotaskQueue* microtask_queue = nullptr);
-
- /**
- * Returns a global object that isn't backed by an actual context.
- *
- * The global template needs to have access checks with handlers installed.
- * If an existing global object is passed in, the global object is detached
- * from its context.
- *
- * Note that this is different from a detached context where all accesses to
- * the global proxy will fail. Instead, the access check handlers are invoked.
- *
- * It is also not possible to detach an object returned by this method.
- * Instead, the access check handlers need to return nothing to achieve the
- * same effect.
- *
- * It is possible, however, to create a new context from the global object
- * returned by this method.
- */
- static MaybeLocal<Object> NewRemoteContext(
- Isolate* isolate, Local<ObjectTemplate> global_template,
- MaybeLocal<Value> global_object = MaybeLocal<Value>());
-
- /**
- * Sets the security token for the context. To access an object in
- * another context, the security tokens must match.
- */
- void SetSecurityToken(Local<Value> token);
-
- /** Restores the security token to the default value. */
- void UseDefaultSecurityToken();
-
- /** Returns the security token of this context.*/
- Local<Value> GetSecurityToken();
-
- /**
- * Enter this context. After entering a context, all code compiled
- * and run is compiled and run in this context. If another context
- * is already entered, this old context is saved so it can be
- * restored when the new context is exited.
- */
- void Enter();
-
- /**
- * Exit this context. Exiting the current context restores the
- * context that was in place when entering the current context.
- */
- void Exit();
-
-  /** Returns the isolate associated with the current context. */
- Isolate* GetIsolate();
-
-  /** Returns the microtask queue associated with the current context. */
- MicrotaskQueue* GetMicrotaskQueue();
-
- /**
- * The field at kDebugIdIndex used to be reserved for the inspector.
- * It now serves no purpose.
- */
- enum EmbedderDataFields { kDebugIdIndex = 0 };
-
- /**
- * Return the number of fields allocated for embedder data.
- */
- uint32_t GetNumberOfEmbedderDataFields();
-
- /**
- * Gets the embedder data with the given index, which must have been set by a
- * previous call to SetEmbedderData with the same index.
- */
- V8_INLINE Local<Value> GetEmbedderData(int index);
-
- /**
- * Gets the binding object used by V8 extras. Extra natives get a reference
- * to this object and can use it to "export" functionality by adding
- * properties. Extra natives can also "import" functionality by accessing
- * properties added by the embedder using the V8 API.
- */
- Local<Object> GetExtrasBindingObject();
-
- /**
- * Sets the embedder data with the given index, growing the data as
- * needed. Note that index 0 currently has a special meaning for Chrome's
- * debugger.
- */
- void SetEmbedderData(int index, Local<Value> value);
-
- /**
- * Gets a 2-byte-aligned native pointer from the embedder data with the given
- * index, which must have been set by a previous call to
- * SetAlignedPointerInEmbedderData with the same index. Note that index 0
- * currently has a special meaning for Chrome's debugger.
- */
- V8_INLINE void* GetAlignedPointerFromEmbedderData(int index);
-
- /**
- * Sets a 2-byte-aligned native pointer in the embedder data with the given
- * index, growing the data as needed. Note that index 0 currently has a
- * special meaning for Chrome's debugger.
- */
- void SetAlignedPointerInEmbedderData(int index, void* value);
-
- /**
- * Control whether code generation from strings is allowed. Calling
- * this method with false will disable 'eval' and the 'Function'
- * constructor for code running in this context. If 'eval' or the
- * 'Function' constructor is used, an exception will be thrown.
- *
- * If code generation from strings is not allowed, the
- * V8::AllowCodeGenerationFromStrings callback will be invoked, if set,
- * before blocking the call to 'eval' or the 'Function' constructor. If
- * that callback returns true, the call will be allowed; otherwise an
- * exception will be thrown. If no callback is set, an exception will be
- * thrown.
- */
- void AllowCodeGenerationFromStrings(bool allow);
-
- /**
- * Returns true if code generation from strings is allowed for the context.
- * For more details see AllowCodeGenerationFromStrings(bool) documentation.
- */
- bool IsCodeGenerationFromStringsAllowed() const;
-
- /**
- * Sets the error description for the exception that is thrown when
- * code generation from strings is not allowed and 'eval' or the 'Function'
- * constructor are called.
- */
- void SetErrorMessageForCodeGenerationFromStrings(Local<String> message);
-
- /**
- * Returns data that was previously attached to the context snapshot via
- * SnapshotCreator, and removes the reference to it.
- * Repeated calls with the same index return an empty MaybeLocal.
- */
- template <class T>
- V8_INLINE MaybeLocal<T> GetDataFromSnapshotOnce(size_t index);
-
- /**
- * If a callback is set, any attempt to execute JavaScript in this
- * context is aborted: the specified callback is called and an exception
- * is thrown. To unset, pass nullptr as the callback.
- */
- using AbortScriptExecutionCallback = void (*)(Isolate* isolate,
- Local<Context> context);
- void SetAbortScriptExecution(AbortScriptExecutionCallback callback);
-
- /**
- * Returns the value that was set or restored by
- * SetContinuationPreservedEmbedderData(), if any.
- */
- Local<Value> GetContinuationPreservedEmbedderData() const;
-
- /**
- * Sets a value that will be stored on continuations and reset while the
- * continuation runs.
- */
- void SetContinuationPreservedEmbedderData(Local<Value> context);
-
- /**
- * Set or clear hooks to be invoked for promise lifecycle operations.
- * To clear a hook, set it to an empty v8::Function. Each function will
- * receive the observed promise as the first argument. If a chaining
- * operation is used on a promise, the init will additionally receive
- * the parent promise as the second argument.
- */
- void SetPromiseHooks(Local<Function> init_hook,
- Local<Function> before_hook,
- Local<Function> after_hook,
- Local<Function> resolve_hook);
-
- /**
- * Stack-allocated class which sets the execution context for all
- * operations executed within a local scope.
- */
- class V8_NODISCARD Scope {
- public:
- explicit V8_INLINE Scope(Local<Context> context) : context_(context) {
- context_->Enter();
- }
- V8_INLINE ~Scope() { context_->Exit(); }
-
- private:
- Local<Context> context_;
- };
-
- /**
- * Stack-allocated class to support the backup incumbent settings object
- * stack.
- * https://html.spec.whatwg.org/multipage/webappapis.html#backup-incumbent-settings-object-stack
- */
- class V8_EXPORT V8_NODISCARD BackupIncumbentScope final {
- public:
- /**
- * |backup_incumbent_context| is pushed onto the backup incumbent settings
- * object stack.
- */
- explicit BackupIncumbentScope(Local<Context> backup_incumbent_context);
- ~BackupIncumbentScope();
-
- /**
- * Returns an address that is comparable with JS stack addresses. Note
- * that the JS stack may be allocated separately from the native stack.
- * See also |TryCatch::JSStackComparableAddress| for details.
- */
- uintptr_t JSStackComparableAddress() const {
- return js_stack_comparable_address_;
- }
-
- private:
- friend class internal::Isolate;
-
- Local<Context> backup_incumbent_context_;
- uintptr_t js_stack_comparable_address_ = 0;
- const BackupIncumbentScope* prev_ = nullptr;
- };
-
- V8_INLINE static Context* Cast(Data* data);
-
- private:
- friend class Value;
- friend class Script;
- friend class Object;
- friend class Function;
-
- static void CheckCast(Data* obj);
-
- internal::Address* GetDataFromSnapshotOnce(size_t index);
- Local<Value> SlowGetEmbedderData(int index);
- void* SlowGetAlignedPointerFromEmbedderData(int index);
-};
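
Putting the pieces above together, a minimal sketch of creating and entering
a context, assuming a live `isolate`:

v8::Isolate::Scope isolate_scope(isolate);
v8::HandleScope handle_scope(isolate);
v8::Local<v8::Context> context = v8::Context::New(isolate);
{
  v8::Context::Scope context_scope(context);  // Enter()/Exit() via RAII
  // Code compiled and run here executes in `context`.
}
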
-
-/**
- * Multiple threads in V8 are allowed, but only one thread at a time is allowed
- * to use any given V8 isolate, see the comments in the Isolate class. The
- * definition of 'using a V8 isolate' includes accessing handles or holding onto
- * object pointers obtained from V8 handles while in the particular V8 isolate.
- * It is up to the user of V8 to ensure, perhaps with locking, that this
- * constraint is not violated. In addition to any other synchronization
- * mechanism that may be used, the v8::Locker and v8::Unlocker classes must be
- * used to signal thread switches to V8.
- *
- * v8::Locker is a scoped lock object. While it's active, i.e. between its
- * construction and destruction, the current thread is allowed to use the locked
- * isolate. V8 guarantees that an isolate can be locked by at most one thread at
- * any time. In other words, the scope of a v8::Locker is a critical section.
- *
- * Sample usage:
- * \code
- * ...
- * {
- * v8::Locker locker(isolate);
- * v8::Isolate::Scope isolate_scope(isolate);
- * ...
- * // Code using V8 and isolate goes here.
- * ...
- * } // Destructor called here
- * \endcode
- *
- * If you wish to stop using V8 in thread A, you can do this either by
- * destroying the v8::Locker object as above or by constructing a
- * v8::Unlocker object:
- *
- * \code
- * {
- * isolate->Exit();
- * v8::Unlocker unlocker(isolate);
- * ...
- * // Code not using V8 goes here while V8 can run in another thread.
- * ...
- * } // Destructor called here.
- * isolate->Enter();
- * \endcode
- *
- * The Unlocker object is intended for use in a long-running callback from V8,
- * where you want to release the V8 lock for other threads to use.
- *
- * The v8::Locker is a recursive lock, i.e. you can lock more than once in a
- * given thread. This can be useful if you have code that can be called either
- * from code that holds the lock or from code that does not. The Unlocker is
- * not recursive so you can not have several Unlockers on the stack at once, and
- * you can not use an Unlocker in a thread that is not inside a Locker's scope.
- *
- * An unlocker will unlock several lockers if it has to and reinstate the
- * correct depth of locking on its destruction, e.g.:
- *
- * \code
- * // V8 not locked.
- * {
- * v8::Locker locker(isolate);
- * Isolate::Scope isolate_scope(isolate);
- * // V8 locked.
- * {
- * v8::Locker another_locker(isolate);
- * // V8 still locked (2 levels).
- * {
- * isolate->Exit();
- * v8::Unlocker unlocker(isolate);
- * // V8 not locked.
- * }
- * isolate->Enter();
- * // V8 locked again (2 levels).
- * }
- * // V8 still locked (1 level).
- * }
- * // V8 now no longer locked.
- * \endcode
- */
-class V8_EXPORT Unlocker {
- public:
- /**
- * Initialize Unlocker for a given Isolate.
- */
- V8_INLINE explicit Unlocker(Isolate* isolate) { Initialize(isolate); }
-
- ~Unlocker();
- private:
- void Initialize(Isolate* isolate);
-
- internal::Isolate* isolate_;
-};
-
-
-class V8_EXPORT Locker {
- public:
- /**
- * Initialize Locker for a given Isolate.
- */
- V8_INLINE explicit Locker(Isolate* isolate) { Initialize(isolate); }
-
- ~Locker();
-
- /**
- * Returns whether or not the locker for a given isolate is locked by the
- * current thread.
- */
- static bool IsLocked(Isolate* isolate);
-
- /**
- * Returns whether v8::Locker is being used by this V8 instance.
- */
- static bool IsActive();
-
- // Disallow copying and assigning.
- Locker(const Locker&) = delete;
- void operator=(const Locker&) = delete;
-
- private:
- void Initialize(Isolate* isolate);
-
- bool has_lock_;
- bool top_level_;
- internal::Isolate* isolate_;
-};
-
-/**
- * Various helpers for skipping over V8 frames in a given stack.
- *
- * The unwinder API is only supported on the x64, ARM64 and ARM32 architectures.
- */
-class V8_EXPORT Unwinder {
- public:
- /**
- * Attempt to unwind the stack to the most recent C++ frame. This function is
- * signal-safe and does not access any V8 state and thus doesn't require an
- * Isolate.
- *
- * The unwinder needs to know the location of the JS Entry Stub (a piece of
- * code that is run when C++ code calls into generated JS code). This is used
- * for edge cases where the current frame is being constructed or torn down
- * when the stack sample occurs.
- *
- * The unwinder also needs the virtual memory range of all possible V8 code
- * objects. There are two ranges required - the heap code range and the range
- * for code embedded in the binary.
- *
- * Available on x64, ARM64 and ARM32.
- *
- * \param code_pages A list of all of the ranges in which V8 has allocated
- * executable code. The caller should obtain this list by calling
- * Isolate::CopyCodePages() during the same interrupt/thread suspension that
- * captures the stack.
- * \param register_state The current registers. This is an in-out param that
- * will be overwritten with the register values after unwinding, on success.
- * \param stack_base The resulting stack pointer and frame pointer values are
- * bounds-checked against the stack_base and the original stack pointer value
- * to ensure that they are valid locations in the given stack. If these values
- * or any intermediate frame pointer values used during unwinding are ever out
- * of these bounds, unwinding will fail.
- *
- * \return True on success.
- */
- static bool TryUnwindV8Frames(const JSEntryStubs& entry_stubs,
- size_t code_pages_length,
- const MemoryRange* code_pages,
- RegisterState* register_state,
- const void* stack_base);
-
- /**
- * Whether the PC is within the V8 code range represented by code_pages.
- *
- * If this returns false, then calling TryUnwindV8Frames() with the same PC
- * and register state will always fail. If it returns true, then unwinding
- * may (but not necessarily) be successful.
- *
- * Available on x64, ARM64 and ARM32.
- */
- static bool PCIsInV8(size_t code_pages_length, const MemoryRange* code_pages,
- void* pc);
-};
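
A sketch of the intended sampling-profiler flow, assuming the register state
was captured while the sampled thread was suspended; CaptureRegisters() and
`stack_base` stand in for the embedder's own sampling machinery:

v8::JSEntryStubs entry_stubs = isolate->GetJSEntryStubs();
v8::MemoryRange code_pages[v8::Isolate::kMinCodePagesBufferSize];
size_t num_pages =
    isolate->CopyCodePages(v8::Isolate::kMinCodePagesBufferSize, code_pages);
v8::RegisterState state = CaptureRegisters();  // hypothetical helper
if (v8::Unwinder::PCIsInV8(num_pages, code_pages, state.pc)) {
  v8::Unwinder::TryUnwindV8Frames(entry_stubs, num_pages, code_pages, &state,
                                  stack_base);
}
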
-
-// --- Implementation ---
-
-template <class T>
-Local<T> Local<T>::New(Isolate* isolate, Local<T> that) {
- return New(isolate, that.val_);
-}
-
-template <class T>
-Local<T> Local<T>::New(Isolate* isolate, const PersistentBase<T>& that) {
- return New(isolate, that.val_);
-}
-
-template <class T>
-Local<T> Local<T>::New(Isolate* isolate, const BasicTracedReference<T>& that) {
- return New(isolate, *that);
-}
-
-template <class T>
-Local<T> Local<T>::New(Isolate* isolate, T* that) {
- if (that == nullptr) return Local<T>();
- T* that_ptr = that;
- internal::Address* p = reinterpret_cast<internal::Address*>(that_ptr);
- return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(
- reinterpret_cast<internal::Isolate*>(isolate), *p)));
-}
-
-
-template<class T>
-template<class S>
-void Eternal<T>::Set(Isolate* isolate, Local<S> handle) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- val_ = reinterpret_cast<T*>(
- V8::Eternalize(isolate, reinterpret_cast<Value*>(*handle)));
-}
-
-template <class T>
-Local<T> Eternal<T>::Get(Isolate* isolate) const {
- // The eternal handle will never go away, so as with the roots, we don't even
- // need to open a handle.
- return Local<T>(val_);
-}
-
-
-template <class T>
-Local<T> MaybeLocal<T>::ToLocalChecked() {
- if (V8_UNLIKELY(val_ == nullptr)) V8::ToLocalEmpty();
- return Local<T>(val_);
-}
-
-
-template <class T>
-void* WeakCallbackInfo<T>::GetInternalField(int index) const {
-#ifdef V8_ENABLE_CHECKS
- if (index < 0 || index >= kEmbedderFieldsInWeakCallback) {
- V8::InternalFieldOutOfBounds(index);
- }
-#endif
- return embedder_fields_[index];
-}
-
-
-template <class T>
-T* PersistentBase<T>::New(Isolate* isolate, T* that) {
- if (that == nullptr) return nullptr;
- internal::Address* p = reinterpret_cast<internal::Address*>(that);
- return reinterpret_cast<T*>(
- V8::GlobalizeReference(reinterpret_cast<internal::Isolate*>(isolate),
- p));
-}
-
-
-template <class T, class M>
-template <class S, class M2>
-void Persistent<T, M>::Copy(const Persistent<S, M2>& that) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- this->Reset();
- if (that.IsEmpty()) return;
- internal::Address* p = reinterpret_cast<internal::Address*>(that.val_);
- this->val_ = reinterpret_cast<T*>(V8::CopyGlobalReference(p));
- M::Copy(that, this);
-}
-
-template <class T>
-bool PersistentBase<T>::IsWeak() const {
- using I = internal::Internals;
- if (this->IsEmpty()) return false;
- return I::GetNodeState(reinterpret_cast<internal::Address*>(this->val_)) ==
- I::kNodeStateIsWeakValue;
-}
-
-
-template <class T>
-void PersistentBase<T>::Reset() {
- if (this->IsEmpty()) return;
- V8::DisposeGlobal(reinterpret_cast<internal::Address*>(this->val_));
- val_ = nullptr;
-}
-
-
-template <class T>
-template <class S>
-void PersistentBase<T>::Reset(Isolate* isolate, const Local<S>& other) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- Reset();
- if (other.IsEmpty()) return;
- this->val_ = New(isolate, other.val_);
-}
-
-
-template <class T>
-template <class S>
-void PersistentBase<T>::Reset(Isolate* isolate,
- const PersistentBase<S>& other) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- Reset();
- if (other.IsEmpty()) return;
- this->val_ = New(isolate, other.val_);
-}
-
-
-template <class T>
-template <typename P>
-V8_INLINE void PersistentBase<T>::SetWeak(
- P* parameter, typename WeakCallbackInfo<P>::Callback callback,
- WeakCallbackType type) {
- using Callback = WeakCallbackInfo<void>::Callback;
-#if (__GNUC__ >= 8) && !defined(__clang__)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wcast-function-type"
-#endif
- V8::MakeWeak(reinterpret_cast<internal::Address*>(this->val_), parameter,
- reinterpret_cast<Callback>(callback), type);
-#if (__GNUC__ >= 8) && !defined(__clang__)
-#pragma GCC diagnostic pop
-#endif
-}
-
-template <class T>
-void PersistentBase<T>::SetWeak() {
- V8::MakeWeak(reinterpret_cast<internal::Address**>(&this->val_));
-}
-
-template <class T>
-template <typename P>
-P* PersistentBase<T>::ClearWeak() {
- return reinterpret_cast<P*>(
- V8::ClearWeak(reinterpret_cast<internal::Address*>(this->val_)));
-}
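
A sketch of the weak-callback pattern these helpers implement: freeing a
native wrapper once its JS object is collected. `Wrapper` is a hypothetical
embedder type and `handle` a v8::Global<v8::Object> that already points at
the wrapped object:

struct Wrapper { /* native state */ };

void OnWrapperGone(const v8::WeakCallbackInfo<Wrapper>& info) {
  delete info.GetParameter();  // the Wrapper passed to SetWeak below
}

// After this, `handle` no longer keeps the object alive; OnWrapperGone
// runs once the object has been garbage collected.
handle.SetWeak(wrapper, OnWrapperGone, v8::WeakCallbackType::kParameter);
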
-
-template <class T>
-void PersistentBase<T>::AnnotateStrongRetainer(const char* label) {
- V8::AnnotateStrongRetainer(reinterpret_cast<internal::Address*>(this->val_),
- label);
-}
-
-template <class T>
-void PersistentBase<T>::SetWrapperClassId(uint16_t class_id) {
- using I = internal::Internals;
- if (this->IsEmpty()) return;
- internal::Address* obj = reinterpret_cast<internal::Address*>(this->val_);
- uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
- *reinterpret_cast<uint16_t*>(addr) = class_id;
-}
-
-
-template <class T>
-uint16_t PersistentBase<T>::WrapperClassId() const {
- using I = internal::Internals;
- if (this->IsEmpty()) return 0;
- internal::Address* obj = reinterpret_cast<internal::Address*>(this->val_);
- uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
- return *reinterpret_cast<uint16_t*>(addr);
-}
-
-template <class T>
-Global<T>::Global(Global&& other) : PersistentBase<T>(other.val_) {
- if (other.val_ != nullptr) {
- V8::MoveGlobalReference(reinterpret_cast<internal::Address**>(&other.val_),
- reinterpret_cast<internal::Address**>(&this->val_));
- other.val_ = nullptr;
- }
-}
-
-template <class T>
-template <class S>
-Global<T>& Global<T>::operator=(Global<S>&& rhs) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- if (this != &rhs) {
- this->Reset();
- if (rhs.val_ != nullptr) {
- this->val_ = rhs.val_;
- V8::MoveGlobalReference(
- reinterpret_cast<internal::Address**>(&rhs.val_),
- reinterpret_cast<internal::Address**>(&this->val_));
- rhs.val_ = nullptr;
- }
- }
- return *this;
-}
-
-template <class T>
-internal::Address* BasicTracedReference<T>::New(
- Isolate* isolate, T* that, void* slot, DestructionMode destruction_mode) {
- if (that == nullptr) return nullptr;
- internal::Address* p = reinterpret_cast<internal::Address*>(that);
- return V8::GlobalizeTracedReference(
- reinterpret_cast<internal::Isolate*>(isolate), p,
- reinterpret_cast<internal::Address*>(slot),
- destruction_mode == kWithDestructor);
-}
-
-void TracedReferenceBase::Reset() {
- if (IsEmpty()) return;
- V8::DisposeTracedGlobal(reinterpret_cast<internal::Address*>(val_));
- SetSlotThreadSafe(nullptr);
-}
-
-v8::Local<v8::Value> TracedReferenceBase::Get(v8::Isolate* isolate) const {
- if (IsEmpty()) return Local<Value>();
- return Local<Value>::New(isolate, reinterpret_cast<Value*>(val_));
-}
-
-V8_INLINE bool operator==(const TracedReferenceBase& lhs,
- const TracedReferenceBase& rhs) {
- v8::internal::Address* a = reinterpret_cast<v8::internal::Address*>(lhs.val_);
- v8::internal::Address* b = reinterpret_cast<v8::internal::Address*>(rhs.val_);
- if (a == nullptr) return b == nullptr;
- if (b == nullptr) return false;
- return *a == *b;
-}
-
-template <typename U>
-V8_INLINE bool operator==(const TracedReferenceBase& lhs,
- const v8::Local<U>& rhs) {
- v8::internal::Address* a = reinterpret_cast<v8::internal::Address*>(lhs.val_);
- v8::internal::Address* b = reinterpret_cast<v8::internal::Address*>(*rhs);
- if (a == nullptr) return b == nullptr;
- if (b == nullptr) return false;
- return *a == *b;
-}
-
-template <typename U>
-V8_INLINE bool operator==(const v8::Local<U>& lhs,
- const TracedReferenceBase& rhs) {
- return rhs == lhs;
-}
-
-V8_INLINE bool operator!=(const TracedReferenceBase& lhs,
- const TracedReferenceBase& rhs) {
- return !(lhs == rhs);
-}
-
-template <typename U>
-V8_INLINE bool operator!=(const TracedReferenceBase& lhs,
- const v8::Local<U>& rhs) {
- return !(lhs == rhs);
-}
-
-template <typename U>
-V8_INLINE bool operator!=(const v8::Local<U>& lhs,
- const TracedReferenceBase& rhs) {
- return !(rhs == lhs);
-}
-
-template <class T>
-template <class S>
-void TracedGlobal<T>::Reset(Isolate* isolate, const Local<S>& other) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- Reset();
- if (other.IsEmpty()) return;
- this->val_ = this->New(isolate, other.val_, &this->val_,
- BasicTracedReference<T>::kWithDestructor);
-}
-
-template <class T>
-template <class S>
-TracedGlobal<T>& TracedGlobal<T>::operator=(TracedGlobal<S>&& rhs) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- *this = std::move(rhs.template As<T>());
- return *this;
-}
-
-template <class T>
-template <class S>
-TracedGlobal<T>& TracedGlobal<T>::operator=(const TracedGlobal<S>& rhs) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- *this = rhs.template As<T>();
- return *this;
-}
-
-template <class T>
-TracedGlobal<T>& TracedGlobal<T>::operator=(TracedGlobal&& rhs) {
- if (this != &rhs) {
- V8::MoveTracedGlobalReference(
- reinterpret_cast<internal::Address**>(&rhs.val_),
- reinterpret_cast<internal::Address**>(&this->val_));
- }
- return *this;
-}
-
-template <class T>
-TracedGlobal<T>& TracedGlobal<T>::operator=(const TracedGlobal& rhs) {
- if (this != &rhs) {
- this->Reset();
- if (rhs.val_ != nullptr) {
- V8::CopyTracedGlobalReference(
- reinterpret_cast<const internal::Address* const*>(&rhs.val_),
- reinterpret_cast<internal::Address**>(&this->val_));
- }
- }
- return *this;
-}
-
-template <class T>
-template <class S>
-void TracedReference<T>::Reset(Isolate* isolate, const Local<S>& other) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- this->Reset();
- if (other.IsEmpty()) return;
- this->SetSlotThreadSafe(
- this->New(isolate, other.val_, &this->val_,
- BasicTracedReference<T>::kWithoutDestructor));
-}
-
-template <class T>
-template <class S>
-TracedReference<T>& TracedReference<T>::operator=(TracedReference<S>&& rhs) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- *this = std::move(rhs.template As<T>());
- return *this;
-}
-
-template <class T>
-template <class S>
-TracedReference<T>& TracedReference<T>::operator=(
- const TracedReference<S>& rhs) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- *this = rhs.template As<T>();
- return *this;
-}
-
-template <class T>
-TracedReference<T>& TracedReference<T>::operator=(TracedReference&& rhs) {
- if (this != &rhs) {
- V8::MoveTracedGlobalReference(
- reinterpret_cast<internal::Address**>(&rhs.val_),
- reinterpret_cast<internal::Address**>(&this->val_));
- }
- return *this;
-}
-
-template <class T>
-TracedReference<T>& TracedReference<T>::operator=(const TracedReference& rhs) {
- if (this != &rhs) {
- this->Reset();
- if (rhs.val_ != nullptr) {
- V8::CopyTracedGlobalReference(
- reinterpret_cast<const internal::Address* const*>(&rhs.val_),
- reinterpret_cast<internal::Address**>(&this->val_));
- }
- }
- return *this;
-}
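
For embedders integrating with V8's heap tracer, a brief sketch of the
reference type these assignments service; a TracedReference is kept alive by
tracing rather than by a destructor:

v8::TracedReference<v8::Object> ref;
ref.Reset(isolate, object);                     // object: Local<Object>
v8::Local<v8::Value> local = ref.Get(isolate);  // re-open as a Local
ref.Reset();                                    // drop the reference
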
-
-void TracedReferenceBase::SetWrapperClassId(uint16_t class_id) {
- using I = internal::Internals;
- if (IsEmpty()) return;
- internal::Address* obj = reinterpret_cast<internal::Address*>(val_);
- uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
- *reinterpret_cast<uint16_t*>(addr) = class_id;
-}
-
-uint16_t TracedReferenceBase::WrapperClassId() const {
- using I = internal::Internals;
- if (IsEmpty()) return 0;
- internal::Address* obj = reinterpret_cast<internal::Address*>(val_);
- uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + I::kNodeClassIdOffset;
- return *reinterpret_cast<uint16_t*>(addr);
-}
-
-template <class T>
-void TracedGlobal<T>::SetFinalizationCallback(
- void* parameter, typename WeakCallbackInfo<void>::Callback callback) {
- V8::SetFinalizationCallbackTraced(
- reinterpret_cast<internal::Address*>(this->val_), parameter, callback);
-}
-
-template <typename T>
-ReturnValue<T>::ReturnValue(internal::Address* slot) : value_(slot) {}
-
-template <typename T>
-template <typename S>
-void ReturnValue<T>::Set(const Global<S>& handle) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- if (V8_UNLIKELY(handle.IsEmpty())) {
- *value_ = GetDefaultValue();
- } else {
- *value_ = *reinterpret_cast<internal::Address*>(*handle);
- }
-}
-
-template <typename T>
-template <typename S>
-void ReturnValue<T>::Set(const BasicTracedReference<S>& handle) {
- static_assert(std::is_base_of<T, S>::value, "type check");
- if (V8_UNLIKELY(handle.IsEmpty())) {
- *value_ = GetDefaultValue();
- } else {
- *value_ = *reinterpret_cast<internal::Address*>(handle.val_);
- }
-}
-
-template <typename T>
-template <typename S>
-void ReturnValue<T>::Set(const Local<S> handle) {
- static_assert(std::is_void<T>::value || std::is_base_of<T, S>::value,
- "type check");
- if (V8_UNLIKELY(handle.IsEmpty())) {
- *value_ = GetDefaultValue();
- } else {
- *value_ = *reinterpret_cast<internal::Address*>(*handle);
- }
-}
-
-template<typename T>
-void ReturnValue<T>::Set(double i) {
- static_assert(std::is_base_of<T, Number>::value, "type check");
- Set(Number::New(GetIsolate(), i));
-}
-
-template<typename T>
-void ReturnValue<T>::Set(int32_t i) {
- static_assert(std::is_base_of<T, Integer>::value, "type check");
- using I = internal::Internals;
- if (V8_LIKELY(I::IsValidSmi(i))) {
- *value_ = I::IntToSmi(i);
- return;
- }
- Set(Integer::New(GetIsolate(), i));
-}
-
-template<typename T>
-void ReturnValue<T>::Set(uint32_t i) {
- static_assert(std::is_base_of<T, Integer>::value, "type check");
-  // Check that the value fits into int32_t, i.e. the top bit is not set.
- bool fits_into_int32_t = (i & (1U << 31)) == 0;
- if (V8_LIKELY(fits_into_int32_t)) {
- Set(static_cast<int32_t>(i));
- return;
- }
- Set(Integer::NewFromUnsigned(GetIsolate(), i));
-}
-
-template<typename T>
-void ReturnValue<T>::Set(bool value) {
- static_assert(std::is_base_of<T, Boolean>::value, "type check");
- using I = internal::Internals;
- int root_index;
- if (value) {
- root_index = I::kTrueValueRootIndex;
- } else {
- root_index = I::kFalseValueRootIndex;
- }
- *value_ = *I::GetRoot(GetIsolate(), root_index);
-}
-
-template<typename T>
-void ReturnValue<T>::SetNull() {
- static_assert(std::is_base_of<T, Primitive>::value, "type check");
- using I = internal::Internals;
- *value_ = *I::GetRoot(GetIsolate(), I::kNullValueRootIndex);
-}
-
-template<typename T>
-void ReturnValue<T>::SetUndefined() {
- static_assert(std::is_base_of<T, Primitive>::value, "type check");
- using I = internal::Internals;
- *value_ = *I::GetRoot(GetIsolate(), I::kUndefinedValueRootIndex);
-}
-
-template<typename T>
-void ReturnValue<T>::SetEmptyString() {
- static_assert(std::is_base_of<T, String>::value, "type check");
- using I = internal::Internals;
- *value_ = *I::GetRoot(GetIsolate(), I::kEmptyStringRootIndex);
-}
-
-template <typename T>
-Isolate* ReturnValue<T>::GetIsolate() const {
- // Isolate is always the pointer below the default value on the stack.
- return *reinterpret_cast<Isolate**>(&value_[-2]);
-}
-
-template <typename T>
-Local<Value> ReturnValue<T>::Get() const {
- using I = internal::Internals;
- if (*value_ == *I::GetRoot(GetIsolate(), I::kTheHoleValueRootIndex))
- return Local<Value>(*Undefined(GetIsolate()));
- return Local<Value>::New(GetIsolate(), reinterpret_cast<Value*>(value_));
-}
-
-template <typename T>
-template <typename S>
-void ReturnValue<T>::Set(S* whatever) {
- static_assert(sizeof(S) < 0, "incompilable to prevent inadvertent misuse");
-}
-
-template <typename T>
-internal::Address ReturnValue<T>::GetDefaultValue() {
- // Default value is always the pointer below value_ on the stack.
- return value_[-1];
-}
-
-template <typename T>
-FunctionCallbackInfo<T>::FunctionCallbackInfo(internal::Address* implicit_args,
- internal::Address* values,
- int length)
- : implicit_args_(implicit_args), values_(values), length_(length) {}
-
-template<typename T>
-Local<Value> FunctionCallbackInfo<T>::operator[](int i) const {
- // values_ points to the first argument (not the receiver).
- if (i < 0 || length_ <= i) return Local<Value>(*Undefined(GetIsolate()));
- return Local<Value>(reinterpret_cast<Value*>(values_ + i));
-}
-
-
-template<typename T>
-Local<Object> FunctionCallbackInfo<T>::This() const {
- // values_ points to the first argument (not the receiver).
- return Local<Object>(reinterpret_cast<Object*>(values_ - 1));
-}
-
-
-template<typename T>
-Local<Object> FunctionCallbackInfo<T>::Holder() const {
- return Local<Object>(reinterpret_cast<Object*>(
- &implicit_args_[kHolderIndex]));
-}
-
-template <typename T>
-Local<Value> FunctionCallbackInfo<T>::NewTarget() const {
- return Local<Value>(
- reinterpret_cast<Value*>(&implicit_args_[kNewTargetIndex]));
-}
-
-template <typename T>
-Local<Value> FunctionCallbackInfo<T>::Data() const {
- return Local<Value>(reinterpret_cast<Value*>(&implicit_args_[kDataIndex]));
-}
-
-
-template<typename T>
-Isolate* FunctionCallbackInfo<T>::GetIsolate() const {
- return *reinterpret_cast<Isolate**>(&implicit_args_[kIsolateIndex]);
-}
-
-
-template<typename T>
-ReturnValue<T> FunctionCallbackInfo<T>::GetReturnValue() const {
- return ReturnValue<T>(&implicit_args_[kReturnValueIndex]);
-}
-
-
-template<typename T>
-bool FunctionCallbackInfo<T>::IsConstructCall() const {
- return !NewTarget()->IsUndefined();
-}
-
-
-template<typename T>
-int FunctionCallbackInfo<T>::Length() const {
- return length_;
-}
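
These accessors are typically used together inside a function callback. A
minimal sketch, with argument type checks elided (both arguments are assumed
to be numbers):

void Add(const v8::FunctionCallbackInfo<v8::Value>& info) {
  if (info.Length() < 2) {
    info.GetReturnValue().SetUndefined();
    return;
  }
  double a = info[0].As<v8::Number>()->Value();
  double b = info[1].As<v8::Number>()->Value();
  info.GetReturnValue().Set(a + b);  // the Set(double) overload above
}
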
-
-ScriptOrigin::ScriptOrigin(
- Local<Value> resource_name, Local<Integer> line_offset,
- Local<Integer> column_offset, Local<Boolean> is_shared_cross_origin,
- Local<Integer> script_id, Local<Value> source_map_url,
- Local<Boolean> is_opaque, Local<Boolean> is_wasm, Local<Boolean> is_module,
- Local<PrimitiveArray> host_defined_options)
- : ScriptOrigin(
- Isolate::GetCurrent(), resource_name,
- line_offset.IsEmpty() ? 0 : static_cast<int>(line_offset->Value()),
- column_offset.IsEmpty() ? 0
- : static_cast<int>(column_offset->Value()),
- !is_shared_cross_origin.IsEmpty() && is_shared_cross_origin->IsTrue(),
- static_cast<int>(script_id.IsEmpty() ? -1 : script_id->Value()),
- source_map_url, !is_opaque.IsEmpty() && is_opaque->IsTrue(),
- !is_wasm.IsEmpty() && is_wasm->IsTrue(),
- !is_module.IsEmpty() && is_module->IsTrue(), host_defined_options) {}
-
-ScriptOrigin::ScriptOrigin(Local<Value> resource_name, int line_offset,
- int column_offset, bool is_shared_cross_origin,
- int script_id, Local<Value> source_map_url,
- bool is_opaque, bool is_wasm, bool is_module,
- Local<PrimitiveArray> host_defined_options)
- : isolate_(Isolate::GetCurrent()),
- resource_name_(resource_name),
- resource_line_offset_(line_offset),
- resource_column_offset_(column_offset),
- options_(is_shared_cross_origin, is_opaque, is_wasm, is_module),
- script_id_(script_id),
- source_map_url_(source_map_url),
- host_defined_options_(host_defined_options) {}
-
-ScriptOrigin::ScriptOrigin(Isolate* isolate, Local<Value> resource_name,
- int line_offset, int column_offset,
- bool is_shared_cross_origin, int script_id,
- Local<Value> source_map_url, bool is_opaque,
- bool is_wasm, bool is_module,
- Local<PrimitiveArray> host_defined_options)
- : isolate_(isolate),
- resource_name_(resource_name),
- resource_line_offset_(line_offset),
- resource_column_offset_(column_offset),
- options_(is_shared_cross_origin, is_opaque, is_wasm, is_module),
- script_id_(script_id),
- source_map_url_(source_map_url),
- host_defined_options_(host_defined_options) {}
-
-Local<Value> ScriptOrigin::ResourceName() const { return resource_name_; }
-
-Local<PrimitiveArray> ScriptOrigin::HostDefinedOptions() const {
- return host_defined_options_;
-}
-
-Local<Integer> ScriptOrigin::ResourceLineOffset() const {
- return v8::Integer::New(isolate_, resource_line_offset_);
-}
-
-Local<Integer> ScriptOrigin::ResourceColumnOffset() const {
- return v8::Integer::New(isolate_, resource_column_offset_);
-}
-
-Local<Integer> ScriptOrigin::ScriptID() const {
- return v8::Integer::New(isolate_, script_id_);
-}
-
-int ScriptOrigin::LineOffset() const { return resource_line_offset_; }
-
-int ScriptOrigin::ColumnOffset() const { return resource_column_offset_; }
-
-int ScriptOrigin::ScriptId() const { return script_id_; }
-
-Local<Value> ScriptOrigin::SourceMapUrl() const { return source_map_url_; }
-
-ScriptCompiler::Source::Source(Local<String> string, const ScriptOrigin& origin,
- CachedData* data,
- ConsumeCodeCacheTask* consume_cache_task)
- : source_string(string),
- resource_name(origin.ResourceName()),
- resource_line_offset(origin.LineOffset()),
- resource_column_offset(origin.ColumnOffset()),
- resource_options(origin.Options()),
- source_map_url(origin.SourceMapUrl()),
- host_defined_options(origin.HostDefinedOptions()),
- cached_data(data),
- consume_cache_task(consume_cache_task) {}
-
-ScriptCompiler::Source::Source(Local<String> string, CachedData* data,
- ConsumeCodeCacheTask* consume_cache_task)
- : source_string(string),
- cached_data(data),
- consume_cache_task(consume_cache_task) {}
-
-const ScriptCompiler::CachedData* ScriptCompiler::Source::GetCachedData()
- const {
- return cached_data.get();
-}
-
-const ScriptOriginOptions& ScriptCompiler::Source::GetResourceOptions() const {
- return resource_options;
-}
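
A sketch of compiling and running a script through ScriptCompiler::Source,
assuming an entered `context`:

v8::ScriptOrigin origin(
    isolate, v8::String::NewFromUtf8Literal(isolate, "test.js"));
v8::ScriptCompiler::Source source(
    v8::String::NewFromUtf8Literal(isolate, "6 * 7"), origin);
v8::Local<v8::Script> script;
v8::Local<v8::Value> result;
if (v8::ScriptCompiler::Compile(context, &source).ToLocal(&script) &&
    script->Run(context).ToLocal(&result)) {
  // result now holds the Number 42.
}
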
-
-Local<Boolean> Boolean::New(Isolate* isolate, bool value) {
- return value ? True(isolate) : False(isolate);
-}
-
-void Template::Set(Isolate* isolate, const char* name, Local<Data> value,
- PropertyAttribute attributes) {
- Set(String::NewFromUtf8(isolate, name, NewStringType::kInternalized)
- .ToLocalChecked(),
- value, attributes);
-}
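
This convenience overload internalizes the property name. For illustration,
assuming `Add` is a v8::FunctionCallback such as the sketch earlier:

v8::Local<v8::ObjectTemplate> tmpl = v8::ObjectTemplate::New(isolate);
tmpl->Set(isolate, "answer", v8::Number::New(isolate, 42));
tmpl->Set(isolate, "add", v8::FunctionTemplate::New(isolate, Add));
v8::Local<v8::Object> obj = tmpl->NewInstance(context).ToLocalChecked();
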
-
-FunctionTemplate* FunctionTemplate::Cast(Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return reinterpret_cast<FunctionTemplate*>(data);
-}
-
-ObjectTemplate* ObjectTemplate::Cast(Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return reinterpret_cast<ObjectTemplate*>(data);
-}
-
-Signature* Signature::Cast(Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return reinterpret_cast<Signature*>(data);
-}
-
-AccessorSignature* AccessorSignature::Cast(Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return reinterpret_cast<AccessorSignature*>(data);
-}
-
-Local<Value> Object::GetInternalField(int index) {
-#ifndef V8_ENABLE_CHECKS
- using A = internal::Address;
- using I = internal::Internals;
- A obj = *reinterpret_cast<A*>(this);
- // Fast path: If the object is a plain JSObject, which is the common case, we
- // know where to find the internal fields and can return the value directly.
- int instance_type = I::GetInstanceType(obj);
- if (v8::internal::CanHaveInternalField(instance_type)) {
- int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index);
- A value = I::ReadRawField<A>(obj, offset);
-#ifdef V8_COMPRESS_POINTERS
- // We read the full pointer value and then decompress it in order to avoid
-  // dealing with potential endianness issues.
- value = I::DecompressTaggedAnyField(obj, static_cast<uint32_t>(value));
-#endif
- internal::Isolate* isolate =
- internal::IsolateFromNeverReadOnlySpaceObject(obj);
- A* result = HandleScope::CreateHandle(isolate, value);
- return Local<Value>(reinterpret_cast<Value*>(result));
- }
-#endif
- return SlowGetInternalField(index);
-}
-
-
-void* Object::GetAlignedPointerFromInternalField(int index) {
-#ifndef V8_ENABLE_CHECKS
- using A = internal::Address;
- using I = internal::Internals;
- A obj = *reinterpret_cast<A*>(this);
- // Fast path: If the object is a plain JSObject, which is the common case, we
- // know where to find the internal fields and can return the value directly.
- auto instance_type = I::GetInstanceType(obj);
- if (v8::internal::CanHaveInternalField(instance_type)) {
- int offset = I::kJSObjectHeaderSize + (I::kEmbedderDataSlotSize * index);
-#ifdef V8_HEAP_SANDBOX
- offset += I::kEmbedderDataSlotRawPayloadOffset;
-#endif
- internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj);
- A value = I::ReadExternalPointerField(
- isolate, obj, offset, internal::kEmbedderDataSlotPayloadTag);
- return reinterpret_cast<void*>(value);
- }
-#endif
- return SlowGetAlignedPointerFromInternalField(index);
-}
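
The fast paths above back the common wrapper-object pattern. A sketch,
assuming `templ` had SetInternalFieldCount(1) called before instances were
created and `native` is a 2-byte-aligned pointer:

v8::Local<v8::Object> obj = templ->NewInstance(context).ToLocalChecked();
obj->SetAlignedPointerInInternalField(0, native);
void* back = obj->GetAlignedPointerFromInternalField(0);  // == native
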
-
-String* String::Cast(v8::Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return static_cast<String*>(data);
-}
-
-Local<String> String::Empty(Isolate* isolate) {
- using S = internal::Address;
- using I = internal::Internals;
- I::CheckInitialized(isolate);
- S* slot = I::GetRoot(isolate, I::kEmptyStringRootIndex);
- return Local<String>(reinterpret_cast<String*>(slot));
-}
-
-
-String::ExternalStringResource* String::GetExternalStringResource() const {
- using A = internal::Address;
- using I = internal::Internals;
- A obj = *reinterpret_cast<const A*>(this);
-
- ExternalStringResource* result;
- if (I::IsExternalTwoByteString(I::GetInstanceType(obj))) {
- internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj);
- A value =
- I::ReadExternalPointerField(isolate, obj, I::kStringResourceOffset,
- internal::kExternalStringResourceTag);
- result = reinterpret_cast<String::ExternalStringResource*>(value);
- } else {
- result = GetExternalStringResourceSlow();
- }
-#ifdef V8_ENABLE_CHECKS
- VerifyExternalStringResource(result);
-#endif
- return result;
-}
-
-
-String::ExternalStringResourceBase* String::GetExternalStringResourceBase(
- String::Encoding* encoding_out) const {
- using A = internal::Address;
- using I = internal::Internals;
- A obj = *reinterpret_cast<const A*>(this);
- int type = I::GetInstanceType(obj) & I::kFullStringRepresentationMask;
- *encoding_out = static_cast<Encoding>(type & I::kStringEncodingMask);
- ExternalStringResourceBase* resource;
- if (type == I::kExternalOneByteRepresentationTag ||
- type == I::kExternalTwoByteRepresentationTag) {
- internal::Isolate* isolate = I::GetIsolateForHeapSandbox(obj);
- A value =
- I::ReadExternalPointerField(isolate, obj, I::kStringResourceOffset,
- internal::kExternalStringResourceTag);
- resource = reinterpret_cast<ExternalStringResourceBase*>(value);
- } else {
- resource = GetExternalStringResourceBaseSlow(encoding_out);
- }
-#ifdef V8_ENABLE_CHECKS
- VerifyExternalStringResourceBase(resource, *encoding_out);
-#endif
- return resource;
-}
-
-
-bool Value::IsUndefined() const {
-#ifdef V8_ENABLE_CHECKS
- return FullIsUndefined();
-#else
- return QuickIsUndefined();
-#endif
-}
-
-bool Value::QuickIsUndefined() const {
- using A = internal::Address;
- using I = internal::Internals;
- A obj = *reinterpret_cast<const A*>(this);
- if (!I::HasHeapObjectTag(obj)) return false;
- if (I::GetInstanceType(obj) != I::kOddballType) return false;
- return (I::GetOddballKind(obj) == I::kUndefinedOddballKind);
-}
-
-
-bool Value::IsNull() const {
-#ifdef V8_ENABLE_CHECKS
- return FullIsNull();
-#else
- return QuickIsNull();
-#endif
-}
-
-bool Value::QuickIsNull() const {
- using A = internal::Address;
- using I = internal::Internals;
- A obj = *reinterpret_cast<const A*>(this);
- if (!I::HasHeapObjectTag(obj)) return false;
- if (I::GetInstanceType(obj) != I::kOddballType) return false;
- return (I::GetOddballKind(obj) == I::kNullOddballKind);
-}
-
-bool Value::IsNullOrUndefined() const {
-#ifdef V8_ENABLE_CHECKS
- return FullIsNull() || FullIsUndefined();
-#else
- return QuickIsNullOrUndefined();
-#endif
-}
-
-bool Value::QuickIsNullOrUndefined() const {
- using A = internal::Address;
- using I = internal::Internals;
- A obj = *reinterpret_cast<const A*>(this);
- if (!I::HasHeapObjectTag(obj)) return false;
- if (I::GetInstanceType(obj) != I::kOddballType) return false;
- int kind = I::GetOddballKind(obj);
- return kind == I::kNullOddballKind || kind == I::kUndefinedOddballKind;
-}
-
-bool Value::IsString() const {
-#ifdef V8_ENABLE_CHECKS
- return FullIsString();
-#else
- return QuickIsString();
-#endif
-}
-
-bool Value::QuickIsString() const {
- using A = internal::Address;
- using I = internal::Internals;
- A obj = *reinterpret_cast<const A*>(this);
- if (!I::HasHeapObjectTag(obj)) return false;
- return (I::GetInstanceType(obj) < I::kFirstNonstringType);
-}
-
-
-template <class T> Value* Value::Cast(T* value) {
- return static_cast<Value*>(value);
-}
-
-template <>
-V8_INLINE Value* Value::Cast(Data* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Value*>(value);
-}
-
-Boolean* Boolean::Cast(v8::Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return static_cast<Boolean*>(data);
-}
-
-Name* Name::Cast(v8::Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return static_cast<Name*>(data);
-}
-
-Symbol* Symbol::Cast(v8::Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return static_cast<Symbol*>(data);
-}
-
-Private* Private::Cast(Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return reinterpret_cast<Private*>(data);
-}
-
-ModuleRequest* ModuleRequest::Cast(Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return reinterpret_cast<ModuleRequest*>(data);
-}
-
-Module* Module::Cast(Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return reinterpret_cast<Module*>(data);
-}
-
-Number* Number::Cast(v8::Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return static_cast<Number*>(data);
-}
-
-Integer* Integer::Cast(v8::Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return static_cast<Integer*>(data);
-}
-
-Int32* Int32::Cast(v8::Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return static_cast<Int32*>(data);
-}
-
-Uint32* Uint32::Cast(v8::Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return static_cast<Uint32*>(data);
-}
-
-BigInt* BigInt::Cast(v8::Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return static_cast<BigInt*>(data);
-}
-
-Context* Context::Cast(v8::Data* data) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(data);
-#endif
- return static_cast<Context*>(data);
-}
-
-Date* Date::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Date*>(value);
-}
-
-
-StringObject* StringObject::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<StringObject*>(value);
-}
-
-
-SymbolObject* SymbolObject::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<SymbolObject*>(value);
-}
-
-
-NumberObject* NumberObject::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<NumberObject*>(value);
-}
-
-BigIntObject* BigIntObject::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<BigIntObject*>(value);
-}
-
-BooleanObject* BooleanObject::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<BooleanObject*>(value);
-}
-
-
-RegExp* RegExp::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<RegExp*>(value);
-}
-
-
-Object* Object::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Object*>(value);
-}
-
-
-Array* Array::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Array*>(value);
-}
-
-
-Map* Map::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Map*>(value);
-}
-
-
-Set* Set::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Set*>(value);
-}
-
-
-Promise* Promise::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Promise*>(value);
-}
-
-
-Proxy* Proxy::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Proxy*>(value);
-}
-
-WasmMemoryObject* WasmMemoryObject::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<WasmMemoryObject*>(value);
-}
-
-WasmModuleObject* WasmModuleObject::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<WasmModuleObject*>(value);
-}
-
-Promise::Resolver* Promise::Resolver::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Promise::Resolver*>(value);
-}
-
-
-ArrayBuffer* ArrayBuffer::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<ArrayBuffer*>(value);
-}
-
-
-ArrayBufferView* ArrayBufferView::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<ArrayBufferView*>(value);
-}
-
-
-TypedArray* TypedArray::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<TypedArray*>(value);
-}
-
-
-Uint8Array* Uint8Array::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Uint8Array*>(value);
-}
-
-
-Int8Array* Int8Array::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Int8Array*>(value);
-}
-
-
-Uint16Array* Uint16Array::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Uint16Array*>(value);
-}
-
-
-Int16Array* Int16Array::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Int16Array*>(value);
-}
-
-
-Uint32Array* Uint32Array::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Uint32Array*>(value);
-}
-
-
-Int32Array* Int32Array::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Int32Array*>(value);
-}
-
-
-Float32Array* Float32Array::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Float32Array*>(value);
-}
-
-
-Float64Array* Float64Array::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Float64Array*>(value);
-}
-
-BigInt64Array* BigInt64Array::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<BigInt64Array*>(value);
-}
-
-BigUint64Array* BigUint64Array::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<BigUint64Array*>(value);
-}
-
-Uint8ClampedArray* Uint8ClampedArray::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Uint8ClampedArray*>(value);
-}
-
-
-DataView* DataView::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<DataView*>(value);
-}
-
-
-SharedArrayBuffer* SharedArrayBuffer::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<SharedArrayBuffer*>(value);
-}
-
-
-Function* Function::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<Function*>(value);
-}
-
-
-External* External::Cast(v8::Value* value) {
-#ifdef V8_ENABLE_CHECKS
- CheckCast(value);
-#endif
- return static_cast<External*>(value);
-}
-
-
-template<typename T>
-Isolate* PropertyCallbackInfo<T>::GetIsolate() const {
- return *reinterpret_cast<Isolate**>(&args_[kIsolateIndex]);
-}
-
-
-template<typename T>
-Local<Value> PropertyCallbackInfo<T>::Data() const {
- return Local<Value>(reinterpret_cast<Value*>(&args_[kDataIndex]));
-}
-
-
-template<typename T>
-Local<Object> PropertyCallbackInfo<T>::This() const {
- return Local<Object>(reinterpret_cast<Object*>(&args_[kThisIndex]));
-}
-
-
-template<typename T>
-Local<Object> PropertyCallbackInfo<T>::Holder() const {
- return Local<Object>(reinterpret_cast<Object*>(&args_[kHolderIndex]));
-}
-
-
-template<typename T>
-ReturnValue<T> PropertyCallbackInfo<T>::GetReturnValue() const {
- return ReturnValue<T>(&args_[kReturnValueIndex]);
-}
-
-template <typename T>
-bool PropertyCallbackInfo<T>::ShouldThrowOnError() const {
- using I = internal::Internals;
- if (args_[kShouldThrowOnErrorIndex] !=
- I::IntToSmi(I::kInferShouldThrowMode)) {
- return args_[kShouldThrowOnErrorIndex] != I::IntToSmi(I::kDontThrow);
- }
- return v8::internal::ShouldThrowOnError(
- reinterpret_cast<v8::internal::Isolate*>(GetIsolate()));
-}
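
These accessors appear together in property callbacks. A sketch of a named
accessor getter reading a wrapped native object (`Wrapper` as in the
weak-handle sketch earlier, with a hypothetical double field `x`):

void GetX(v8::Local<v8::String> name,
          const v8::PropertyCallbackInfo<v8::Value>& info) {
  auto* wrapper = static_cast<Wrapper*>(
      info.Holder()->GetAlignedPointerFromInternalField(0));
  info.GetReturnValue().Set(wrapper->x);  // Set(double) overload
}
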
-
-Local<Primitive> Undefined(Isolate* isolate) {
- using S = internal::Address;
- using I = internal::Internals;
- I::CheckInitialized(isolate);
- S* slot = I::GetRoot(isolate, I::kUndefinedValueRootIndex);
- return Local<Primitive>(reinterpret_cast<Primitive*>(slot));
-}
-
-
-Local<Primitive> Null(Isolate* isolate) {
- using S = internal::Address;
- using I = internal::Internals;
- I::CheckInitialized(isolate);
- S* slot = I::GetRoot(isolate, I::kNullValueRootIndex);
- return Local<Primitive>(reinterpret_cast<Primitive*>(slot));
-}
-
-
-Local<Boolean> True(Isolate* isolate) {
- using S = internal::Address;
- using I = internal::Internals;
- I::CheckInitialized(isolate);
- S* slot = I::GetRoot(isolate, I::kTrueValueRootIndex);
- return Local<Boolean>(reinterpret_cast<Boolean*>(slot));
-}
-
-
-Local<Boolean> False(Isolate* isolate) {
- using S = internal::Address;
- using I = internal::Internals;
- I::CheckInitialized(isolate);
- S* slot = I::GetRoot(isolate, I::kFalseValueRootIndex);
- return Local<Boolean>(reinterpret_cast<Boolean*>(slot));
-}
-
-
-void Isolate::SetData(uint32_t slot, void* data) {
- using I = internal::Internals;
- I::SetEmbedderData(this, slot, data);
-}
-
-
-void* Isolate::GetData(uint32_t slot) {
- using I = internal::Internals;
- return I::GetEmbedderData(this, slot);
-}
-
-
-uint32_t Isolate::GetNumberOfDataSlots() {
- using I = internal::Internals;
- return I::kNumIsolateDataSlots;
-}
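
A sketch of the embedder-data-slot pattern; PerIsolateState is a
hypothetical embedder type:

struct PerIsolateState { /* embedder bookkeeping */ };

auto* state = new PerIsolateState();
isolate->SetData(0, state);
// ... later, e.g. inside a callback that only has the isolate:
auto* s = static_cast<PerIsolateState*>(isolate->GetData(0));
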
-
-template <class T>
-MaybeLocal<T> Isolate::GetDataFromSnapshotOnce(size_t index) {
- T* data = reinterpret_cast<T*>(GetDataFromSnapshotOnce(index));
- if (data) internal::PerformCastCheck(data);
- return Local<T>(data);
-}
-
-Local<Value> Context::GetEmbedderData(int index) {
-#ifndef V8_ENABLE_CHECKS
- using A = internal::Address;
- using I = internal::Internals;
- A ctx = *reinterpret_cast<const A*>(this);
- A embedder_data =
- I::ReadTaggedPointerField(ctx, I::kNativeContextEmbedderDataOffset);
- int value_offset =
- I::kEmbedderDataArrayHeaderSize + (I::kEmbedderDataSlotSize * index);
- A value = I::ReadRawField<A>(embedder_data, value_offset);
-#ifdef V8_COMPRESS_POINTERS
- // We read the full pointer value and then decompress it in order to avoid
-  // dealing with potential endianness issues.
- value =
- I::DecompressTaggedAnyField(embedder_data, static_cast<uint32_t>(value));
-#endif
- internal::Isolate* isolate = internal::IsolateFromNeverReadOnlySpaceObject(
- *reinterpret_cast<A*>(this));
- A* result = HandleScope::CreateHandle(isolate, value);
- return Local<Value>(reinterpret_cast<Value*>(result));
-#else
- return SlowGetEmbedderData(index);
-#endif
-}
-
-
-void* Context::GetAlignedPointerFromEmbedderData(int index) {
-#ifndef V8_ENABLE_CHECKS
- using A = internal::Address;
- using I = internal::Internals;
- A ctx = *reinterpret_cast<const A*>(this);
- A embedder_data =
- I::ReadTaggedPointerField(ctx, I::kNativeContextEmbedderDataOffset);
- int value_offset =
- I::kEmbedderDataArrayHeaderSize + (I::kEmbedderDataSlotSize * index);
-#ifdef V8_HEAP_SANDBOX
- value_offset += I::kEmbedderDataSlotRawPayloadOffset;
-#endif
- internal::Isolate* isolate = I::GetIsolateForHeapSandbox(ctx);
- return reinterpret_cast<void*>(
- I::ReadExternalPointerField(isolate, embedder_data, value_offset,
- internal::kEmbedderDataSlotPayloadTag));
-#else
- return SlowGetAlignedPointerFromEmbedderData(index);
-#endif
-}
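
A sketch of the per-context analogue, assuming `binding` is a 2-byte-aligned
pointer; slot 0 is skipped because it historically had a special meaning for
Chrome's debugger:

context->SetAlignedPointerInEmbedderData(1, binding);
void* data = context->GetAlignedPointerFromEmbedderData(1);  // == binding
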
-
-template <class T>
-MaybeLocal<T> Context::GetDataFromSnapshotOnce(size_t index) {
- T* data = reinterpret_cast<T*>(GetDataFromSnapshotOnce(index));
- if (data) internal::PerformCastCheck(data);
- return Local<T>(data);
-}
-
-template <class T>
-size_t SnapshotCreator::AddData(Local<Context> context, Local<T> object) {
- T* object_ptr = *object;
- internal::Address* p = reinterpret_cast<internal::Address*>(object_ptr);
- return AddData(context, *p);
-}
-
-template <class T>
-size_t SnapshotCreator::AddData(Local<T> object) {
- T* object_ptr = *object;
- internal::Address* p = reinterpret_cast<internal::Address*>(object_ptr);
- return AddData(*p);
-}
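
A sketch of the round trip these helpers enable, assuming a
v8::SnapshotCreator `creator` and a `function_template` created while
building the snapshot:

// While building the snapshot:
size_t index = creator.AddData(context, function_template);
// After deserializing the context in the embedder (one-shot per index):
v8::Local<v8::FunctionTemplate> tmpl =
    context->GetDataFromSnapshotOnce<v8::FunctionTemplate>(index)
        .ToLocalChecked();
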
@@ -12335,7 +79,6 @@ size_t SnapshotCreator::AddData(Local<T> object) {
/**
 * \example shell.cc
 * A simple shell that takes a list of expressions on the
 * command-line and executes them.
 */
-
/**
 * \example process.cc
 */
diff --git a/deps/v8/infra/mb/mb_config.pyl b/deps/v8/infra/mb/mb_config.pyl
index 236dc1e847..f80c637634 100644
--- a/deps/v8/infra/mb/mb_config.pyl
+++ b/deps/v8/infra/mb/mb_config.pyl
@@ -77,6 +77,9 @@
'V8 Linux64 - no wasm - builder': 'release_x64_webassembly_disabled',
# Windows.
'V8 Win32 - builder': 'release_x86_minimal_symbols',
+ 'V8 Win32 - builder (goma cache silo)': 'release_x64',
+ 'V8 Win32 - builder (reclient)': 'release_x86_minimal_symbols_reclient',
+ 'V8 Win32 - builder (reclient compare)': 'release_x86_minimal_symbols_reclient',
'V8 Win32 - debug builder': 'debug_x86_minimal_symbols',
# TODO(machenbach): Remove after switching to x64 on infra side.
'V8 Win64 ASAN': 'release_x64_asan_no_lsan',
@@ -105,6 +108,7 @@
'V8 Linux gcc': 'release_x86_gcc',
# FYI.
'V8 iOS - sim': 'release_x64_ios_simulator',
+ 'V8 Linux64 - cppgc-non-default - debug - builder': 'debug_x64_non_default_cppgc',
'V8 Linux64 - debug - perfetto - builder': 'debug_x64_perfetto',
'V8 Linux64 - disable runtime call stats': 'release_x64_disable_runtime_call_stats',
'V8 Linux64 - debug - single generation - builder': 'debug_x64_single_generation',
@@ -194,6 +198,8 @@
'V8 Linux - s390x - sim': 'release_simulate_s390x',
# RISC-V
'V8 Linux - riscv64 - sim - builder': 'release_simulate_riscv64',
+ # Loongson
+ 'V8 Linux - loong64 - sim - builder': 'release_simulate_loong64',
},
'tryserver.v8': {
'v8_android_arm_compile_rel': 'release_android_arm',
@@ -216,6 +222,7 @@
'v8_linux_vtunejit': 'debug_x86_vtunejit',
'v8_linux64_arm64_pointer_compression_rel_ng':
'release_simulate_arm64_pointer_compression',
+ 'v8_linux64_cppgc_non_default_dbg_ng': 'debug_x64_non_default_cppgc',
'v8_linux64_dbg_ng': 'debug_x64_trybot',
'v8_linux64_dict_tracking_dbg_ng': 'debug_x64_dict_tracking_trybot',
'v8_linux64_disable_runtime_call_stats_rel': 'release_x64_disable_runtime_call_stats',
@@ -237,6 +244,7 @@
'v8_linux64_asan_rel_ng': 'release_x64_asan_minimal_symbols',
'v8_linux64_cfi_rel_ng': 'release_x64_cfi',
'v8_linux64_fuzzilli_ng': 'release_x64_fuzzilli',
+ 'v8_linux64_loong64_rel_ng': 'release_simulate_loong64',
'v8_linux64_msan_rel_ng': 'release_simulate_arm64_msan_minimal_symbols',
'v8_linux64_riscv64_rel_ng': 'release_simulate_riscv64',
'v8_linux64_tsan_rel_ng': 'release_x64_tsan_minimal_symbols',
@@ -282,6 +290,9 @@
'v8_linux_arm64_gc_stress_dbg_ng': 'debug_simulate_arm64',
'v8_linux_mipsel_compile_rel': 'release_simulate_mipsel',
'v8_linux_mips64el_compile_rel': 'release_simulate_mips64el',
+ 'v8_numfuzz_ng': 'release_x64',
+ 'v8_numfuzz_dbg_ng': 'debug_x64',
+ 'v8_numfuzz_tsan_ng': 'release_x64_tsan',
},
},
@@ -413,6 +424,8 @@
'release_bot', 'simulate_arm64', 'msan_no_origins'],
'release_simulate_arm64_trybot': [
'release_trybot', 'simulate_arm64'],
+ 'release_simulate_loong64': [
+ 'release_bot', 'simulate_loong64'],
'release_simulate_mipsel': [
'release_bot', 'simulate_mipsel'],
'release_simulate_mips64el': [
@@ -562,6 +575,8 @@
'debug_bot', 'x64', 'v8_enable_heap_sandbox'],
'debug_x64_minimal_symbols': [
'debug_bot', 'x64', 'minimal_symbols'],
+ 'debug_x64_non_default_cppgc': [
+ 'debug_bot', 'x64', 'non_default_cppgc'],
'debug_x64_perfetto': [
'debug_bot', 'x64', 'perfetto'],
'debug_x64_single_generation': [
@@ -606,6 +621,8 @@
'release_trybot', 'x86', 'gcmole'],
'release_x86_minimal_symbols': [
'release_bot', 'x86', 'minimal_symbols'],
+ 'release_x86_minimal_symbols_reclient': [
+ 'release_bot_reclient', 'x86', 'minimal_symbols'],
'release_x86_no_i18n_trybot': [
'release_trybot', 'x86', 'v8_no_i18n'],
'release_x86_predictable': [
@@ -779,6 +796,10 @@
'gn_args': 'use_sysroot=false',
},
+ 'non_default_cppgc': {
+ 'gn_args': 'cppgc_enable_object_names=true cppgc_enable_young_generation=true',
+ },
+
'perfetto': {
'gn_args': 'v8_use_perfetto=true',
},
@@ -823,6 +844,10 @@
'gn_args': 'target_cpu="x64" v8_target_cpu="arm64"',
},
+ 'simulate_loong64': {
+ 'gn_args': 'target_cpu="x64" v8_target_cpu="loong64"',
+ },
+
'simulate_mipsel': {
'gn_args':
'target_cpu="x86" v8_target_cpu="mipsel" mips_arch_variant="r2"',
diff --git a/deps/v8/infra/testing/builders.pyl b/deps/v8/infra/testing/builders.pyl
index f37c66ba90..abdadb9af9 100644
--- a/deps/v8/infra/testing/builders.pyl
+++ b/deps/v8/infra/testing/builders.pyl
@@ -42,7 +42,6 @@
{'name': 'mozilla', 'variant': 'default'},
{'name': 'test262', 'variant': 'default', 'shards': 10},
{'name': 'v8testing', 'variant': 'default', 'shards': 4},
- {'name': 'v8testing', 'variant': 'trusted', 'shards': 4},
],
},
##############################################################################
@@ -286,7 +285,6 @@
{'name': 'test262', 'variant': 'default'},
{'name': 'v8testing', 'shards': 7},
{'name': 'v8testing', 'variant': 'extra', 'shards': 7},
- {'name': 'v8testing', 'variant': 'trusted', 'shards': 2},
],
},
'v8_linux_arm_lite_rel_ng_triggered': {
@@ -307,7 +305,6 @@
{'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 10},
{'name': 'v8testing', 'variant': 'extra', 'shards': 10},
- {'name': 'v8testing', 'variant': 'trusted', 'shards': 2},
],
},
##############################################################################
@@ -335,6 +332,15 @@
{'name': 'v8testing', 'shards': 3},
],
},
+ 'v8_linux64_cppgc_non_default_dbg_ng_triggered': {
+ 'swarming_dimensions' : {
+ 'cpu': 'x86-64-avx2',
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 3},
+ ],
+ },
'v8_linux64_dbg_ng_triggered': {
'swarming_dimensions' : {
'cpu': 'x86-64-avx2',
@@ -355,6 +361,7 @@
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
{'name': 'v8testing', 'variant': 'stress_concurrent_allocation'},
{'name': 'v8testing', 'variant': 'stress_concurrent_inlining'},
+ {'name': 'v8testing', 'variant': 'no_concurrent_inlining'},
],
},
'v8_linux64_dict_tracking_dbg_ng_triggered': {
@@ -396,8 +403,6 @@
{'name': 'mjsunit', 'variant': 'stress_snapshot'},
# Experimental regexp engine.
{'name': 'mjsunit', 'variant': 'experimental_regexp'},
- # Concurrent inlining.
- {'name': 'mjsunit', 'variant': 'concurrent_inlining'},
# Wasm write protect code space.
{'name': 'mjsunit', 'variant': 'wasm_write_protect_code'},
],
@@ -461,6 +466,7 @@
{'name': 'v8testing', 'variant': 'extra'},
{'name': 'v8testing', 'variant': 'no_lfa'},
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
+ {'name': 'v8testing', 'variant': 'no_concurrent_inlining'},
],
},
'v8_linux64_perfetto_dbg_ng_triggered': {
@@ -506,6 +512,7 @@
{'name': 'v8testing', 'variant': 'no_lfa'},
{'name': 'v8testing', 'variant': 'slow_path'},
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
+ {'name': 'v8testing', 'variant': 'no_concurrent_inlining'},
],
},
'v8_linux64_tsan_rel_ng_triggered': {
@@ -576,7 +583,6 @@
{'name': 'test262', 'variant': 'default', 'shards': 4},
{'name': 'v8testing', 'shards': 14},
{'name': 'v8testing', 'variant': 'extra', 'shards': 12},
- {'name': 'v8testing', 'variant': 'trusted', 'shards': 5},
],
},
'v8_linux_arm64_gc_stress_dbg_ng_triggered': {
@@ -597,7 +603,6 @@
{'name': 'test262', 'variant': 'default', 'shards': 4},
{'name': 'v8testing', 'shards': 14},
{'name': 'v8testing', 'variant': 'extra', 'shards': 12},
- {'name': 'v8testing', 'variant': 'trusted', 'shards': 5},
],
},
'v8_linux_arm64_cfi_rel_ng_triggered': {
@@ -618,6 +623,16 @@
],
},
##############################################################################
+ # Linux64 with Loongson simulators
+ 'v8_linux64_loong64_rel_ng_triggered': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 3},
+ ],
+ },
+ ##############################################################################
# Linux64 with RISC-V simulators
'v8_linux64_riscv64_rel_ng_triggered': {
'swarming_dimensions': {
@@ -1125,6 +1140,7 @@
{'name': 'v8testing', 'variant': 'minor_mc'},
{'name': 'v8testing', 'variant': 'no_lfa'},
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
+ {'name': 'v8testing', 'variant': 'no_concurrent_inlining'},
# Noavx.
{
'name': 'mozilla',
@@ -1186,6 +1202,7 @@
{'name': 'v8testing', 'variant': 'stress_instruction_scheduling'},
{'name': 'v8testing', 'variant': 'stress_concurrent_allocation'},
{'name': 'v8testing', 'variant': 'stress_concurrent_inlining'},
+ {'name': 'v8testing', 'variant': 'no_concurrent_inlining'},
# Noavx.
{
'name': 'mozilla',
@@ -1229,12 +1246,19 @@
{'name': 'mjsunit', 'variant': 'stress_snapshot'},
# Experimental regexp engine.
{'name': 'mjsunit', 'variant': 'experimental_regexp'},
- # Concurrent inlining.
- {'name': 'mjsunit', 'variant': 'concurrent_inlining'},
# Wasm write protect code space.
{'name': 'mjsunit', 'variant': 'wasm_write_protect_code'},
],
},
+ 'V8 Linux64 - cppgc-non-default - debug': {
+ 'swarming_dimensions': {
+ 'cpu': 'x86-64-avx2',
+ 'os': 'Ubuntu-18.04',
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 3},
+ ],
+ },
'V8 Linux64 - debug - perfetto': {
'swarming_dimensions' : {
'os': 'Ubuntu-18.04',
@@ -1284,8 +1308,6 @@
{'name': 'mjsunit', 'variant': 'stress_snapshot'},
# Experimental regexp engine.
{'name': 'mjsunit', 'variant': 'experimental_regexp'},
- # Concurrent inlining.
- {'name': 'mjsunit', 'variant': 'concurrent_inlining'},
# Wasm write protect code space.
{'name': 'mjsunit', 'variant': 'wasm_write_protect_code'},
],
@@ -1627,7 +1649,6 @@
{'name': 'mozilla', 'variant': 'default'},
{'name': 'test262', 'variant': 'default', 'shards': 10},
{'name': 'v8testing', 'variant': 'default', 'shards': 4},
- {'name': 'v8testing', 'variant': 'trusted', 'shards': 4},
],
},
'V8 Arm': {
@@ -1717,7 +1738,6 @@
{'name': 'test262', 'variant': 'default'},
{'name': 'v8testing', 'shards': 6},
{'name': 'v8testing', 'variant': 'extra', 'shards': 3},
- {'name': 'v8testing', 'variant': 'trusted'},
# Armv8-a.
{
'name': 'mozilla',
@@ -1766,7 +1786,6 @@
{'name': 'test262', 'variant': 'default'},
{'name': 'v8testing', 'shards': 10},
{'name': 'v8testing', 'variant': 'extra', 'shards': 10},
- {'name': 'v8testing', 'variant': 'trusted', 'shards': 4},
# Armv8-a.
{
'name': 'mozilla',
@@ -1835,7 +1854,6 @@
{'name': 'test262', 'variant': 'default'},
{'name': 'v8testing', 'shards': 3},
{'name': 'v8testing', 'variant': 'extra', 'shards': 2},
- {'name': 'v8testing', 'variant': 'trusted'},
],
},
'V8 Linux - arm64 - sim - debug': {
@@ -1852,7 +1870,6 @@
{'name': 'test262', 'variant': 'default', 'shards': 2},
{'name': 'v8testing', 'shards': 12},
{'name': 'v8testing', 'variant': 'extra', 'shards': 11},
- {'name': 'v8testing', 'variant': 'trusted', 'shards': 2},
],
},
'V8 Linux - arm64 - sim - gc stress': {
@@ -1872,6 +1889,19 @@
},
],
},
+ 'V8 Linux - loong64 - sim': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'swarming_task_attrs': {
+ 'expiration': 14400,
+ 'hard_timeout': 3600,
+ 'priority': 35,
+ },
+ 'tests': [
+ {'name': 'v8testing', 'shards': 3},
+ ],
+ },
'V8 Linux - mips64el - sim': {
'swarming_dimensions': {
'os': 'Ubuntu-18.04',
@@ -2074,4 +2104,288 @@
},
],
},
+ 'V8 NumFuzz - staging': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'swarming_task_attrs': {
+ 'expiration': 13800,
+ 'hard_timeout': 4200,
+ 'priority': 35,
+ },
+ 'tests': [
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'deopt',
+ 'test_args': ['--infra-staging', '--total-timeout-sec=2100', '--stress-deopt=1']
+ },
+ ],
+ },
+ 'V8 NumFuzz - TSAN - staging': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'swarming_task_attrs': {
+ 'expiration': 13800,
+ 'hard_timeout': 4200,
+ 'priority': 35,
+ },
+ 'tests': [
+ {'name': 'd8testing_random_gc', 'shards': 2},
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'marking',
+ 'test_args': ['--infra-staging', '--total-timeout-sec=2100', '--stress-marking=1']
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'delay',
+ 'test_args': ['--infra-staging', '--total-timeout-sec=2100', '--stress-delay-tasks=1']
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'threads',
+ 'test_args': ['--infra-staging', '--total-timeout-sec=2100', '--stress-thread-pool-size=1']
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'stack',
+ 'test_args': ['--infra-staging', '--total-timeout-sec=2100', '--stress-stack-size=1']
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'combined',
+ 'test_args': [
+ '--infra-staging',
+ '--total-timeout-sec=2100',
+ '--stress-delay-tasks=4',
+ '--stress-deopt=2',
+ '--stress-compaction=2',
+ '--stress-gc=4',
+ '--stress-marking=4',
+ '--stress-scavenge=4',
+ '--stress-thread-pool-size=2',
+ '--stress-stack-size=1',
+ ],
+ 'shards': 4
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'scavenge',
+ 'test_args': ['--infra-staging', '--total-timeout-sec=2100', '--stress-scavenge=1']
+ },
+ ],
+ },
+ 'V8 NumFuzz - debug - staging': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'swarming_task_attrs': {
+ 'expiration': 13800,
+ 'hard_timeout': 4200,
+ 'priority': 35,
+ },
+ 'tests': [
+ {'name': 'd8testing_random_gc'},
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'marking',
+ 'test_args': ['--infra-staging', '--total-timeout-sec=2100', '--stress-marking=1'],
+ 'shards': 2
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'delay',
+ 'test_args': ['--infra-staging', '--total-timeout-sec=2100', '--stress-delay-tasks=1']
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'threads',
+ 'test_args': ['--infra-staging', '--total-timeout-sec=2100', '--stress-thread-pool-size=1']
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'stack',
+ 'test_args': ['--infra-staging', '--total-timeout-sec=2100', '--stress-stack-size=1']
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'combined',
+ 'test_args': [
+ '--infra-staging',
+ '--total-timeout-sec=2100',
+ '--stress-delay-tasks=4',
+ '--stress-deopt=2',
+ '--stress-compaction=2',
+ '--stress-gc=4',
+ '--stress-marking=4',
+ '--stress-scavenge=4',
+ '--stress-thread-pool-size=2',
+ '--stress-stack-size=1',
+ ],
+ 'shards': 3
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'scavenge',
+ 'test_args': ['--infra-staging', '--total-timeout-sec=2100', '--stress-scavenge=1']
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'deopt',
+ 'test_args': ['--infra-staging', '--total-timeout-sec=2100', '--stress-deopt=1'],
+ 'shards': 2
+ },
+ ],
+ },
+ 'v8_numfuzz_ng_triggered': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'swarming_task_attrs': {
+ 'expiration': 13800,
+ 'hard_timeout': 4200,
+ 'priority': 35,
+ },
+ 'tests': [
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'deopt',
+ 'test_args': ['--total-timeout-sec=900', '--stress-deopt=1']
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'deopt-staging',
+ 'test_args': ['--infra-staging', '--total-timeout-sec=900', '--stress-deopt=1']
+ },
+ ],
+ },
+ 'v8_numfuzz_tsan_ng_triggered': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'swarming_task_attrs': {
+ 'expiration': 13800,
+ 'hard_timeout': 4200,
+ 'priority': 35,
+ },
+ 'tests': [
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'marking',
+ 'test_args': ['--total-timeout-sec=900', '--stress-marking=1']
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'delay',
+ 'test_args': ['--total-timeout-sec=900', '--stress-delay-tasks=1']
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'threads',
+ 'test_args': ['--total-timeout-sec=900', '--stress-thread-pool-size=1']
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'combined',
+ 'test_args': [
+ '--total-timeout-sec=900',
+ '--stress-delay-tasks=4',
+ '--stress-deopt=2',
+ '--stress-compaction=2',
+ '--stress-gc=4',
+ '--stress-marking=4',
+ '--stress-scavenge=4',
+ '--stress-thread-pool-size=2',
+ ],
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'combined-staging',
+ 'test_args': [
+ '--infra-staging',
+ '--total-timeout-sec=900',
+ '--stress-delay-tasks=4',
+ '--stress-deopt=2',
+ '--stress-compaction=2',
+ '--stress-gc=4',
+ '--stress-marking=4',
+ '--stress-scavenge=4',
+ '--stress-thread-pool-size=2',
+ ],
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'scavenge',
+ 'test_args': ['--total-timeout-sec=900', '--stress-scavenge=1']
+ },
+ ],
+ },
+ 'v8_numfuzz_dbg_ng_triggered': {
+ 'swarming_dimensions': {
+ 'os': 'Ubuntu-18.04',
+ },
+ 'swarming_task_attrs': {
+ 'expiration': 13800,
+ 'hard_timeout': 4200,
+ 'priority': 35,
+ },
+ 'tests': [
+ {'name': 'd8testing_random_gc'},
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'marking',
+ 'test_args': ['--total-timeout-sec=900', '--stress-marking=1'],
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'delay',
+ 'test_args': ['--total-timeout-sec=900', '--stress-delay-tasks=1']
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'threads',
+ 'test_args': ['--total-timeout-sec=900', '--stress-thread-pool-size=1']
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'combined',
+ 'test_args': [
+ '--total-timeout-sec=900',
+ '--stress-delay-tasks=4',
+ '--stress-deopt=2',
+ '--stress-compaction=2',
+ '--stress-gc=4',
+ '--stress-marking=4',
+ '--stress-scavenge=4',
+ '--stress-thread-pool-size=2',
+ ],
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'combined-staging',
+ 'test_args': [
+ '--infra-staging',
+ '--total-timeout-sec=900',
+ '--stress-delay-tasks=4',
+ '--stress-deopt=2',
+ '--stress-compaction=2',
+ '--stress-gc=4',
+ '--stress-marking=4',
+ '--stress-scavenge=4',
+ '--stress-thread-pool-size=2',
+ ],
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'scavenge',
+ 'test_args': ['--total-timeout-sec=900', '--stress-scavenge=1']
+ },
+ {
+ 'name': 'numfuzz',
+ 'suffix': 'deopt',
+ 'test_args': ['--total-timeout-sec=900', '--stress-deopt=1'],
+ },
+ ],
+ },
}
diff --git a/deps/v8/samples/cppgc/cppgc-sample.cc b/deps/v8/samples/cppgc/hello-world.cc
index d76c16a553..d76c16a553 100644
--- a/deps/v8/samples/cppgc/cppgc-sample.cc
+++ b/deps/v8/samples/cppgc/hello-world.cc
diff --git a/deps/v8/samples/hello-world.cc b/deps/v8/samples/hello-world.cc
index 6e506475e4..92436e0177 100644
--- a/deps/v8/samples/hello-world.cc
+++ b/deps/v8/samples/hello-world.cc
@@ -7,7 +7,12 @@
#include <string.h>
#include "include/libplatform/libplatform.h"
-#include "include/v8.h"
+#include "include/v8-context.h"
+#include "include/v8-initialization.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-primitive.h"
+#include "include/v8-script.h"
int main(int argc, char* argv[]) {
// Initialize V8.
diff --git a/deps/v8/samples/process.cc b/deps/v8/samples/process.cc
index 16e70a2064..28b6f119c3 100644
--- a/deps/v8/samples/process.cc
+++ b/deps/v8/samples/process.cc
@@ -25,16 +25,29 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <include/v8.h>
-
-#include <include/libplatform/libplatform.h>
-
#include <stdlib.h>
#include <string.h>
#include <map>
#include <string>
+#include "include/libplatform/libplatform.h"
+#include "include/v8-array-buffer.h"
+#include "include/v8-context.h"
+#include "include/v8-exception.h"
+#include "include/v8-external.h"
+#include "include/v8-function.h"
+#include "include/v8-initialization.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-object.h"
+#include "include/v8-persistent-handle.h"
+#include "include/v8-primitive.h"
+#include "include/v8-script.h"
+#include "include/v8-snapshot.h"
+#include "include/v8-template.h"
+#include "include/v8-value.h"
+
using std::map;
using std::pair;
using std::string;
diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc
index 7de600a88f..933f138542 100644
--- a/deps/v8/samples/shell.cc
+++ b/deps/v8/samples/shell.cc
@@ -25,16 +25,21 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include <include/v8.h>
-
-#include <include/libplatform/libplatform.h>
-
#include <assert.h>
#include <fcntl.h>
+#include <include/libplatform/libplatform.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
+#include "include/v8-context.h"
+#include "include/v8-exception.h"
+#include "include/v8-initialization.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-script.h"
+#include "include/v8-template.h"
+
/**
* This sample program shows how to implement a simple javascript shell
* based on V8. This includes initializing V8 with command line options,
diff --git a/deps/v8/src/DEPS b/deps/v8/src/DEPS
index b3fcddf2f4..341435e28d 100644
--- a/deps/v8/src/DEPS
+++ b/deps/v8/src/DEPS
@@ -52,6 +52,7 @@ include_rules = [
"+src/interpreter/setup-interpreter.h",
"-src/regexp",
"+src/regexp/regexp.h",
+ "+src/regexp/regexp-flags.h",
"+src/regexp/regexp-stack.h",
"+src/regexp/regexp-utils.h",
"-src/trap-handler",
@@ -65,6 +66,10 @@ include_rules = [
"+builtins-generated",
"+torque-generated",
"+starboard",
+ # Using cppgc inside v8 is not (yet) allowed.
+ "-include/cppgc",
+ "+include/cppgc/platform.h",
+ "+include/cppgc/source-location.h",
]
specific_include_rules = {
diff --git a/deps/v8/src/api/api-arguments.h b/deps/v8/src/api/api-arguments.h
index 464ebadf37..98354757be 100644
--- a/deps/v8/src/api/api-arguments.h
+++ b/deps/v8/src/api/api-arguments.h
@@ -5,6 +5,7 @@
#ifndef V8_API_API_ARGUMENTS_H_
#define V8_API_API_ARGUMENTS_H_
+#include "include/v8-template.h"
#include "src/api/api.h"
#include "src/debug/debug.h"
#include "src/execution/isolate.h"
diff --git a/deps/v8/src/api/api-natives.h b/deps/v8/src/api/api-natives.h
index fb59eb6cfc..38a8a7b917 100644
--- a/deps/v8/src/api/api-natives.h
+++ b/deps/v8/src/api/api-natives.h
@@ -5,7 +5,7 @@
#ifndef V8_API_API_NATIVES_H_
#define V8_API_API_NATIVES_H_
-#include "include/v8.h"
+#include "include/v8-template.h"
#include "src/base/macros.h"
#include "src/handles/handles.h"
#include "src/handles/maybe-handles.h"
diff --git a/deps/v8/src/api/api.cc b/deps/v8/src/api/api.cc
index a8af304a53..dedbd5db66 100644
--- a/deps/v8/src/api/api.cc
+++ b/deps/v8/src/api/api.cc
@@ -11,12 +11,19 @@
#include <utility> // For move
#include <vector>
-#include "include/cppgc/custom-space.h"
+#include "include/v8-callbacks.h"
#include "include/v8-cppgc.h"
+#include "include/v8-date.h"
+#include "include/v8-extension.h"
#include "include/v8-fast-api-calls.h"
+#include "include/v8-function.h"
+#include "include/v8-json.h"
+#include "include/v8-locker.h"
+#include "include/v8-primitive-object.h"
#include "include/v8-profiler.h"
#include "include/v8-unwinder-state.h"
#include "include/v8-util.h"
+#include "include/v8-wasm.h"
#include "src/api/api-inl.h"
#include "src/api/api-natives.h"
#include "src/base/functional.h"
@@ -56,6 +63,7 @@
#include "src/init/icu_util.h"
#include "src/init/startup-data-util.h"
#include "src/init/v8.h"
+#include "src/init/vm-cage.h"
#include "src/json/json-parser.h"
#include "src/json/json-stringifier.h"
#include "src/logging/counters-scopes.h"
@@ -177,6 +185,49 @@ static ScriptOrigin GetScriptOriginForScript(i::Isolate* isolate,
return origin;
}
+ScriptOrigin::ScriptOrigin(
+ Local<Value> resource_name, Local<Integer> line_offset,
+ Local<Integer> column_offset, Local<Boolean> is_shared_cross_origin,
+ Local<Integer> script_id, Local<Value> source_map_url,
+ Local<Boolean> is_opaque, Local<Boolean> is_wasm, Local<Boolean> is_module,
+ Local<PrimitiveArray> host_defined_options)
+ : ScriptOrigin(
+ Isolate::GetCurrent(), resource_name,
+ line_offset.IsEmpty() ? 0 : static_cast<int>(line_offset->Value()),
+ column_offset.IsEmpty() ? 0
+ : static_cast<int>(column_offset->Value()),
+ !is_shared_cross_origin.IsEmpty() && is_shared_cross_origin->IsTrue(),
+ static_cast<int>(script_id.IsEmpty() ? -1 : script_id->Value()),
+ source_map_url, !is_opaque.IsEmpty() && is_opaque->IsTrue(),
+ !is_wasm.IsEmpty() && is_wasm->IsTrue(),
+ !is_module.IsEmpty() && is_module->IsTrue(), host_defined_options) {}
+
+ScriptOrigin::ScriptOrigin(Local<Value> resource_name, int line_offset,
+ int column_offset, bool is_shared_cross_origin,
+ int script_id, Local<Value> source_map_url,
+ bool is_opaque, bool is_wasm, bool is_module,
+ Local<PrimitiveArray> host_defined_options)
+ : isolate_(Isolate::GetCurrent()),
+ resource_name_(resource_name),
+ resource_line_offset_(line_offset),
+ resource_column_offset_(column_offset),
+ options_(is_shared_cross_origin, is_opaque, is_wasm, is_module),
+ script_id_(script_id),
+ source_map_url_(source_map_url),
+ host_defined_options_(host_defined_options) {}
+
+Local<Integer> ScriptOrigin::ResourceLineOffset() const {
+ return v8::Integer::New(isolate_, resource_line_offset_);
+}
+
+Local<Integer> ScriptOrigin::ResourceColumnOffset() const {
+ return v8::Integer::New(isolate_, resource_column_offset_);
+}
+
+Local<Integer> ScriptOrigin::ScriptID() const {
+ return v8::Integer::New(isolate_, script_id_);
+}
+
// --- E x c e p t i o n B e h a v i o r ---
void i::FatalProcessOutOfMemory(i::Isolate* isolate, const char* location) {
@@ -331,6 +382,37 @@ void V8::SetSnapshotDataBlob(StartupData* snapshot_blob) {
namespace {
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+// ArrayBufferAllocator to use when the virtual memory cage is enabled, in which
+// case all ArrayBuffer backing stores need to be allocated inside the data
+// cage. Note, the current implementation is extremely inefficient as it uses
+// the BoundedPageAllocator. In the future, we'll need a proper allocator
+// implementation.
+class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
+ public:
+ ArrayBufferAllocator() { CHECK(page_allocator_); }
+
+ void* Allocate(size_t length) override {
+ return page_allocator_->AllocatePages(nullptr, RoundUp(length, page_size_),
+ page_size_,
+ PageAllocator::kReadWrite);
+ }
+
+ void* AllocateUninitialized(size_t length) override {
+ return Allocate(length);
+ }
+
+ void Free(void* data, size_t length) override {
+ page_allocator_->FreePages(data, RoundUp(length, page_size_));
+ }
+
+ private:
+ PageAllocator* page_allocator_ = internal::GetPlatformDataCagePageAllocator();
+ const size_t page_size_ = page_allocator_->AllocatePageSize();
+};
+
+#else
+
class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
public:
void* Allocate(size_t length) override {
@@ -372,6 +454,7 @@ class ArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
return new_data;
}
};
+#endif // V8_VIRTUAL_MEMORY_CAGE
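Whichever allocator variant is compiled in, the embedder wiring is the same: the allocator is owned by the embedder and handed to the isolate at creation. A sketch of that unchanged pattern:

    v8::Isolate::CreateParams params;
    params.array_buffer_allocator =
        v8::ArrayBuffer::Allocator::NewDefaultAllocator();
    v8::Isolate* isolate = v8::Isolate::New(params);
    // ... use the isolate ...
    isolate->Dispose();
    delete params.array_buffer_allocator;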
struct SnapshotCreatorData {
explicit SnapshotCreatorData(Isolate* isolate)
@@ -746,9 +829,17 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
}
}
-i::Address* V8::GlobalizeReference(i::Isolate* isolate, i::Address* obj) {
- LOG_API(isolate, Persistent, New);
- i::Handle<i::Object> result = isolate->global_handles()->Create(*obj);
+namespace api_internal {
+i::Address* GlobalizeTracedReference(i::Isolate* isolate, i::Address* obj,
+ internal::Address* slot,
+ bool has_destructor) {
+ LOG_API(isolate, TracedGlobal, New);
+#ifdef DEBUG
+ Utils::ApiCheck((slot != nullptr), "v8::GlobalizeTracedReference",
+ "the address slot must be not null");
+#endif
+ i::Handle<i::Object> result =
+ isolate->global_handles()->CreateTraced(*obj, slot, has_destructor);
#ifdef VERIFY_HEAP
if (i::FLAG_verify_heap) {
i::Object(*obj).ObjectVerify(isolate);
@@ -757,16 +848,9 @@ i::Address* V8::GlobalizeReference(i::Isolate* isolate, i::Address* obj) {
return result.location();
}
-i::Address* V8::GlobalizeTracedReference(i::Isolate* isolate, i::Address* obj,
- internal::Address* slot,
- bool has_destructor) {
- LOG_API(isolate, TracedGlobal, New);
-#ifdef DEBUG
- Utils::ApiCheck((slot != nullptr), "v8::GlobalizeTracedReference",
- "the address slot must be not null");
-#endif
- i::Handle<i::Object> result =
- isolate->global_handles()->CreateTraced(*obj, slot, has_destructor);
+i::Address* GlobalizeReference(i::Isolate* isolate, i::Address* obj) {
+ LOG_API(isolate, Persistent, New);
+ i::Handle<i::Object> result = isolate->global_handles()->Create(*obj);
#ifdef VERIFY_HEAP
if (i::FLAG_verify_heap) {
i::Object(*obj).ObjectVerify(isolate);
@@ -775,59 +859,38 @@ i::Address* V8::GlobalizeTracedReference(i::Isolate* isolate, i::Address* obj,
return result.location();
}
-i::Address* V8::CopyGlobalReference(i::Address* from) {
+i::Address* CopyGlobalReference(i::Address* from) {
i::Handle<i::Object> result = i::GlobalHandles::CopyGlobal(from);
return result.location();
}
-void V8::MoveGlobalReference(internal::Address** from, internal::Address** to) {
+void MoveGlobalReference(internal::Address** from, internal::Address** to) {
i::GlobalHandles::MoveGlobal(from, to);
}
-void V8::MoveTracedGlobalReference(internal::Address** from,
- internal::Address** to) {
- i::GlobalHandles::MoveTracedGlobal(from, to);
-}
-
-void V8::CopyTracedGlobalReference(const internal::Address* const* from,
- internal::Address** to) {
- i::GlobalHandles::CopyTracedGlobal(from, to);
-}
-
-void V8::MakeWeak(i::Address* location, void* parameter,
- WeakCallbackInfo<void>::Callback weak_callback,
- WeakCallbackType type) {
+void MakeWeak(i::Address* location, void* parameter,
+ WeakCallbackInfo<void>::Callback weak_callback,
+ WeakCallbackType type) {
i::GlobalHandles::MakeWeak(location, parameter, weak_callback, type);
}
-void V8::MakeWeak(i::Address** location_addr) {
+void MakeWeak(i::Address** location_addr) {
i::GlobalHandles::MakeWeak(location_addr);
}
-void* V8::ClearWeak(i::Address* location) {
+void* ClearWeak(i::Address* location) {
return i::GlobalHandles::ClearWeakness(location);
}
-void V8::AnnotateStrongRetainer(i::Address* location, const char* label) {
+void AnnotateStrongRetainer(i::Address* location, const char* label) {
i::GlobalHandles::AnnotateStrongRetainer(location, label);
}
-void V8::DisposeGlobal(i::Address* location) {
+void DisposeGlobal(i::Address* location) {
i::GlobalHandles::Destroy(location);
}
-void V8::DisposeTracedGlobal(internal::Address* location) {
- i::GlobalHandles::DestroyTraced(location);
-}
-
-void V8::SetFinalizationCallbackTraced(
- internal::Address* location, void* parameter,
- WeakCallbackInfo<void>::Callback callback) {
- i::GlobalHandles::SetFinalizationCallbackForTraced(location, parameter,
- callback);
-}
-
-Value* V8::Eternalize(Isolate* v8_isolate, Value* value) {
+Value* Eternalize(Isolate* v8_isolate, Value* value) {
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
i::Object object = *Utils::OpenHandle(value);
int index = -1;
@@ -836,20 +899,42 @@ Value* V8::Eternalize(Isolate* v8_isolate, Value* value) {
isolate->eternal_handles()->Get(index).location());
}
-void V8::FromJustIsNothing() {
+void MoveTracedGlobalReference(internal::Address** from,
+ internal::Address** to) {
+ i::GlobalHandles::MoveTracedGlobal(from, to);
+}
+
+void CopyTracedGlobalReference(const internal::Address* const* from,
+ internal::Address** to) {
+ i::GlobalHandles::CopyTracedGlobal(from, to);
+}
+
+void DisposeTracedGlobal(internal::Address* location) {
+ i::GlobalHandles::DestroyTraced(location);
+}
+
+void SetFinalizationCallbackTraced(internal::Address* location, void* parameter,
+ WeakCallbackInfo<void>::Callback callback) {
+ i::GlobalHandles::SetFinalizationCallbackForTraced(location, parameter,
+ callback);
+}
+
+void FromJustIsNothing() {
Utils::ApiCheck(false, "v8::FromJust", "Maybe value is Nothing.");
}
-void V8::ToLocalEmpty() {
+void ToLocalEmpty() {
Utils::ApiCheck(false, "v8::ToLocalChecked", "Empty MaybeLocal.");
}
-void V8::InternalFieldOutOfBounds(int index) {
+void InternalFieldOutOfBounds(int index) {
Utils::ApiCheck(0 <= index && index < kInternalFieldsInWeakCallback,
"WeakCallbackInfo::GetInternalField",
"Internal field out of bounds.");
}
+} // namespace api_internal
+
// --- H a n d l e s ---
HandleScope::HandleScope(Isolate* isolate) { Initialize(isolate); }
@@ -2387,42 +2472,44 @@ MaybeLocal<UnboundScript> ScriptCompiler::CompileUnboundInternal(
i::Handle<i::String> str = Utils::OpenHandle(*(source->source_string));
- std::unique_ptr<i::AlignedCachedData> cached_data;
- if (options == kConsumeCodeCache) {
- if (source->consume_cache_task) {
- // If there's a cache consume task, finish it
- i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
- source->consume_cache_task->impl_->Finish(isolate, str,
- source->resource_options);
- i::Handle<i::SharedFunctionInfo> result;
- if (maybe_function_info.ToHandle(&result)) {
- RETURN_ESCAPED(ToApiHandle<UnboundScript>(result));
- }
- // If the above failed, then we must have rejected the cache. Continue
- // with normal compilation, disabling the code cache consumption.
- source->cached_data->rejected = true;
- options = kNoCompileOptions;
- } else {
- DCHECK(source->cached_data);
- // AlignedCachedData takes care of pointer-aligning the data.
- cached_data.reset(new i::AlignedCachedData(source->cached_data->data,
- source->cached_data->length));
- }
- }
-
i::Handle<i::SharedFunctionInfo> result;
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.CompileScript");
i::ScriptDetails script_details = GetScriptDetails(
isolate, source->resource_name, source->resource_line_offset,
source->resource_column_offset, source->source_map_url,
source->host_defined_options, source->resource_options);
- i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
- i::Compiler::GetSharedFunctionInfoForScript(
- isolate, str, script_details, nullptr, cached_data.get(), options,
- no_cache_reason, i::NOT_NATIVES_CODE);
+
+ i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info;
if (options == kConsumeCodeCache) {
- source->cached_data->rejected = cached_data->rejected();
+ if (source->consume_cache_task) {
+ // Take ownership of the internal deserialization task and clear it off
+ // the consume task on the source.
+ DCHECK_NOT_NULL(source->consume_cache_task->impl_);
+ std::unique_ptr<i::BackgroundDeserializeTask> deserialize_task =
+ std::move(source->consume_cache_task->impl_);
+ maybe_function_info =
+ i::Compiler::GetSharedFunctionInfoForScriptWithDeserializeTask(
+ isolate, str, script_details, deserialize_task.get(), options,
+ no_cache_reason, i::NOT_NATIVES_CODE);
+ source->cached_data->rejected = deserialize_task->rejected();
+ } else {
+ DCHECK(source->cached_data);
+ // AlignedCachedData takes care of pointer-aligning the data.
+ auto cached_data = std::make_unique<i::AlignedCachedData>(
+ source->cached_data->data, source->cached_data->length);
+ maybe_function_info =
+ i::Compiler::GetSharedFunctionInfoForScriptWithCachedData(
+ isolate, str, script_details, cached_data.get(), options,
+ no_cache_reason, i::NOT_NATIVES_CODE);
+ source->cached_data->rejected = cached_data->rejected();
+ }
+ } else {
+ // Compile without any cache.
+ maybe_function_info = i::Compiler::GetSharedFunctionInfoForScript(
+ isolate, str, script_details, options, no_cache_reason,
+ i::NOT_NATIVES_CODE);
}
+
has_pending_exception = !maybe_function_info.ToHandle(&result);
RETURN_ON_FAILED_EXECUTION(UnboundScript);
RETURN_ESCAPED(ToApiHandle<UnboundScript>(result));
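From the embedder's perspective the entry point is unchanged; only the internal dispatch differs (background deserialize task, inline cached data, or plain compile). A hedged sketch of the cache-consuming path, assuming data and length came from an earlier ScriptCompiler::CreateCodeCache call and that isolate, source_string, and origin are in scope:

    auto* cached = new v8::ScriptCompiler::CachedData(
        data, length, v8::ScriptCompiler::CachedData::BufferNotOwned);
    v8::ScriptCompiler::Source source(source_string, origin, cached);
    v8::MaybeLocal<v8::UnboundScript> unbound =
        v8::ScriptCompiler::CompileUnboundScript(
            isolate, &source, v8::ScriptCompiler::kConsumeCodeCache);
    if (source.GetCachedData()->rejected) {
      // Stale cache; V8 already fell back to a full parse internally.
    }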
@@ -2726,7 +2813,7 @@ v8::TryCatch::TryCatch(v8::Isolate* isolate)
has_terminated_(false) {
ResetInternal();
// Special handling for simulators which have a separate JS stack.
- js_stack_comparable_address_ = reinterpret_cast<void*>(
+ js_stack_comparable_address_ = static_cast<internal::Address>(
i::SimulatorStack::RegisterJSStackComparableAddress(isolate_));
isolate_->RegisterTryCatchHandler(this);
}
@@ -5852,6 +5939,12 @@ void v8::V8::InitializePlatform(Platform* platform) {
i::V8::InitializePlatform(platform);
}
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+bool v8::V8::InitializeVirtualMemoryCage() {
+ return i::V8::InitializeVirtualMemoryCage();
+}
+#endif
+
void v8::V8::ShutdownPlatform() { i::V8::ShutdownPlatform(); }
bool v8::V8::Initialize(const int build_config) {
@@ -5882,6 +5975,16 @@ bool v8::V8::Initialize(const int build_config) {
V8_HEAP_SANDBOX_BOOL ? "ENABLED" : "DISABLED");
}
+ const bool kEmbedderVirtualMemoryCage =
+ (build_config & kVirtualMemoryCage) != 0;
+ if (kEmbedderVirtualMemoryCage != V8_VIRTUAL_MEMORY_CAGE_BOOL) {
+ FATAL(
+ "Embedder-vs-V8 build configuration mismatch. On embedder side "
+ "virtual memory cage is %s while on V8 side it's %s.",
+ kEmbedderVirtualMemoryCage ? "ENABLED" : "DISABLED",
+ V8_VIRTUAL_MEMORY_CAGE_BOOL ? "ENABLED" : "DISABLED");
+ }
+
i::V8::Initialize();
return true;
}
@@ -5998,6 +6101,13 @@ void v8::V8::InitializeExternalStartupDataFromFile(const char* snapshot_blob) {
const char* v8::V8::GetVersion() { return i::Version::GetVersion(); }
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+PageAllocator* v8::V8::GetVirtualMemoryCageDataPageAllocator() {
+ CHECK(i::GetProcessWideVirtualMemoryCage()->is_initialized());
+ return i::GetProcessWideVirtualMemoryCage()->GetDataCagePageAllocator();
+}
+#endif
+
void V8::GetSharedMemoryStatistics(SharedMemoryStatistics* statistics) {
i::ReadOnlyHeap::PopulateReadOnlySpaceStatistics(statistics);
}
@@ -10336,6 +10446,18 @@ bool ConvertDouble(double d) {
} // namespace internal
+bool CopyAndConvertArrayToCppBufferInt32(Local<Array> src, int32_t* dst,
+ uint32_t max_length) {
+ return CopyAndConvertArrayToCppBuffer<&v8::kTypeInfoInt32, int32_t>(
+ src, dst, max_length);
+}
+
+bool CopyAndConvertArrayToCppBufferFloat64(Local<Array> src, double* dst,
+ uint32_t max_length) {
+ return CopyAndConvertArrayToCppBuffer<&v8::kTypeInfoFloat64, double>(
+ src, dst, max_length);
+}
+
} // namespace v8
#undef TRACE_BS
diff --git a/deps/v8/src/api/api.h b/deps/v8/src/api/api.h
index 7d2a0c3e9c..e24c951306 100644
--- a/deps/v8/src/api/api.h
+++ b/deps/v8/src/api/api.h
@@ -7,6 +7,11 @@
#include <memory>
+#include "include/v8-container.h"
+#include "include/v8-external.h"
+#include "include/v8-proxy.h"
+#include "include/v8-typed-array.h"
+#include "include/v8-wasm.h"
#include "src/execution/isolate.h"
#include "src/heap/factory.h"
#include "src/objects/bigint.h"
@@ -18,12 +23,16 @@
#include "src/objects/objects.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/source-text-module.h"
-#include "src/utils/detachable-vector.h"
-
#include "src/objects/templates.h"
+#include "src/utils/detachable-vector.h"
namespace v8 {
+class AccessorSignature;
+class Extension;
+class Signature;
+class Template;
+
namespace internal {
class JSArrayBufferView;
class JSFinalizationRegistry;
diff --git a/deps/v8/src/asmjs/asm-parser.cc b/deps/v8/src/asmjs/asm-parser.cc
index 8babca7a3b..09c520bbc0 100644
--- a/deps/v8/src/asmjs/asm-parser.cc
+++ b/deps/v8/src/asmjs/asm-parser.cc
@@ -698,7 +698,8 @@ void AsmJsParser::ValidateFunctionTable() {
FAIL("Function table definition doesn't match use");
}
module_builder_->SetIndirectFunction(
- static_cast<uint32_t>(table_info->index + count), info->index);
+ 0, static_cast<uint32_t>(table_info->index + count), info->index,
+ WasmModuleBuilder::WasmElemSegment::kRelativeToDeclaredFunctions);
}
++count;
if (Check(',')) {
@@ -2134,7 +2135,10 @@ AsmType* AsmJsParser::ValidateCall() {
EXPECT_TOKENn(']');
VarInfo* function_info = GetVarInfo(function_name);
if (function_info->kind == VarKind::kUnused) {
- uint32_t index = module_builder_->AllocateIndirectFunctions(mask + 1);
+ if (module_builder_->NumTables() == 0) {
+ module_builder_->AddTable(kWasmFuncRef, 0);
+ }
+ uint32_t index = module_builder_->IncreaseTableMinSize(0, mask + 1);
if (index == std::numeric_limits<uint32_t>::max()) {
FAILn("Exceeded maximum function table size");
}
diff --git a/deps/v8/src/ast/prettyprinter.cc b/deps/v8/src/ast/prettyprinter.cc
index fb3690164d..6a68a80cdc 100644
--- a/deps/v8/src/ast/prettyprinter.cc
+++ b/deps/v8/src/ast/prettyprinter.cc
@@ -13,6 +13,7 @@
#include "src/base/vector.h"
#include "src/common/globals.h"
#include "src/objects/objects-inl.h"
+#include "src/regexp/regexp-flags.h"
#include "src/strings/string-builder-inl.h"
namespace v8 {
@@ -72,6 +73,12 @@ void CallPrinter::Find(AstNode* node, bool print) {
}
}
+void CallPrinter::Print(char c) {
+ if (!found_ || done_) return;
+ num_prints_++;
+ builder_->AppendCharacter(c);
+}
+
void CallPrinter::Print(const char* str) {
if (!found_ || done_) return;
num_prints_++;
@@ -269,13 +276,10 @@ void CallPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
Print("/");
PrintLiteral(node->pattern(), false);
Print("/");
- if (node->flags() & RegExp::kHasIndices) Print("d");
- if (node->flags() & RegExp::kGlobal) Print("g");
- if (node->flags() & RegExp::kIgnoreCase) Print("i");
- if (node->flags() & RegExp::kLinear) Print("l");
- if (node->flags() & RegExp::kMultiline) Print("m");
- if (node->flags() & RegExp::kUnicode) Print("u");
- if (node->flags() & RegExp::kSticky) Print("y");
+#define V(Lower, Camel, LowerCamel, Char, Bit) \
+ if (node->flags() & RegExp::k##Camel) Print(Char);
+ REGEXP_FLAG_LIST(V)
+#undef V
}
@@ -1189,13 +1193,10 @@ void AstPrinter::VisitRegExpLiteral(RegExpLiteral* node) {
PrintLiteralIndented("PATTERN", node->raw_pattern(), false);
int i = 0;
base::EmbeddedVector<char, 128> buf;
- if (node->flags() & RegExp::kHasIndices) buf[i++] = 'd';
- if (node->flags() & RegExp::kGlobal) buf[i++] = 'g';
- if (node->flags() & RegExp::kIgnoreCase) buf[i++] = 'i';
- if (node->flags() & RegExp::kLinear) buf[i++] = 'l';
- if (node->flags() & RegExp::kMultiline) buf[i++] = 'm';
- if (node->flags() & RegExp::kUnicode) buf[i++] = 'u';
- if (node->flags() & RegExp::kSticky) buf[i++] = 'y';
+#define V(Lower, Camel, LowerCamel, Char, Bit) \
+ if (node->flags() & RegExp::k##Camel) buf[i++] = Char;
+ REGEXP_FLAG_LIST(V)
+#undef V
buf[i] = '\0';
PrintIndented("FLAGS ");
Print("%s", buf.begin());
diff --git a/deps/v8/src/ast/prettyprinter.h b/deps/v8/src/ast/prettyprinter.h
index e26d98e7a3..4ffc36a3a2 100644
--- a/deps/v8/src/ast/prettyprinter.h
+++ b/deps/v8/src/ast/prettyprinter.h
@@ -52,6 +52,7 @@ class CallPrinter final : public AstVisitor<CallPrinter> {
#undef DECLARE_VISIT
private:
+ void Print(char c);
void Print(const char* str);
void Print(Handle<String> str);
diff --git a/deps/v8/src/base/atomicops.h b/deps/v8/src/base/atomicops.h
index 888157dc61..20efe3479c 100644
--- a/deps/v8/src/base/atomicops.h
+++ b/deps/v8/src/base/atomicops.h
@@ -191,11 +191,31 @@ inline void Release_Store(volatile Atomic8* ptr, Atomic8 value) {
std::memory_order_release);
}
+inline void Release_Store(volatile Atomic16* ptr, Atomic16 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_release);
+}
+
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
std::memory_order_release);
}
+inline void SeqCst_Store(volatile Atomic8* ptr, Atomic8 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_seq_cst);
+}
+
+inline void SeqCst_Store(volatile Atomic16* ptr, Atomic16 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_seq_cst);
+}
+
+inline void SeqCst_Store(volatile Atomic32* ptr, Atomic32 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_seq_cst);
+}
+
inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
std::memory_order_relaxed);
@@ -279,6 +299,11 @@ inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
std::memory_order_release);
}
+inline void SeqCst_Store(volatile Atomic64* ptr, Atomic64 value) {
+ std::atomic_store_explicit(helper::to_std_atomic(ptr), value,
+ std::memory_order_seq_cst);
+}
+
inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) {
return std::atomic_load_explicit(helper::to_std_atomic_const(ptr),
std::memory_order_relaxed);
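The new SeqCst_Store overloads round out the store family (Relaxed, Release, and now SeqCst) across all atomic widths. A tiny usage sketch:

    v8::base::Atomic32 ready = 0;
    // Participates in the single total order of all seq_cst operations.
    v8::base::SeqCst_Store(&ready, 1);
    v8::base::Atomic32 seen = v8::base::Relaxed_Load(&ready);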
diff --git a/deps/v8/src/base/bounded-page-allocator.cc b/deps/v8/src/base/bounded-page-allocator.cc
index fa7b10324d..0143b179ff 100644
--- a/deps/v8/src/base/bounded-page-allocator.cc
+++ b/deps/v8/src/base/bounded-page-allocator.cc
@@ -30,19 +30,30 @@ void* BoundedPageAllocator::AllocatePages(void* hint, size_t size,
PageAllocator::Permission access) {
MutexGuard guard(&mutex_);
DCHECK(IsAligned(alignment, region_allocator_.page_size()));
-
- // Region allocator does not support alignments bigger than it's own
- // allocation alignment.
- DCHECK_LE(alignment, allocate_page_size_);
-
- // TODO(ishell): Consider using randomized version here.
- Address address = region_allocator_.AllocateRegion(size);
+ DCHECK(IsAligned(alignment, allocate_page_size_));
+
+ Address address;
+ if (alignment <= allocate_page_size_) {
+ // TODO(ishell): Consider using randomized version here.
+ address = region_allocator_.AllocateRegion(size);
+ } else {
+ // Currently, this should only be necessary when V8_VIRTUAL_MEMORY_CAGE is
+ // enabled, in which case a bounded page allocator is used to allocate WASM
+ // memory buffers, which have a larger alignment.
+ address = region_allocator_.AllocateAlignedRegion(size, alignment);
+ }
if (address == RegionAllocator::kAllocationFailure) {
return nullptr;
}
- CHECK(page_allocator_->SetPermissions(reinterpret_cast<void*>(address), size,
- access));
- return reinterpret_cast<void*>(address);
+
+ void* ptr = reinterpret_cast<void*>(address);
+ if (!page_allocator_->SetPermissions(ptr, size, access)) {
+ // This most likely means that we ran out of memory.
+ CHECK_EQ(region_allocator_.FreeRegion(address), size);
+ return nullptr;
+ }
+
+ return ptr;
}
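When a caller requests alignment larger than the allocate page size, the new path delegates to RegionAllocator::AllocateAlignedRegion (added further down), which over-allocates and trims. Worked numbers, assuming 4 KiB pages: for size = 64 KiB and alignment = 2 MiB, the free-list search uses padded_size = 64 KiB + 2 MiB - 4 KiB; any free region that large necessarily contains a 2 MiB-aligned start, and in effect the misaligned head and surplus tail are split back off as free regions.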
bool BoundedPageAllocator::AllocatePagesAt(Address address, size_t size,
@@ -59,8 +70,13 @@ bool BoundedPageAllocator::AllocatePagesAt(Address address, size_t size,
}
}
- CHECK(page_allocator_->SetPermissions(reinterpret_cast<void*>(address), size,
- access));
+ void* ptr = reinterpret_cast<void*>(address);
+ if (!page_allocator_->SetPermissions(ptr, size, access)) {
+ // This most likely means that we ran out of memory.
+ CHECK_EQ(region_allocator_.FreeRegion(address), size);
+ return false;
+ }
+
return true;
}
@@ -94,8 +110,16 @@ bool BoundedPageAllocator::FreePages(void* raw_address, size_t size) {
Address address = reinterpret_cast<Address>(raw_address);
size_t freed_size = region_allocator_.FreeRegion(address);
if (freed_size != size) return false;
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ // When the virtual memory cage is enabled, the pages returned by the
+ // BoundedPageAllocator must be zero-initialized, as some of its clients
+ // expect them to be. Decommitting them during FreePages ensures that, and
+ // also changes the access permissions to kNoAccess.
+ CHECK(page_allocator_->DecommitPages(raw_address, size));
+#else
CHECK(page_allocator_->SetPermissions(raw_address, size,
PageAllocator::kNoAccess));
+#endif
return true;
}
@@ -128,8 +152,14 @@ bool BoundedPageAllocator::ReleasePages(void* raw_address, size_t size,
// Keep the region in "used" state just uncommit some pages.
Address free_address = address + new_size;
size_t free_size = size - new_size;
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ // See comment in FreePages().
+ return page_allocator_->DecommitPages(reinterpret_cast<void*>(free_address),
+ free_size);
+#else
return page_allocator_->SetPermissions(reinterpret_cast<void*>(free_address),
free_size, PageAllocator::kNoAccess);
+#endif
}
bool BoundedPageAllocator::SetPermissions(void* address, size_t size,
@@ -144,5 +174,9 @@ bool BoundedPageAllocator::DiscardSystemPages(void* address, size_t size) {
return page_allocator_->DiscardSystemPages(address, size);
}
+bool BoundedPageAllocator::DecommitPages(void* address, size_t size) {
+ return page_allocator_->DecommitPages(address, size);
+}
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/bounded-page-allocator.h b/deps/v8/src/base/bounded-page-allocator.h
index 1c8c846711..db364255f1 100644
--- a/deps/v8/src/base/bounded-page-allocator.h
+++ b/deps/v8/src/base/bounded-page-allocator.h
@@ -71,6 +71,8 @@ class V8_BASE_EXPORT BoundedPageAllocator : public v8::PageAllocator {
bool DiscardSystemPages(void* address, size_t size) override;
+ bool DecommitPages(void* address, size_t size) override;
+
private:
v8::base::Mutex mutex_;
const size_t allocate_page_size_;
diff --git a/deps/v8/src/base/build_config.h b/deps/v8/src/base/build_config.h
index d7a0c9f3cf..3303916776 100644
--- a/deps/v8/src/base/build_config.h
+++ b/deps/v8/src/base/build_config.h
@@ -33,6 +33,9 @@
#elif defined(__MIPSEB__) || defined(__MIPSEL__)
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
+#elif defined(__loongarch64)
+#define V8_HOST_ARCH_LOONG64 1
+#define V8_HOST_ARCH_64_BIT 1
#elif defined(__PPC64__) || defined(_ARCH_PPC64)
#define V8_HOST_ARCH_PPC64 1
#define V8_HOST_ARCH_64_BIT 1
@@ -83,7 +86,7 @@
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \
!V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64 && \
!V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 && \
- !V8_TARGET_ARCH_RISCV64
+ !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
@@ -128,6 +131,8 @@
#define V8_TARGET_ARCH_32_BIT 1
#elif V8_TARGET_ARCH_MIPS64
#define V8_TARGET_ARCH_64_BIT 1
+#elif V8_TARGET_ARCH_LOONG64
+#define V8_TARGET_ARCH_64_BIT 1
#elif V8_TARGET_ARCH_PPC
#define V8_TARGET_ARCH_32_BIT 1
#elif V8_TARGET_ARCH_PPC64
@@ -171,6 +176,9 @@
#if (V8_TARGET_ARCH_RISCV64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_RISCV64))
#error Target architecture riscv64 is only supported on riscv64 and x64 host
#endif
+#if (V8_TARGET_ARCH_LOONG64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_LOONG64))
+#error Target architecture loong64 is only supported on loong64 and x64 host
+#endif
// Determine architecture endianness.
#if V8_TARGET_ARCH_IA32
@@ -181,6 +189,8 @@
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_ARM64
#define V8_TARGET_LITTLE_ENDIAN 1
+#elif V8_TARGET_ARCH_LOONG64
+#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_MIPS
#if defined(__MIPSEB__)
#define V8_TARGET_BIG_ENDIAN 1
diff --git a/deps/v8/src/base/compiler-specific.h b/deps/v8/src/base/compiler-specific.h
index f7e2e0e14d..0c37e56afa 100644
--- a/deps/v8/src/base/compiler-specific.h
+++ b/deps/v8/src/base/compiler-specific.h
@@ -7,13 +7,15 @@
#include "include/v8config.h"
-// Annotate a using ALLOW_UNUSED_TYPE = or function indicating it's ok if it's
-// not used. Use like:
-// using Bar = Foo;
+// Annotation to silence compiler warnings about unused
+// types/functions/variables. Use like:
+//
+// using V8_ALLOW_UNUSED Bar = Foo;
+// V8_ALLOW_UNUSED void foo() {}
#if V8_HAS_ATTRIBUTE_UNUSED
-#define ALLOW_UNUSED_TYPE __attribute__((unused))
+#define V8_ALLOW_UNUSED __attribute__((unused))
#else
-#define ALLOW_UNUSED_TYPE
+#define V8_ALLOW_UNUSED
#endif
// Tell the compiler a function is using a printf-style format string.
diff --git a/deps/v8/src/base/flags.h b/deps/v8/src/base/flags.h
index 96d99059ca..2a36ca77e8 100644
--- a/deps/v8/src/base/flags.h
+++ b/deps/v8/src/base/flags.h
@@ -89,39 +89,39 @@ class Flags final {
mask_type mask_;
};
-#define DEFINE_OPERATORS_FOR_FLAGS(Type) \
- ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT inline constexpr Type operator&( \
- Type::flag_type lhs, Type::flag_type rhs) { \
- return Type(lhs) & rhs; \
- } \
- ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT inline constexpr Type operator&( \
- Type::flag_type lhs, const Type& rhs) { \
- return rhs & lhs; \
- } \
- ALLOW_UNUSED_TYPE inline void operator&(Type::flag_type lhs, \
- Type::mask_type rhs) {} \
- ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT inline constexpr Type operator|( \
- Type::flag_type lhs, Type::flag_type rhs) { \
- return Type(lhs) | rhs; \
- } \
- ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT inline constexpr Type operator|( \
- Type::flag_type lhs, const Type& rhs) { \
- return rhs | lhs; \
- } \
- ALLOW_UNUSED_TYPE inline void operator|(Type::flag_type lhs, \
- Type::mask_type rhs) {} \
- ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT inline constexpr Type operator^( \
- Type::flag_type lhs, Type::flag_type rhs) { \
- return Type(lhs) ^ rhs; \
- } \
- ALLOW_UNUSED_TYPE V8_WARN_UNUSED_RESULT inline constexpr Type operator^( \
- Type::flag_type lhs, const Type& rhs) { \
- return rhs ^ lhs; \
- } \
- ALLOW_UNUSED_TYPE inline void operator^(Type::flag_type lhs, \
- Type::mask_type rhs) {} \
- ALLOW_UNUSED_TYPE inline constexpr Type operator~(Type::flag_type val) { \
- return ~Type(val); \
+#define DEFINE_OPERATORS_FOR_FLAGS(Type) \
+ V8_ALLOW_UNUSED V8_WARN_UNUSED_RESULT inline constexpr Type operator&( \
+ Type::flag_type lhs, Type::flag_type rhs) { \
+ return Type(lhs) & rhs; \
+ } \
+ V8_ALLOW_UNUSED V8_WARN_UNUSED_RESULT inline constexpr Type operator&( \
+ Type::flag_type lhs, const Type& rhs) { \
+ return rhs & lhs; \
+ } \
+ V8_ALLOW_UNUSED inline void operator&(Type::flag_type lhs, \
+ Type::mask_type rhs) {} \
+ V8_ALLOW_UNUSED V8_WARN_UNUSED_RESULT inline constexpr Type operator|( \
+ Type::flag_type lhs, Type::flag_type rhs) { \
+ return Type(lhs) | rhs; \
+ } \
+ V8_ALLOW_UNUSED V8_WARN_UNUSED_RESULT inline constexpr Type operator|( \
+ Type::flag_type lhs, const Type& rhs) { \
+ return rhs | lhs; \
+ } \
+ V8_ALLOW_UNUSED inline void operator|(Type::flag_type lhs, \
+ Type::mask_type rhs) {} \
+ V8_ALLOW_UNUSED V8_WARN_UNUSED_RESULT inline constexpr Type operator^( \
+ Type::flag_type lhs, Type::flag_type rhs) { \
+ return Type(lhs) ^ rhs; \
+ } \
+ V8_ALLOW_UNUSED V8_WARN_UNUSED_RESULT inline constexpr Type operator^( \
+ Type::flag_type lhs, const Type& rhs) { \
+ return rhs ^ lhs; \
+ } \
+ V8_ALLOW_UNUSED inline void operator^(Type::flag_type lhs, \
+ Type::mask_type rhs) {} \
+ V8_ALLOW_UNUSED inline constexpr Type operator~(Type::flag_type val) { \
+ return ~Type(val); \
}
} // namespace base
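The macro body is unchanged apart from the annotation rename; it stamps out the full set of bitwise operators for a given Flags specialization. A sketch of typical usage (names illustrative):

    enum PermissionFlag : uint8_t { kRead = 1 << 0, kWrite = 1 << 1 };
    using Permissions = v8::base::Flags<PermissionFlag>;
    DEFINE_OPERATORS_FOR_FLAGS(Permissions)

    Permissions p = kRead | kWrite;  // resolves via the generated operator|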
diff --git a/deps/v8/src/base/optional.h b/deps/v8/src/base/optional.h
index 77e9bb896e..31fe9a972c 100644
--- a/deps/v8/src/base/optional.h
+++ b/deps/v8/src/base/optional.h
@@ -35,7 +35,7 @@ constexpr in_place_t in_place = {};
// http://en.cppreference.com/w/cpp/utility/optional/nullopt
constexpr nullopt_t nullopt(0);
-// Forward declaration, which is refered by following helpers.
+// Forward declaration, which is referred to by the following helpers.
template <typename T>
class Optional;
diff --git a/deps/v8/src/base/page-allocator.cc b/deps/v8/src/base/page-allocator.cc
index 1438c88337..2956bf1475 100644
--- a/deps/v8/src/base/page-allocator.cc
+++ b/deps/v8/src/base/page-allocator.cc
@@ -151,5 +151,9 @@ bool PageAllocator::DiscardSystemPages(void* address, size_t size) {
return base::OS::DiscardSystemPages(address, size);
}
+bool PageAllocator::DecommitPages(void* address, size_t size) {
+ return base::OS::DecommitPages(address, size);
+}
+
} // namespace base
} // namespace v8
diff --git a/deps/v8/src/base/page-allocator.h b/deps/v8/src/base/page-allocator.h
index a98f084790..7374c67837 100644
--- a/deps/v8/src/base/page-allocator.h
+++ b/deps/v8/src/base/page-allocator.h
@@ -47,6 +47,8 @@ class V8_BASE_EXPORT PageAllocator
bool DiscardSystemPages(void* address, size_t size) override;
+ bool DecommitPages(void* address, size_t size) override;
+
private:
friend class v8::base::SharedMemory;
diff --git a/deps/v8/src/base/platform/platform-fuchsia.cc b/deps/v8/src/base/platform/platform-fuchsia.cc
index bd0000c4a1..c51012c3f1 100644
--- a/deps/v8/src/base/platform/platform-fuchsia.cc
+++ b/deps/v8/src/base/platform/platform-fuchsia.cc
@@ -133,6 +133,11 @@ bool OS::DiscardSystemPages(void* address, size_t size) {
return status == ZX_OK;
}
+bool OS::DecommitPages(void* address, size_t size) {
+ // TODO(chromium:1218005): support this.
+ return false;
+}
+
// static
bool OS::HasLazyCommits() {
// TODO(scottmg): Port, https://crbug.com/731217.
diff --git a/deps/v8/src/base/platform/platform-posix.cc b/deps/v8/src/base/platform/platform-posix.cc
index 179a17cc0f..f05f22c913 100644
--- a/deps/v8/src/base/platform/platform-posix.cc
+++ b/deps/v8/src/base/platform/platform-posix.cc
@@ -341,6 +341,10 @@ void* OS::GetRandomMmapAddr() {
// TODO(RISCV): We need more information from the kernel to correctly mask
// this address for RISC-V. https://github.com/v8-riscv/v8/issues/375
raw_addr &= uint64_t{0xFFFFFF0000};
+#elif V8_TARGET_ARCH_LOONG64
+ // 42 bits of virtual addressing. Truncate to 40 bits to give the kernel a
+ // chance to fulfill the request.
+ raw_addr &= uint64_t{0xFFFFFF0000};
#else
raw_addr &= 0x3FFFF000;
@@ -491,6 +495,20 @@ bool OS::DiscardSystemPages(void* address, size_t size) {
return ret == 0;
}
+bool OS::DecommitPages(void* address, size_t size) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
+ // From https://pubs.opengroup.org/onlinepubs/9699919799/functions/mmap.html:
+ // "If a MAP_FIXED request is successful, then any previous mappings [...] for
+ // those whole pages containing any part of the address range [pa,pa+len)
+ // shall be removed, as if by an appropriate call to munmap(), before the new
+ // mapping is established." As a consequence, the memory will be
+ // zero-initialized on next access.
+ void* ptr = mmap(address, size, PROT_NONE,
+ MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+ return ptr == address;
+}
+
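The contract these platform implementations provide: after a successful DecommitPages the range is inaccessible, and the first access after re-committing reads zeroes. A hedged sketch of that contract against the surrounding OS API (assumes <cstring> and the CHECK macros from src/base/logging.h):

    using v8::base::OS;
    size_t page = OS::CommitPageSize();
    void* mem = OS::Allocate(nullptr, page, OS::AllocatePageSize(),
                             OS::MemoryPermission::kReadWrite);
    memset(mem, 0xAB, page);
    CHECK(OS::DecommitPages(mem, page));  // now inaccessible
    CHECK(OS::SetPermissions(mem, page, OS::MemoryPermission::kReadWrite));
    CHECK_EQ(0u, static_cast<uint8_t*>(mem)[0]);  // fresh zero page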
// static
bool OS::HasLazyCommits() {
#if V8_OS_AIX || V8_OS_LINUX || V8_OS_MACOSX
@@ -530,6 +548,8 @@ void OS::DebugBreak() {
asm("break");
#elif V8_HOST_ARCH_MIPS64
asm("break");
+#elif V8_HOST_ARCH_LOONG64
+ asm("break 0");
#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
asm("twge 2,2");
#elif V8_HOST_ARCH_IA32
@@ -566,25 +586,29 @@ class PosixMemoryMappedFile final : public OS::MemoryMappedFile {
OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name,
FileMode mode) {
const char* fopen_mode = (mode == FileMode::kReadOnly) ? "r" : "r+";
- if (FILE* file = fopen(name, fopen_mode)) {
- if (fseek(file, 0, SEEK_END) == 0) {
- long size = ftell(file); // NOLINT(runtime/int)
- if (size == 0) return new PosixMemoryMappedFile(file, nullptr, 0);
- if (size > 0) {
- int prot = PROT_READ;
- int flags = MAP_PRIVATE;
- if (mode == FileMode::kReadWrite) {
- prot |= PROT_WRITE;
- flags = MAP_SHARED;
- }
- void* const memory =
- mmap(OS::GetRandomMmapAddr(), size, prot, flags, fileno(file), 0);
- if (memory != MAP_FAILED) {
- return new PosixMemoryMappedFile(file, memory, size);
+ struct stat statbuf;
+ // Make sure the path exists and is not a directory.
+ if (stat(name, &statbuf) == 0 && !S_ISDIR(statbuf.st_mode)) {
+ if (FILE* file = fopen(name, fopen_mode)) {
+ if (fseek(file, 0, SEEK_END) == 0) {
+ long size = ftell(file); // NOLINT(runtime/int)
+ if (size == 0) return new PosixMemoryMappedFile(file, nullptr, 0);
+ if (size > 0) {
+ int prot = PROT_READ;
+ int flags = MAP_PRIVATE;
+ if (mode == FileMode::kReadWrite) {
+ prot |= PROT_WRITE;
+ flags = MAP_SHARED;
+ }
+ void* const memory =
+ mmap(OS::GetRandomMmapAddr(), size, prot, flags, fileno(file), 0);
+ if (memory != MAP_FAILED) {
+ return new PosixMemoryMappedFile(file, memory, size);
+ }
}
}
+ fclose(file);
}
- fclose(file);
}
return nullptr;
}
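The stat() guard matters because on many POSIX systems fopen(name, "r") succeeds on a directory and the failure only surfaces later (e.g. EISDIR on read), by which point the directory could be handed to mmap. The guard in isolation (illustrative sketch):

#include <sys/stat.h>

// Returns true only for an existing path that is not a directory, mirroring
// the check added in MemoryMappedFile::open above.
bool IsMappableFile(const char* name) {
  struct stat statbuf;
  return stat(name, &statbuf) == 0 && !S_ISDIR(statbuf.st_mode);
}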
diff --git a/deps/v8/src/base/platform/platform-win32.cc b/deps/v8/src/base/platform/platform-win32.cc
index 79c1aa06ce..6b5c5df496 100644
--- a/deps/v8/src/base/platform/platform-win32.cc
+++ b/deps/v8/src/base/platform/platform-win32.cc
@@ -935,6 +935,21 @@ bool OS::DiscardSystemPages(void* address, size_t size) {
}
// static
+bool OS::DecommitPages(void* address, size_t size) {
+ DCHECK_EQ(0, reinterpret_cast<uintptr_t>(address) % CommitPageSize());
+ DCHECK_EQ(0, size % CommitPageSize());
+ // https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualfree:
+ // "If a page is decommitted but not released, its state changes to reserved.
+ // Subsequently, you can call VirtualAlloc to commit it, or VirtualFree to
+ // release it. Attempts to read from or write to a reserved page results in an
+ // access violation exception."
+ // https://docs.microsoft.com/en-us/windows/win32/api/memoryapi/nf-memoryapi-virtualalloc
+ // for MEM_COMMIT: "The function also guarantees that when the caller later
+ // initially accesses the memory, the contents will be zero."
+ return VirtualFree(address, size, MEM_DECOMMIT) != 0;
+}
+
+// static
bool OS::HasLazyCommits() {
// TODO(alph): implement for the platform.
return false;
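A minimal round trip through the documented VirtualFree/VirtualAlloc behavior the comment cites (illustrative only, not part of the patch):

#include <windows.h>
#include <cassert>

int main() {
  const SIZE_T size = 1 << 16;
  void* p = VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT,
                         PAGE_READWRITE);
  assert(p != nullptr);
  static_cast<char*>(p)[0] = 1;

  // Decommit: pages revert to the reserved state; touching them now would
  // raise an access violation.
  assert(VirtualFree(p, size, MEM_DECOMMIT));

  // Recommit: MEM_COMMIT guarantees the first access reads zeros.
  void* q = VirtualAlloc(p, size, MEM_COMMIT, PAGE_READWRITE);
  assert(q == p && static_cast<char*>(p)[0] == 0);
  return VirtualFree(p, 0, MEM_RELEASE) ? 0 : 1;
}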
diff --git a/deps/v8/src/base/platform/platform.h b/deps/v8/src/base/platform/platform.h
index d196578342..2e7ad32974 100644
--- a/deps/v8/src/base/platform/platform.h
+++ b/deps/v8/src/base/platform/platform.h
@@ -311,6 +311,8 @@ class V8_BASE_EXPORT OS {
V8_WARN_UNUSED_RESULT static bool DiscardSystemPages(void* address,
size_t size);
+ V8_WARN_UNUSED_RESULT static bool DecommitPages(void* address, size_t size);
+
static const int msPerSecond = 1000;
#if V8_OS_POSIX
diff --git a/deps/v8/src/base/region-allocator.cc b/deps/v8/src/base/region-allocator.cc
index 9224dc99dc..53932d2864 100644
--- a/deps/v8/src/base/region-allocator.cc
+++ b/deps/v8/src/base/region-allocator.cc
@@ -200,6 +200,35 @@ bool RegionAllocator::AllocateRegionAt(Address requested_address, size_t size,
return true;
}
+RegionAllocator::Address RegionAllocator::AllocateAlignedRegion(
+ size_t size, size_t alignment) {
+ DCHECK(IsAligned(size, page_size_));
+ DCHECK(IsAligned(alignment, page_size_));
+ DCHECK_GE(alignment, page_size_);
+
+ const size_t padded_size = size + alignment - page_size_;
+ Region* region = FreeListFindRegion(padded_size);
+ if (region == nullptr) return kAllocationFailure;
+
+ if (!IsAligned(region->begin(), alignment)) {
+ size_t start = RoundUp(region->begin(), alignment);
+ region = Split(region, start - region->begin());
+ DCHECK_EQ(region->begin(), start);
+ DCHECK(IsAligned(region->begin(), alignment));
+ }
+
+ if (region->size() != size) {
+ Split(region, size);
+ }
+ DCHECK(IsAligned(region->begin(), alignment));
+ DCHECK_EQ(region->size(), size);
+
+ // Mark region as used.
+ FreeListRemoveRegion(region);
+ region->set_state(RegionState::kAllocated);
+ return region->begin();
+}
+
size_t RegionAllocator::TrimRegion(Address address, size_t new_size) {
DCHECK(IsAligned(new_size, page_size_));
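The padding arithmetic in AllocateAlignedRegion guarantees an aligned sub-region always fits: reserving size + alignment - page_size bytes leaves room to round the start up. A small standalone check of that invariant:

#include <cstdint>
#include <cassert>

int main() {
  const uintptr_t page = 4096, alignment = uintptr_t{1} << 20;
  const uintptr_t size = 8 * page;
  const uintptr_t padded_size = size + alignment - page;
  // Worst case: the free region starts one page past an aligned boundary.
  uintptr_t begin = alignment + page;
  uintptr_t start = (begin + alignment - 1) & ~(alignment - 1);  // RoundUp
  assert(start % alignment == 0);
  // The aligned block of |size| bytes still fits inside the padded region.
  assert(start + size <= begin + padded_size);
}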
diff --git a/deps/v8/src/base/region-allocator.h b/deps/v8/src/base/region-allocator.h
index adc4bd10b6..f80524870f 100644
--- a/deps/v8/src/base/region-allocator.h
+++ b/deps/v8/src/base/region-allocator.h
@@ -61,6 +61,11 @@ class V8_BASE_EXPORT RegionAllocator final {
bool AllocateRegionAt(Address requested_address, size_t size,
RegionState region_state = RegionState::kAllocated);
+ // Allocates a region of |size| aligned to |alignment|. Both |size| and
+ // |alignment| must be multiples of |page_size|. Returns the address of the
+ // region on success, or kAllocationFailure otherwise.
+ Address AllocateAlignedRegion(size_t size, size_t alignment);
+
// Frees region at given |address|, returns the size of the region.
// There must be a used region starting at given address otherwise nothing
// will be freed and 0 will be returned.
diff --git a/deps/v8/src/base/sanitizer/asan.h b/deps/v8/src/base/sanitizer/asan.h
index 82f03aa258..6466fc6163 100644
--- a/deps/v8/src/base/sanitizer/asan.h
+++ b/deps/v8/src/base/sanitizer/asan.h
@@ -24,8 +24,9 @@
// Check that all bytes in a memory region are poisoned. This is different from
// `__asan_region_is_poisoned()` which only requires a single byte in the region
-// to be poisoned.
-#define ASAN_CHECK_MEMORY_REGION_IS_POISONED(start, size) \
+// to be poisoned. Note that the macro only works if both start and size are
+// multiples of ASan's shadow memory granularity.
+#define ASAN_CHECK_WHOLE_MEMORY_REGION_IS_POISONED(start, size) \
do { \
for (size_t i = 0; i < size; i++) { \
CHECK(__asan_address_is_poisoned(reinterpret_cast<const char*>(start) + \
@@ -47,7 +48,7 @@
#define ASAN_UNPOISON_MEMORY_REGION(start, size) \
ASAN_POISON_MEMORY_REGION(start, size)
-#define ASAN_CHECK_MEMORY_REGION_IS_POISONED(start, size) \
+#define ASAN_CHECK_WHOLE_MEMORY_REGION_IS_POISONED(start, size) \
ASAN_POISON_MEMORY_REGION(start, size)
#endif // !V8_USE_ADDRESS_SANITIZER
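For contrast with the renamed macro: __asan_region_is_poisoned() returns non-null if any byte in the range is poisoned, so the whole-region check needs a per-byte loop. The loop expanded into a plain function (sketch, assuming an ASan build):

#if defined(ADDRESS_SANITIZER)
#include <sanitizer/asan_interface.h>
#include <cstddef>

// True only when *every* byte in [start, start + size) is poisoned.
bool WholeRegionIsPoisoned(const void* start, size_t size) {
  for (size_t i = 0; i < size; i++) {
    if (!__asan_address_is_poisoned(
            reinterpret_cast<const char*>(start) + i)) {
      return false;
    }
  }
  return true;
}
#endif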
diff --git a/deps/v8/src/base/sanitizer/tsan.h b/deps/v8/src/base/sanitizer/tsan.h
new file mode 100644
index 0000000000..854c82eb22
--- /dev/null
+++ b/deps/v8/src/base/sanitizer/tsan.h
@@ -0,0 +1,20 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// ThreadSanitizer support.
+
+#ifndef V8_BASE_SANITIZER_TSAN_H_
+#define V8_BASE_SANITIZER_TSAN_H_
+
+#if defined(THREAD_SANITIZER)
+
+#define DISABLE_TSAN __attribute__((no_sanitize_thread))
+
+#else // !defined(THREAD_SANITIZER)
+
+#define DISABLE_TSAN
+
+#endif // !defined(THREAD_SANITIZER)
+
+#endif // V8_BASE_SANITIZER_TSAN_H_
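Typical use of the new attribute macro: annotating a deliberately unsynchronized access so TSan does not report it. A self-contained hypothetical example (the macro body mirrors tsan.h above):

#if defined(THREAD_SANITIZER)
#define DISABLE_TSAN __attribute__((no_sanitize_thread))
#else
#define DISABLE_TSAN
#endif

static int g_flag = 0;

// Under TSan this function is not instrumented, so a deliberately racy read
// here will not be reported; without the attribute a concurrent writer
// would be flagged as a data race.
DISABLE_TSAN int ReadFlagRacily() { return g_flag; }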
diff --git a/deps/v8/src/base/win32-headers.h b/deps/v8/src/base/win32-headers.h
index e4e845d86d..95aedd8c95 100644
--- a/deps/v8/src/base/win32-headers.h
+++ b/deps/v8/src/base/win32-headers.h
@@ -41,12 +41,6 @@
#include <signal.h> // For raise().
#include <time.h> // For LocalOffset() implementation.
-#ifdef __MINGW32__
-// Require Windows XP or higher when compiling with MinGW. This is for MinGW
-// header files to expose getaddrinfo.
-#undef _WIN32_WINNT
-#define _WIN32_WINNT 0x501
-#endif // __MINGW32__
#if !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR)
#include <errno.h> // For STRUNCATE
#endif // !defined(__MINGW32__) || defined(__MINGW64_VERSION_MAJOR)
diff --git a/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h b/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h
index 040761091a..db3c05ce18 100644
--- a/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h
+++ b/deps/v8/src/baseline/arm/baseline-assembler-arm-inl.h
@@ -501,14 +501,21 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
- __ masm()->add(params_size, params_size,
- Operand(1)); // Include the receiver.
- __ masm()->Drop(params_size);
+ __ masm()->DropArguments(params_size, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ masm()->Ret();
}
#undef __
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+ Register reg) {
+ assembler_->masm()->cmp(reg, kInterpreterAccumulatorRegister);
+ assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue);
+}
+
} // namespace baseline
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h b/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
index cda2108327..7824f92c2a 100644
--- a/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
+++ b/deps/v8/src/baseline/arm64/baseline-assembler-arm64-inl.h
@@ -583,13 +583,21 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
- __ masm()->Add(params_size, params_size, 1); // Include the receiver.
- __ masm()->DropArguments(params_size);
+ __ masm()->DropArguments(params_size,
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ masm()->Ret();
}
#undef __
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+ Register reg) {
+ assembler_->masm()->CmpTagged(reg, kInterpreterAccumulatorRegister);
+ assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue);
+}
+
} // namespace baseline
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/baseline/baseline-assembler-inl.h b/deps/v8/src/baseline/baseline-assembler-inl.h
index 83c102176f..583db7e679 100644
--- a/deps/v8/src/baseline/baseline-assembler-inl.h
+++ b/deps/v8/src/baseline/baseline-assembler-inl.h
@@ -34,6 +34,8 @@
#include "src/baseline/mips64/baseline-assembler-mips64-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/baseline/mips/baseline-assembler-mips-inl.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/baseline/loong64/baseline-assembler-loong64-inl.h"
#else
#error Unsupported target architecture.
#endif
@@ -135,6 +137,24 @@ SaveAccumulatorScope::~SaveAccumulatorScope() {
assembler_->Pop(kInterpreterAccumulatorRegister);
}
+EnsureAccumulatorPreservedScope::EnsureAccumulatorPreservedScope(
+ BaselineAssembler* assembler)
+ : assembler_(assembler)
+#ifdef V8_CODE_COMMENTS
+ ,
+ comment_(assembler->masm(), "EnsureAccumulatorPreservedScope")
+#endif
+{
+ assembler_->Push(kInterpreterAccumulatorRegister);
+}
+
+EnsureAccumulatorPreservedScope::~EnsureAccumulatorPreservedScope() {
+ BaselineAssembler::ScratchRegisterScope scratch(assembler_);
+ Register reg = scratch.AcquireScratch();
+ assembler_->Pop(reg);
+ AssertEqualToAccumulator(reg);
+}
+
#undef __
} // namespace baseline
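The scope pushes the accumulator on entry, pops into a scratch register on exit, and asserts equality: a capture-and-compare RAII guard. The same idea in plain C++ (illustrative only):

#include <cassert>

template <typename T>
class EnsureUnchangedScope {
 public:
  explicit EnsureUnchangedScope(const T& slot) : slot_(slot), saved_(slot) {}
  ~EnsureUnchangedScope() { assert(slot_ == saved_); }  // compare on exit

 private:
  const T& slot_;  // watched location
  T saved_;        // snapshot taken on entry
};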
diff --git a/deps/v8/src/baseline/baseline-assembler.h b/deps/v8/src/baseline/baseline-assembler.h
index e1063ff2b2..b8c876a8d3 100644
--- a/deps/v8/src/baseline/baseline-assembler.h
+++ b/deps/v8/src/baseline/baseline-assembler.h
@@ -202,6 +202,21 @@ class SaveAccumulatorScope final {
BaselineAssembler* assembler_;
};
+class EnsureAccumulatorPreservedScope final {
+ public:
+ inline explicit EnsureAccumulatorPreservedScope(BaselineAssembler* assembler);
+
+ inline ~EnsureAccumulatorPreservedScope();
+
+ private:
+ inline void AssertEqualToAccumulator(Register reg);
+
+ BaselineAssembler* assembler_;
+#ifdef V8_CODE_COMMENTS
+ Assembler::CodeComment comment_;
+#endif
+};
+
} // namespace baseline
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/baseline/baseline-batch-compiler.cc b/deps/v8/src/baseline/baseline-batch-compiler.cc
index 6a25df7264..fb66139a31 100644
--- a/deps/v8/src/baseline/baseline-batch-compiler.cc
+++ b/deps/v8/src/baseline/baseline-batch-compiler.cc
@@ -6,9 +6,8 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
- V8_TARGET_ARCH_MIPS
+#include "src/flags/flags.h"
+#if ENABLE_SPARKPLUG
#include "src/baseline/baseline-compiler.h"
#include "src/codegen/compiler.h"
@@ -40,7 +39,7 @@ bool BaselineBatchCompiler::EnqueueFunction(Handle<JSFunction> function) {
Handle<SharedFunctionInfo> shared(function->shared(), isolate_);
// Early return if the function is compiled with baseline already or it is not
// suitable for baseline compilation.
- if (shared->HasBaselineData()) return true;
+ if (shared->HasBaselineCode()) return true;
if (!CanCompileWithBaseline(isolate_, *shared)) return false;
// Immediately compile the function if batch compilation is disabled.
diff --git a/deps/v8/src/baseline/baseline-compiler.cc b/deps/v8/src/baseline/baseline-compiler.cc
index f30812c85a..63d684e733 100644
--- a/deps/v8/src/baseline/baseline-compiler.cc
+++ b/deps/v8/src/baseline/baseline-compiler.cc
@@ -48,6 +48,8 @@
#include "src/baseline/mips64/baseline-compiler-mips64-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/baseline/mips/baseline-compiler-mips-inl.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/baseline/loong64/baseline-compiler-loong64-inl.h"
#else
#error Unsupported target architecture.
#endif
@@ -321,9 +323,16 @@ MaybeHandle<Code> BaselineCompiler::Build(Isolate* isolate) {
// Allocate the bytecode offset table.
Handle<ByteArray> bytecode_offset_table =
bytecode_offset_table_builder_.ToBytecodeOffsetTable(isolate);
- return Factory::CodeBuilder(isolate, desc, CodeKind::BASELINE)
- .set_bytecode_offset_table(bytecode_offset_table)
- .TryBuild();
+
+ Factory::CodeBuilder code_builder(isolate, desc, CodeKind::BASELINE);
+ code_builder.set_bytecode_offset_table(bytecode_offset_table);
+ if (shared_function_info_->HasInterpreterData()) {
+ code_builder.set_interpreter_data(
+ handle(shared_function_info_->interpreter_data(), isolate));
+ } else {
+ code_builder.set_interpreter_data(bytecode_);
+ }
+ return code_builder.TryBuild();
}
int BaselineCompiler::EstimateInstructionSize(BytecodeArray bytecode) {
@@ -488,13 +497,31 @@ void BaselineCompiler::VisitSingleBytecode() {
TraceBytecode(Runtime::kTraceUnoptimizedBytecodeEntry);
#endif
- switch (iterator().current_bytecode()) {
+ {
+ interpreter::Bytecode bytecode = iterator().current_bytecode();
+
+#ifdef DEBUG
+ base::Optional<EnsureAccumulatorPreservedScope> accumulator_preserved_scope;
+ // We should make sure to preserve the accumulator whenever the bytecode
+ // isn't registered as writing to it. We can't do this for jumps or switches
+ // though, since the control flow would not match the control flow of this
+ // scope.
+ if (FLAG_debug_code &&
+ !interpreter::Bytecodes::WritesAccumulator(bytecode) &&
+ !interpreter::Bytecodes::IsJump(bytecode) &&
+ !interpreter::Bytecodes::IsSwitch(bytecode)) {
+ accumulator_preserved_scope.emplace(&basm_);
+ }
+#endif // DEBUG
+
+ switch (bytecode) {
#define BYTECODE_CASE(name, ...) \
case interpreter::Bytecode::k##name: \
Visit##name(); \
break;
- BYTECODE_LIST(BYTECODE_CASE)
+ BYTECODE_LIST(BYTECODE_CASE)
#undef BYTECODE_CASE
+ }
}
#ifdef V8_TRACE_UNOPTIMIZED
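The base::Optional wrapper is how the compiler arms the guard only for bytecodes that must preserve the accumulator; jumps and switches are excluded because the destructor would not line up with their control flow. The conditional-RAII pattern, sketched with std::optional:

#include <optional>

struct Guard {
  Guard() { /* push / snapshot */ }
  ~Guard() { /* pop / assert */ }
};

void DispatchOne(bool needs_guard) {
  std::optional<Guard> guard;
  if (needs_guard) guard.emplace();  // constructed only when armed
  // ... dispatch the bytecode; ~Guard (if any) runs when we return.
}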
@@ -1173,53 +1200,57 @@ void BaselineCompiler::BuildCall(uint32_t slot, uint32_t arg_count,
void BaselineCompiler::VisitCallAnyReceiver() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
- uint32_t arg_count = args.register_count() - 1; // Remove receiver.
+ uint32_t arg_count = args.register_count();
+ if (!kJSArgcIncludesReceiver) arg_count -= 1; // Remove receiver.
BuildCall<ConvertReceiverMode::kAny>(Index(3), arg_count, args);
}
void BaselineCompiler::VisitCallProperty() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
- uint32_t arg_count = args.register_count() - 1; // Remove receiver.
+ uint32_t arg_count = args.register_count();
+ if (!kJSArgcIncludesReceiver) arg_count -= 1; // Remove receiver.
BuildCall<ConvertReceiverMode::kNotNullOrUndefined>(Index(3), arg_count,
args);
}
void BaselineCompiler::VisitCallProperty0() {
- BuildCall<ConvertReceiverMode::kNotNullOrUndefined>(Index(2), 0,
- RegisterOperand(1));
+ BuildCall<ConvertReceiverMode::kNotNullOrUndefined>(
+ Index(2), JSParameterCount(0), RegisterOperand(1));
}
void BaselineCompiler::VisitCallProperty1() {
BuildCall<ConvertReceiverMode::kNotNullOrUndefined>(
- Index(3), 1, RegisterOperand(1), RegisterOperand(2));
+ Index(3), JSParameterCount(1), RegisterOperand(1), RegisterOperand(2));
}
void BaselineCompiler::VisitCallProperty2() {
BuildCall<ConvertReceiverMode::kNotNullOrUndefined>(
- Index(4), 2, RegisterOperand(1), RegisterOperand(2), RegisterOperand(3));
+ Index(4), JSParameterCount(2), RegisterOperand(1), RegisterOperand(2),
+ RegisterOperand(3));
}
void BaselineCompiler::VisitCallUndefinedReceiver() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
- uint32_t arg_count = args.register_count();
+ uint32_t arg_count = JSParameterCount(args.register_count());
BuildCall<ConvertReceiverMode::kNullOrUndefined>(
Index(3), arg_count, RootIndex::kUndefinedValue, args);
}
void BaselineCompiler::VisitCallUndefinedReceiver0() {
- BuildCall<ConvertReceiverMode::kNullOrUndefined>(Index(1), 0,
- RootIndex::kUndefinedValue);
+ BuildCall<ConvertReceiverMode::kNullOrUndefined>(
+ Index(1), JSParameterCount(0), RootIndex::kUndefinedValue);
}
void BaselineCompiler::VisitCallUndefinedReceiver1() {
BuildCall<ConvertReceiverMode::kNullOrUndefined>(
- Index(2), 1, RootIndex::kUndefinedValue, RegisterOperand(1));
+ Index(2), JSParameterCount(1), RootIndex::kUndefinedValue,
+ RegisterOperand(1));
}
void BaselineCompiler::VisitCallUndefinedReceiver2() {
BuildCall<ConvertReceiverMode::kNullOrUndefined>(
- Index(3), 2, RootIndex::kUndefinedValue, RegisterOperand(1),
- RegisterOperand(2));
+ Index(3), JSParameterCount(2), RootIndex::kUndefinedValue,
+ RegisterOperand(1), RegisterOperand(2));
}
void BaselineCompiler::VisitCallWithSpread() {
@@ -1229,7 +1260,8 @@ void BaselineCompiler::VisitCallWithSpread() {
interpreter::Register spread_register = args.last_register();
args = args.Truncate(args.register_count() - 1);
- uint32_t arg_count = args.register_count() - 1; // Remove receiver.
+ uint32_t arg_count = args.register_count();
+ if (!kJSArgcIncludesReceiver) arg_count -= 1; // Remove receiver.
CallBuiltin<Builtin::kCallWithSpread_Baseline>(
RegisterOperand(0), // kFunction
@@ -1253,7 +1285,7 @@ void BaselineCompiler::VisitCallRuntimeForPair() {
void BaselineCompiler::VisitCallJSRuntime() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
- uint32_t arg_count = args.register_count();
+ uint32_t arg_count = JSParameterCount(args.register_count());
// Load context for LoadNativeContextSlot.
__ LoadContext(kContextRegister);
@@ -1376,7 +1408,7 @@ void BaselineCompiler::VisitIntrinsicAsyncGeneratorYield(
void BaselineCompiler::VisitConstruct() {
interpreter::RegisterList args = iterator().GetRegisterListOperand(1);
- uint32_t arg_count = args.register_count();
+ uint32_t arg_count = JSParameterCount(args.register_count());
CallBuiltin<Builtin::kConstruct_Baseline>(
RegisterOperand(0), // kFunction
kInterpreterAccumulatorRegister, // kNewTarget
@@ -1393,7 +1425,7 @@ void BaselineCompiler::VisitConstructWithSpread() {
interpreter::Register spread_register = args.last_register();
args = args.Truncate(args.register_count() - 1);
- uint32_t arg_count = args.register_count();
+ uint32_t arg_count = JSParameterCount(args.register_count());
using Descriptor =
CallInterfaceDescriptorFor<Builtin::kConstructWithSpread_Baseline>::type;
@@ -2079,13 +2111,15 @@ void BaselineCompiler::VisitReturn() {
iterator().current_bytecode_size_without_prefix();
int parameter_count = bytecode_->parameter_count();
- // We must pop all arguments from the stack (including the receiver). This
- // number of arguments is given by max(1 + argc_reg, parameter_count).
- int parameter_count_without_receiver =
- parameter_count - 1; // Exclude the receiver to simplify the
- // computation. We'll account for it at the end.
- TailCallBuiltin<Builtin::kBaselineLeaveFrame>(
- parameter_count_without_receiver, -profiling_weight);
+ if (kJSArgcIncludesReceiver) {
+ TailCallBuiltin<Builtin::kBaselineLeaveFrame>(parameter_count,
+ -profiling_weight);
+
+ } else {
+ int parameter_count_without_receiver = parameter_count - 1;
+ TailCallBuiltin<Builtin::kBaselineLeaveFrame>(
+ parameter_count_without_receiver, -profiling_weight);
+ }
}
void BaselineCompiler::VisitThrowReferenceErrorIfHole() {
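All of these call-site changes hinge on one convention switch: whether the static argument count includes the receiver. A sketch of the helper the diff relies on (names from the diff; the constant's value is build-configuration dependent and assumed true here for illustration):

constexpr bool kJSArgcIncludesReceiver = true;  // assumption for illustration

constexpr int JSParameterCount(int params_without_receiver) {
  return params_without_receiver + (kJSArgcIncludesReceiver ? 1 : 0);
}

static_assert(JSParameterCount(0) == 1,
              "the receiver is counted when the flag is set");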
diff --git a/deps/v8/src/baseline/baseline-compiler.h b/deps/v8/src/baseline/baseline-compiler.h
index d8cd9ac5c6..341e7c0822 100644
--- a/deps/v8/src/baseline/baseline-compiler.h
+++ b/deps/v8/src/baseline/baseline-compiler.h
@@ -162,6 +162,7 @@ class BaselineCompiler {
LocalIsolate* local_isolate_;
RuntimeCallStats* stats_;
Handle<SharedFunctionInfo> shared_function_info_;
+ Handle<HeapObject> interpreter_data_;
Handle<BytecodeArray> bytecode_;
MacroAssembler masm_;
BaselineAssembler basm_;
diff --git a/deps/v8/src/baseline/baseline.cc b/deps/v8/src/baseline/baseline.cc
index cec0805aec..764d2db645 100644
--- a/deps/v8/src/baseline/baseline.cc
+++ b/deps/v8/src/baseline/baseline.cc
@@ -43,6 +43,13 @@ bool CanCompileWithBaseline(Isolate* isolate, SharedFunctionInfo shared) {
// Functions with breakpoints have to stay interpreted.
if (shared.HasBreakInfo()) return false;
+ // Functions with instrumented bytecode can't be baseline compiled since the
+ // baseline code's bytecode array pointer is immutable.
+ if (shared.HasDebugInfo() &&
+ shared.GetDebugInfo().HasInstrumentedBytecodeArray()) {
+ return false;
+ }
+
// Do not baseline compile if function doesn't pass sparkplug_filter.
if (!shared.PassesFilter(FLAG_sparkplug_filter)) return false;
diff --git a/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h b/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
index e3f991886d..e280bee3da 100644
--- a/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
+++ b/deps/v8/src/baseline/ia32/baseline-assembler-ia32-inl.h
@@ -457,16 +457,21 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
- Register return_pc = scratch;
- __ masm()->PopReturnAddressTo(return_pc);
- __ masm()->lea(esp, MemOperand(esp, params_size, times_system_pointer_size,
- kSystemPointerSize));
- __ masm()->PushReturnAddressFrom(return_pc);
+ __ masm()->DropArguments(
+ params_size, scratch, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ masm()->Ret();
}
#undef __
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+ Register reg) {
+ assembler_->masm()->cmp(reg, kInterpreterAccumulatorRegister);
+ assembler_->masm()->Assert(equal, AbortReason::kUnexpectedValue);
+}
+
} // namespace baseline
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h b/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h
new file mode 100644
index 0000000000..059d932ef9
--- /dev/null
+++ b/deps/v8/src/baseline/loong64/baseline-assembler-loong64-inl.h
@@ -0,0 +1,503 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_LOONG64_BASELINE_ASSEMBLER_LOONG64_INL_H_
+#define V8_BASELINE_LOONG64_BASELINE_ASSEMBLER_LOONG64_INL_H_
+
+#include "src/baseline/baseline-assembler.h"
+#include "src/codegen/interface-descriptors.h"
+#include "src/codegen/loong64/assembler-loong64-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+class BaselineAssembler::ScratchRegisterScope {
+ public:
+ explicit ScratchRegisterScope(BaselineAssembler* assembler)
+ : assembler_(assembler),
+ prev_scope_(assembler->scratch_register_scope_),
+ wrapped_scope_(assembler->masm()) {
+ if (!assembler_->scratch_register_scope_) {
+ // If we haven't opened a scratch scope yet, include a few extra
+ // registers in this first scope.
+ wrapped_scope_.Include(t0.bit() | t1.bit() | t2.bit() | t3.bit());
+ }
+ assembler_->scratch_register_scope_ = this;
+ }
+ ~ScratchRegisterScope() { assembler_->scratch_register_scope_ = prev_scope_; }
+
+ Register AcquireScratch() { return wrapped_scope_.Acquire(); }
+
+ private:
+ BaselineAssembler* assembler_;
+ ScratchRegisterScope* prev_scope_;
+ UseScratchRegisterScope wrapped_scope_;
+};
+
+enum class Condition : uint32_t {
+ kEqual = eq,
+ kNotEqual = ne,
+
+ kLessThan = lt,
+ kGreaterThan = gt,
+ kLessThanEqual = le,
+ kGreaterThanEqual = ge,
+
+ kUnsignedLessThan = Uless,
+ kUnsignedGreaterThan = Ugreater,
+ kUnsignedLessThanEqual = Uless_equal,
+ kUnsignedGreaterThanEqual = Ugreater_equal,
+
+ kOverflow = overflow,
+ kNoOverflow = no_overflow,
+
+ kZero = eq,
+ kNotZero = ne,
+};
+
+inline internal::Condition AsMasmCondition(Condition cond) {
+ STATIC_ASSERT(sizeof(internal::Condition) == sizeof(Condition));
+ return static_cast<internal::Condition>(cond);
+}
+
+namespace detail {
+
+#ifdef DEBUG
+inline bool Clobbers(Register target, MemOperand op) {
+ return op.base() == target || op.index() == target;
+}
+#endif
+
+} // namespace detail
+
+#define __ masm_->
+
+MemOperand BaselineAssembler::RegisterFrameOperand(
+ interpreter::Register interpreter_register) {
+ return MemOperand(fp, interpreter_register.ToOperand() * kSystemPointerSize);
+}
+MemOperand BaselineAssembler::FeedbackVectorOperand() {
+ return MemOperand(fp, BaselineFrameConstants::kFeedbackVectorFromFp);
+}
+
+void BaselineAssembler::Bind(Label* label) { __ bind(label); }
+
+void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ bind(label); }
+
+void BaselineAssembler::JumpTarget() {
+ // NOP.
+}
+void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
+ __ Branch(target);
+}
+void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
+ Label* target, Label::Distance) {
+ __ JumpIfRoot(value, index, target);
+}
+void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
+ Label* target, Label::Distance) {
+ __ JumpIfNotRoot(value, index, target);
+}
+void BaselineAssembler::JumpIfSmi(Register value, Label* target,
+ Label::Distance) {
+ __ JumpIfSmi(value, target);
+}
+void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
+ Label::Distance) {
+ __ JumpIfNotSmi(value, target);
+}
+
+void BaselineAssembler::CallBuiltin(Builtin builtin) {
+ ASM_CODE_COMMENT_STRING(masm_,
+ __ CommentForOffHeapTrampoline("call", builtin));
+ Register temp = t7;
+ __ LoadEntryFromBuiltin(builtin, temp);
+ __ Call(temp);
+}
+
+void BaselineAssembler::TailCallBuiltin(Builtin builtin) {
+ ASM_CODE_COMMENT_STRING(masm_,
+ __ CommentForOffHeapTrampoline("tail call", builtin));
+ Register temp = t7;
+ __ LoadEntryFromBuiltin(builtin, temp);
+ __ Jump(temp);
+}
+
+void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
+ Label* target, Label::Distance) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ __ And(scratch, value, Operand(mask));
+ __ Branch(target, AsMasmCondition(cc), scratch, Operand(zero_reg));
+}
+
+void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
+ Label* target, Label::Distance) {
+ __ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
+}
+void BaselineAssembler::JumpIfObjectType(Condition cc, Register object,
+ InstanceType instance_type,
+ Register map, Label* target,
+ Label::Distance) {
+ ScratchRegisterScope temps(this);
+ Register type = temps.AcquireScratch();
+ __ GetObjectType(object, map, type);
+ __ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
+}
+void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
+ InstanceType instance_type,
+ Label* target, Label::Distance) {
+ ScratchRegisterScope temps(this);
+ Register type = temps.AcquireScratch();
+ if (FLAG_debug_code) {
+ __ AssertNotSmi(map);
+ __ GetObjectType(map, type, type);
+ __ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
+ }
+ __ Ld_d(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
+}
+void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
+ Label* target, Label::Distance) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ __ li(scratch, Operand(smi));
+ __ SmiUntag(scratch);
+ __ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
+}
+void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
+ Label* target, Label::Distance) {
+ __ AssertSmi(lhs);
+ __ AssertSmi(rhs);
+ __ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
+}
+void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
+ MemOperand operand, Label* target,
+ Label::Distance) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ __ Ld_d(scratch, operand);
+ __ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
+}
+void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
+ Register value, Label* target,
+ Label::Distance) {
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ __ Ld_d(scratch, operand);
+ __ Branch(target, AsMasmCondition(cc), scratch, Operand(value));
+}
+void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
+ Label* target, Label::Distance) {
+ __ Branch(target, AsMasmCondition(cc), value, Operand(byte));
+}
+void BaselineAssembler::Move(interpreter::Register output, Register source) {
+ Move(RegisterFrameOperand(output), source);
+}
+void BaselineAssembler::Move(Register output, TaggedIndex value) {
+ __ li(output, Operand(value.ptr()));
+}
+void BaselineAssembler::Move(MemOperand output, Register source) {
+ __ St_d(source, output);
+}
+void BaselineAssembler::Move(Register output, ExternalReference reference) {
+ __ li(output, Operand(reference));
+}
+void BaselineAssembler::Move(Register output, Handle<HeapObject> value) {
+ __ li(output, Operand(value));
+}
+void BaselineAssembler::Move(Register output, int32_t value) {
+ __ li(output, Operand(value));
+}
+void BaselineAssembler::MoveMaybeSmi(Register output, Register source) {
+ __ Move(output, source);
+}
+void BaselineAssembler::MoveSmi(Register output, Register source) {
+ __ Move(output, source);
+}
+
+namespace detail {
+
+template <typename Arg>
+inline Register ToRegister(BaselineAssembler* basm,
+ BaselineAssembler::ScratchRegisterScope* scope,
+ Arg arg) {
+ Register reg = scope->AcquireScratch();
+ basm->Move(reg, arg);
+ return reg;
+}
+inline Register ToRegister(BaselineAssembler* basm,
+ BaselineAssembler::ScratchRegisterScope* scope,
+ Register reg) {
+ return reg;
+}
+
+template <typename... Args>
+struct PushAllHelper;
+template <>
+struct PushAllHelper<> {
+ static int Push(BaselineAssembler* basm) { return 0; }
+ static int PushReverse(BaselineAssembler* basm) { return 0; }
+};
+// TODO(ishell): try to pack a sequence of pushes into one instruction by
+// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
+// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
+template <typename Arg>
+struct PushAllHelper<Arg> {
+ static int Push(BaselineAssembler* basm, Arg arg) {
+ BaselineAssembler::ScratchRegisterScope scope(basm);
+ basm->masm()->Push(ToRegister(basm, &scope, arg));
+ return 1;
+ }
+ static int PushReverse(BaselineAssembler* basm, Arg arg) {
+ return Push(basm, arg);
+ }
+};
+// TODO(ishell): try to pack a sequence of pushes into one instruction by
+// looking at register codes. For example, Push(r1, r2, r5, r0, r3, r4)
+// could be generated as two pushes: Push(r1, r2, r5) and Push(r0, r3, r4).
+template <typename Arg, typename... Args>
+struct PushAllHelper<Arg, Args...> {
+ static int Push(BaselineAssembler* basm, Arg arg, Args... args) {
+ PushAllHelper<Arg>::Push(basm, arg);
+ return 1 + PushAllHelper<Args...>::Push(basm, args...);
+ }
+ static int PushReverse(BaselineAssembler* basm, Arg arg, Args... args) {
+ int nargs = PushAllHelper<Args...>::PushReverse(basm, args...);
+ PushAllHelper<Arg>::Push(basm, arg);
+ return nargs + 1;
+ }
+};
+
+template <>
+struct PushAllHelper<interpreter::RegisterList> {
+ static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
+ for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
+ PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
+ }
+ return list.register_count();
+ }
+ static int PushReverse(BaselineAssembler* basm,
+ interpreter::RegisterList list) {
+ for (int reg_index = list.register_count() - 1; reg_index >= 0;
+ --reg_index) {
+ PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
+ }
+ return list.register_count();
+ }
+};
+
+template <typename... T>
+struct PopAllHelper;
+template <>
+struct PopAllHelper<> {
+ static void Pop(BaselineAssembler* basm) {}
+};
+// TODO(ishell): try to pack a sequence of pops into one instruction by
+// looking at register codes. For example, Pop(r1, r2, r5, r0, r3, r4)
+// could be generated as two pops: Pop(r1, r2, r5) and Pop(r0, r3, r4).
+template <>
+struct PopAllHelper<Register> {
+ static void Pop(BaselineAssembler* basm, Register reg) {
+ basm->masm()->Pop(reg);
+ }
+};
+template <typename... T>
+struct PopAllHelper<Register, T...> {
+ static void Pop(BaselineAssembler* basm, Register reg, T... tail) {
+ PopAllHelper<Register>::Pop(basm, reg);
+ PopAllHelper<T...>::Pop(basm, tail...);
+ }
+};
+
+} // namespace detail
+
+template <typename... T>
+int BaselineAssembler::Push(T... vals) {
+ return detail::PushAllHelper<T...>::Push(this, vals...);
+}
+
+template <typename... T>
+void BaselineAssembler::PushReverse(T... vals) {
+ detail::PushAllHelper<T...>::PushReverse(this, vals...);
+}
+
+template <typename... T>
+void BaselineAssembler::Pop(T... registers) {
+ detail::PopAllHelper<T...>::Pop(this, registers...);
+}
+
+void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
+ int offset) {
+ __ Ld_d(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
+ int offset) {
+ __ Ld_d(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
+ int offset) {
+ __ Ld_d(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadByteField(Register output, Register source,
+ int offset) {
+ __ Ld_b(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
+ Smi value) {
+ ASM_CODE_COMMENT(masm_);
+ ScratchRegisterScope temps(this);
+ Register scratch = temps.AcquireScratch();
+ __ li(scratch, Operand(value));
+ __ St_d(scratch, FieldMemOperand(target, offset));
+}
+void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
+ int offset,
+ Register value) {
+ ASM_CODE_COMMENT(masm_);
+ __ St_d(value, FieldMemOperand(target, offset));
+ ScratchRegisterScope temps(this);
+ __ RecordWriteField(target, offset, value, kRAHasNotBeenSaved,
+ SaveFPRegsMode::kIgnore);
+}
+void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
+ int offset,
+ Register value) {
+ __ St_d(value, FieldMemOperand(target, offset));
+}
+void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
+ int32_t weight, Label* skip_interrupt_label) {
+ ASM_CODE_COMMENT(masm_);
+ ScratchRegisterScope scratch_scope(this);
+ Register feedback_cell = scratch_scope.AcquireScratch();
+ LoadFunction(feedback_cell);
+ LoadTaggedPointerField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
+
+ Register interrupt_budget = scratch_scope.AcquireScratch();
+ __ Ld_w(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ __ Add_w(interrupt_budget, interrupt_budget, weight);
+ __ St_w(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ if (skip_interrupt_label) {
+ DCHECK_LT(weight, 0);
+ __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
+ }
+}
+void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
+ Register weight, Label* skip_interrupt_label) {
+ ASM_CODE_COMMENT(masm_);
+ ScratchRegisterScope scratch_scope(this);
+ Register feedback_cell = scratch_scope.AcquireScratch();
+ LoadFunction(feedback_cell);
+ LoadTaggedPointerField(feedback_cell, feedback_cell,
+ JSFunction::kFeedbackCellOffset);
+
+ Register interrupt_budget = scratch_scope.AcquireScratch();
+ __ Ld_w(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ __ Add_w(interrupt_budget, interrupt_budget, weight);
+ __ St_w(interrupt_budget,
+ FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
+ if (skip_interrupt_label)
+ __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
+}
+
+void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
+ __ Add_d(lhs, lhs, Operand(rhs));
+}
+
+void BaselineAssembler::Switch(Register reg, int case_value_base,
+ Label** labels, int num_labels) {
+ ASM_CODE_COMMENT(masm_);
+ Label fallthrough;
+ if (case_value_base > 0) {
+ __ Sub_d(reg, reg, Operand(case_value_base));
+ }
+
+ ScratchRegisterScope scope(this);
+ Register scratch = scope.AcquireScratch();
+ __ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual),
+ reg, Operand(num_labels));
+ int entry_size_log2 = 2;
+ __ pcaddi(scratch, 3);
+ __ Alsl_d(scratch, reg, scratch, entry_size_log2);
+ __ Jump(scratch);
+ {
+ TurboAssembler::BlockTrampolinePoolScope(masm());
+ __ BlockTrampolinePoolFor(num_labels * kInstrSize);
+ for (int i = 0; i < num_labels; ++i) {
+ __ Branch(labels[i]);
+ }
+ __ bind(&fallthrough);
+ }
+}
+
+#undef __
+
+#define __ basm.
+
+void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
+ ASM_CODE_COMMENT(masm);
+ BaselineAssembler basm(masm);
+
+ Register weight = BaselineLeaveFrameDescriptor::WeightRegister();
+ Register params_size = BaselineLeaveFrameDescriptor::ParamsSizeRegister();
+
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Update Interrupt Budget");
+
+ Label skip_interrupt_label;
+ __ AddToInterruptBudgetAndJumpIfNotExceeded(weight, &skip_interrupt_label);
+ __ masm()->SmiTag(params_size);
+ __ masm()->Push(params_size, kInterpreterAccumulatorRegister);
+
+ __ LoadContext(kContextRegister);
+ __ LoadFunction(kJSFunctionRegister);
+ __ masm()->Push(kJSFunctionRegister);
+ __ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
+
+ __ masm()->Pop(params_size, kInterpreterAccumulatorRegister);
+ __ masm()->SmiUntag(params_size);
+ __ Bind(&skip_interrupt_label);
+ }
+
+ BaselineAssembler::ScratchRegisterScope temps(&basm);
+ Register actual_params_size = temps.AcquireScratch();
+ // Compute the size of the actual parameters + receiver (in bytes).
+ __ Move(actual_params_size,
+ MemOperand(fp, StandardFrameConstants::kArgCOffset));
+
+ // If actual is bigger than formal, then we should use it to free up the stack
+ // arguments.
+ Label corrected_args_count;
+ __ masm()->Branch(&corrected_args_count, ge, params_size,
+ Operand(actual_params_size));
+ __ masm()->Move(params_size, actual_params_size);
+ __ Bind(&corrected_args_count);
+
+ // Leave the frame (also dropping the register file).
+ __ masm()->LeaveFrame(StackFrame::BASELINE);
+
+ // Drop receiver + arguments.
+ __ masm()->Add_d(params_size, params_size, 1); // Include the receiver.
+ __ masm()->Alsl_d(sp, params_size, sp, kPointerSizeLog2);
+ __ masm()->Ret();
+}
+
+#undef __
+
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+ Register reg) {
+ assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue, reg,
+ Operand(kInterpreterAccumulatorRegister));
+}
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BASELINE_LOONG64_BASELINE_ASSEMBLER_LOONG64_INL_H_
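In the Switch() above, the jump-table address arithmetic is compact. Assuming LoongArch pcaddi semantics (rd = PC + (si20 << 2)), a standalone check of the offsets:

#include <cstdint>
#include <cassert>

int main() {
  const uint64_t kInstrSize = 4;
  uint64_t pc = 0x2000;  // address of the pcaddi itself (arbitrary)
  // pcaddi scratch, 3: skip the three instructions (pcaddi, alsl, jump)
  // that precede the table of branches.
  uint64_t table = pc + (3 << 2);
  assert(table == pc + 3 * kInstrSize);
  // Alsl_d scratch, reg, scratch, 2: each slot is one 4-byte Branch, so the
  // case index is scaled by 1 << entry_size_log2 with entry_size_log2 == 2.
  uint64_t index = 4;
  assert(table + (index << 2) == table + index * kInstrSize);
}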
diff --git a/deps/v8/src/baseline/loong64/baseline-compiler-loong64-inl.h b/deps/v8/src/baseline/loong64/baseline-compiler-loong64-inl.h
new file mode 100644
index 0000000000..9a68c7ebca
--- /dev/null
+++ b/deps/v8/src/baseline/loong64/baseline-compiler-loong64-inl.h
@@ -0,0 +1,77 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASELINE_LOONG64_BASELINE_COMPILER_LOONG64_INL_H_
+#define V8_BASELINE_LOONG64_BASELINE_COMPILER_LOONG64_INL_H_
+
+#include "src/base/logging.h"
+#include "src/baseline/baseline-compiler.h"
+
+namespace v8 {
+namespace internal {
+namespace baseline {
+
+#define __ basm_.
+
+void BaselineCompiler::Prologue() {
+ ASM_CODE_COMMENT(&masm_);
+ __ masm()->EnterFrame(StackFrame::BASELINE);
+ DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
+ int max_frame_size =
+ bytecode_->frame_size() + max_call_args_ * kSystemPointerSize;
+ CallBuiltin<Builtin::kBaselineOutOfLinePrologue>(
+ kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
+ max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
+
+ PrologueFillFrame();
+}
+
+void BaselineCompiler::PrologueFillFrame() {
+ ASM_CODE_COMMENT(&masm_);
+ // Inlined register frame fill
+ interpreter::Register new_target_or_generator_register =
+ bytecode_->incoming_new_target_or_generator_register();
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ int register_count = bytecode_->register_count();
+ // Magic value: unroll factor for the register-fill loop below.
+ const int kLoopUnrollSize = 8;
+ const int new_target_index = new_target_or_generator_register.index();
+ const bool has_new_target = new_target_index != kMaxInt;
+ if (has_new_target) {
+ DCHECK_LE(new_target_index, register_count);
+ __ masm()->Add_d(sp, sp, Operand(-(kPointerSize * new_target_index)));
+ for (int i = 0; i < new_target_index; i++) {
+ __ masm()->St_d(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
+ }
+ // Push new_target_or_generator.
+ __ Push(kJavaScriptCallNewTargetRegister);
+ register_count -= new_target_index + 1;
+ }
+ if (register_count < 2 * kLoopUnrollSize) {
+ // If the frame is small enough, just unroll the frame fill completely.
+ __ masm()->Add_d(sp, sp, Operand(-(kPointerSize * register_count)));
+ for (int i = 0; i < register_count; ++i) {
+ __ masm()->St_d(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
+ }
+ } else {
+ __ masm()->Add_d(sp, sp, Operand(-(kPointerSize * register_count)));
+ for (int i = 0; i < register_count; ++i) {
+ __ masm()->St_d(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
+ }
+ }
+}
+
+void BaselineCompiler::VerifyFrameSize() {
+ ASM_CODE_COMMENT(&masm_);
+ __ masm()->Add_d(t0, sp,
+ Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp +
+ bytecode_->frame_size()));
+ __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, t0, Operand(fp));
+}
+
+} // namespace baseline
+} // namespace internal
+} // namespace v8
+
+#endif // V8_BASELINE_LOONG64_BASELINE_COMPILER_LOONG64_INL_H_
diff --git a/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h b/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h
index 31bc96861b..989d5c4ae5 100644
--- a/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h
+++ b/deps/v8/src/baseline/mips/baseline-assembler-mips-inl.h
@@ -506,6 +506,12 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
#undef __
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+ Register reg) {
+ assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue, reg,
+ Operand(kInterpreterAccumulatorRegister));
+}
+
} // namespace baseline
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h b/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h
index d8220fa798..561e45249e 100644
--- a/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h
+++ b/deps/v8/src/baseline/mips64/baseline-assembler-mips64-inl.h
@@ -504,6 +504,12 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
#undef __
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+ Register reg) {
+ assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue, reg,
+ Operand(kInterpreterAccumulatorRegister));
+}
+
} // namespace baseline
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
index 01f5a5802b..663462fdb5 100644
--- a/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
+++ b/deps/v8/src/baseline/riscv64/baseline-assembler-riscv64-inl.h
@@ -109,30 +109,19 @@ void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
}
void BaselineAssembler::CallBuiltin(Builtin builtin) {
- if (masm()->options().short_builtin_calls) {
- __ CallBuiltin(builtin);
- } else {
- ASM_CODE_COMMENT_STRING(masm_,
- __ CommentForOffHeapTrampoline("call", builtin));
- Register temp = t6;
- __ LoadEntryFromBuiltin(builtin, temp);
- __ Call(temp);
- }
+ ASM_CODE_COMMENT_STRING(masm_,
+ __ CommentForOffHeapTrampoline("call", builtin));
+ Register temp = t6;
+ __ LoadEntryFromBuiltin(builtin, temp);
+ __ Call(temp);
}
void BaselineAssembler::TailCallBuiltin(Builtin builtin) {
- if (masm()->options().short_builtin_calls) {
- // Generate pc-relative jump.
- __ TailCallBuiltin(builtin);
- } else {
- ASM_CODE_COMMENT_STRING(
- masm_, __ CommentForOffHeapTrampoline("tail call", builtin));
- // t6 be used for function call in RISCV64
- // For example 'jalr t6' or 'jal t6'
- Register temp = t6;
- __ LoadEntryFromBuiltin(builtin, temp);
- __ Jump(temp);
- }
+ ASM_CODE_COMMENT_STRING(masm_,
+ __ CommentForOffHeapTrampoline("tail call", builtin));
+ Register temp = t6;
+ __ LoadEntryFromBuiltin(builtin, temp);
+ __ Jump(temp);
}
void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
@@ -140,7 +129,7 @@ void BaselineAssembler::TestAndBranch(Register value, int mask, Condition cc,
ScratchRegisterScope temps(this);
Register tmp = temps.AcquireScratch();
__ And(tmp, value, Operand(mask));
- __ Branch(target, AsMasmCondition(cc), tmp, Operand(mask));
+ __ Branch(target, AsMasmCondition(cc), tmp, Operand(zero_reg));
}
void BaselineAssembler::JumpIf(Condition cc, Register lhs, const Operand& rhs,
@@ -161,6 +150,11 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
Label* target, Label::Distance) {
ScratchRegisterScope temps(this);
Register type = temps.AcquireScratch();
+ if (FLAG_debug_code) {
+ __ AssertNotSmi(map);
+ __ GetObjectType(map, type, type);
+ __ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
+ }
__ Ld(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
}
@@ -182,44 +176,28 @@ void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
}
void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
Label* target, Label::Distance) {
- ScratchRegisterScope temps(this);
- Register temp = temps.AcquireScratch();
+ // TODO: support pointer compression.
__ AssertSmi(lhs);
__ AssertSmi(rhs);
- if (COMPRESS_POINTERS_BOOL) {
- __ Sub32(temp, lhs, rhs);
- } else {
- __ Sub64(temp, lhs, rhs);
- }
- __ Branch(target, AsMasmCondition(cc), temp, Operand(zero_reg));
+ __ Branch(target, AsMasmCondition(cc), lhs, Operand(rhs));
}
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
Label::Distance) {
+ // TODO: support pointer compression.
ScratchRegisterScope temps(this);
- Register tmp1 = temps.AcquireScratch();
- Register tmp2 = temps.AcquireScratch();
- __ Ld(tmp1, operand);
- if (COMPRESS_POINTERS_BOOL) {
- __ Sub32(tmp2, value, tmp1);
- } else {
- __ Sub64(tmp2, value, tmp1);
- }
- __ Branch(target, AsMasmCondition(cc), tmp2, Operand(zero_reg));
+ Register scratch = temps.AcquireScratch();
+ __ Ld(scratch, operand);
+ __ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
}
void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
Register value, Label* target,
Label::Distance) {
+ // TODO: support pointer compression.
ScratchRegisterScope temps(this);
- Register tmp1 = temps.AcquireScratch();
- Register tmp2 = temps.AcquireScratch();
- __ Ld(tmp1, operand);
- if (COMPRESS_POINTERS_BOOL) {
- __ Sub32(tmp2, tmp1, value);
- } else {
- __ Sub64(tmp2, tmp1, value);
- }
- __ Branch(target, AsMasmCondition(cc), tmp2, Operand(zero_reg));
+ Register scratch = temps.AcquireScratch();
+ __ Ld(scratch, operand);
+ __ Branch(target, AsMasmCondition(cc), scratch, Operand(value));
}
void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
Label* target, Label::Distance) {
@@ -268,136 +246,50 @@ inline Register ToRegister(BaselineAssembler* basm,
}
template <typename... Args>
-struct CountPushHelper;
-template <>
-struct CountPushHelper<> {
- static int Count() { return 0; }
-};
-template <typename Arg, typename... Args>
-struct CountPushHelper<Arg, Args...> {
- static int Count(Arg arg, Args... args) {
- return 1 + CountPushHelper<Args...>::Count(args...);
- }
-};
-template <typename... Args>
-struct CountPushHelper<interpreter::RegisterList, Args...> {
- static int Count(interpreter::RegisterList list, Args... args) {
- return list.register_count() + CountPushHelper<Args...>::Count(args...);
- }
-};
-
-template <typename... Args>
struct PushAllHelper;
-template <typename... Args>
-void PushAll(BaselineAssembler* basm, Args... args) {
- PushAllHelper<Args...>::Push(basm, args...);
-}
-template <typename... Args>
-void PushAllReverse(BaselineAssembler* basm, Args... args) {
- PushAllHelper<Args...>::PushReverse(basm, args...);
-}
-
template <>
struct PushAllHelper<> {
- static void Push(BaselineAssembler* basm) {}
- static void PushReverse(BaselineAssembler* basm) {}
+ static int Push(BaselineAssembler* basm) { return 0; }
+ static int PushReverse(BaselineAssembler* basm) { return 0; }
};
-
-inline void PushSingle(MacroAssembler* masm, RootIndex source) {
- masm->PushRoot(source);
-}
-inline void PushSingle(MacroAssembler* masm, Register reg) { masm->Push(reg); }
-
-inline void PushSingle(MacroAssembler* masm, Smi value) { masm->Push(value); }
-inline void PushSingle(MacroAssembler* masm, Handle<HeapObject> object) {
- masm->Push(object);
-}
-inline void PushSingle(MacroAssembler* masm, int32_t immediate) {
- masm->li(kScratchReg, (int64_t)(immediate));
- PushSingle(masm, kScratchReg);
-}
-
-inline void PushSingle(MacroAssembler* masm, TaggedIndex value) {
- masm->li(kScratchReg, static_cast<int64_t>(value.ptr()));
- PushSingle(masm, kScratchReg);
-}
-inline void PushSingle(MacroAssembler* masm, MemOperand operand) {
- masm->Ld(kScratchReg, operand);
- PushSingle(masm, kScratchReg);
-}
-inline void PushSingle(MacroAssembler* masm, interpreter::Register source) {
- return PushSingle(masm, BaselineAssembler::RegisterFrameOperand(source));
-}
-
template <typename Arg>
struct PushAllHelper<Arg> {
- static void Push(BaselineAssembler* basm, Arg arg) {
- PushSingle(basm->masm(), arg);
+ static int Push(BaselineAssembler* basm, Arg arg) {
+ BaselineAssembler::ScratchRegisterScope scope(basm);
+ basm->masm()->Push(ToRegister(basm, &scope, arg));
+ return 1;
}
- static void PushReverse(BaselineAssembler* basm, Arg arg) {
- // Push the padding register to round up the amount of values pushed.
+ static int PushReverse(BaselineAssembler* basm, Arg arg) {
return Push(basm, arg);
}
};
-template <typename Arg1, typename Arg2, typename... Args>
-struct PushAllHelper<Arg1, Arg2, Args...> {
- static void Push(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
- Args... args) {
- {
- BaselineAssembler::ScratchRegisterScope scope(basm);
- basm->masm()->Push(ToRegister(basm, &scope, arg1),
- ToRegister(basm, &scope, arg2));
- }
- PushAll(basm, args...);
+template <typename Arg, typename... Args>
+struct PushAllHelper<Arg, Args...> {
+ static int Push(BaselineAssembler* basm, Arg arg, Args... args) {
+ PushAllHelper<Arg>::Push(basm, arg);
+ return 1 + PushAllHelper<Args...>::Push(basm, args...);
}
- static void PushReverse(BaselineAssembler* basm, Arg1 arg1, Arg2 arg2,
- Args... args) {
- PushAllReverse(basm, args...);
- {
- BaselineAssembler::ScratchRegisterScope scope(basm);
- basm->masm()->Push(ToRegister(basm, &scope, arg2),
- ToRegister(basm, &scope, arg1));
- }
- }
-};
-// Currently RegisterLists are always be the last argument, so we don't
-// specialize for the case where they're not. We do still specialise for the
-// aligned and unaligned cases.
-template <typename Arg>
-struct PushAllHelper<Arg, interpreter::RegisterList> {
- static void Push(BaselineAssembler* basm, Arg arg,
- interpreter::RegisterList list) {
- DCHECK_EQ(list.register_count() % 2, 1);
- PushAll(basm, arg, list[0], list.PopLeft());
- }
- static void PushReverse(BaselineAssembler* basm, Arg arg,
- interpreter::RegisterList list) {
- if (list.register_count() == 0) {
- PushAllReverse(basm, arg);
- } else {
- PushAllReverse(basm, arg, list[0], list.PopLeft());
- }
+ static int PushReverse(BaselineAssembler* basm, Arg arg, Args... args) {
+ int nargs = PushAllHelper<Args...>::PushReverse(basm, args...);
+ PushAllHelper<Arg>::Push(basm, arg);
+ return nargs + 1;
}
};
template <>
struct PushAllHelper<interpreter::RegisterList> {
- static void Push(BaselineAssembler* basm, interpreter::RegisterList list) {
- DCHECK_EQ(list.register_count() % 2, 0);
- for (int reg_index = 0; reg_index < list.register_count(); reg_index += 2) {
- PushAll(basm, list[reg_index], list[reg_index + 1]);
+ static int Push(BaselineAssembler* basm, interpreter::RegisterList list) {
+ for (int reg_index = 0; reg_index < list.register_count(); ++reg_index) {
+ PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
}
+ return list.register_count();
}
- static void PushReverse(BaselineAssembler* basm,
- interpreter::RegisterList list) {
- int reg_index = list.register_count() - 1;
- if (reg_index % 2 == 0) {
- // Push the padding register to round up the amount of values pushed.
- PushAllReverse(basm, list[reg_index]);
- reg_index--;
- }
- for (; reg_index >= 1; reg_index -= 2) {
- PushAllReverse(basm, list[reg_index - 1], list[reg_index]);
+ static int PushReverse(BaselineAssembler* basm,
+ interpreter::RegisterList list) {
+ for (int reg_index = list.register_count() - 1; reg_index >= 0;
+ --reg_index) {
+ PushAllHelper<interpreter::Register>::Push(basm, list[reg_index]);
}
+ return list.register_count();
}
};
@@ -414,10 +306,9 @@ struct PopAllHelper<Register> {
}
};
template <typename... T>
-struct PopAllHelper<Register, Register, T...> {
- static void Pop(BaselineAssembler* basm, Register reg1, Register reg2,
- T... tail) {
- basm->masm()->Pop(reg1, reg2);
+struct PopAllHelper<Register, T...> {
+ static void Pop(BaselineAssembler* basm, Register reg, T... tail) {
+ PopAllHelper<Register>::Pop(basm, reg);
PopAllHelper<T...>::Pop(basm, tail...);
}
};
@@ -426,20 +317,12 @@ struct PopAllHelper<Register, Register, T...> {
template <typename... T>
int BaselineAssembler::Push(T... vals) {
- // We have to count the pushes first, to decide whether to add padding before
- // the first push.
- int push_count = detail::CountPushHelper<T...>::Count(vals...);
- if (push_count % 2 == 0) {
- detail::PushAll(this, vals...);
- } else {
- detail::PushAll(this, vals...);
- }
- return push_count;
+ return detail::PushAllHelper<T...>::Push(this, vals...);
}
template <typename... T>
void BaselineAssembler::PushReverse(T... vals) {
- detail::PushAllReverse(this, vals...);
+ detail::PushAllHelper<T...>::PushReverse(this, vals...);
}
template <typename... T>
@@ -461,7 +344,7 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
}
void BaselineAssembler::LoadByteField(Register output, Register source,
int offset) {
- __ Ld(output, FieldMemOperand(source, offset));
+ __ Lb(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
Smi value) {
@@ -495,11 +378,11 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
- __ Ld(interrupt_budget,
+ __ Lw(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
// Remember to set flags as part of the add!
- __ Add64(interrupt_budget, interrupt_budget, weight);
- __ Sd(interrupt_budget,
+ __ Add32(interrupt_budget, interrupt_budget, weight);
+ __ Sw(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
if (skip_interrupt_label) {
DCHECK_LT(weight, 0);
@@ -517,11 +400,11 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
JSFunction::kFeedbackCellOffset);
Register interrupt_budget = scratch_scope.AcquireScratch();
- __ Ld(interrupt_budget,
+ __ Lw(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
// Remember to set flags as part of the add!
- __ Add64(interrupt_budget, interrupt_budget, weight);
- __ Sd(interrupt_budget,
+ __ Add32(interrupt_budget, interrupt_budget, weight);
+ __ Sw(interrupt_budget,
FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
if (skip_interrupt_label)
__ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(weight));
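A C-level model of the updated budget bookkeeping may help here; it assumes the interrupt budget is a 32-bit field, which is why the Ld/Sd/Add64 sequence above became Lw/Sw/Add32 (illustrative sketch only, not V8 code):

#include <cstdint>

bool AddToBudgetAndShouldSkipInterrupt(int32_t* interrupt_budget,
                                       int32_t weight) {
  *interrupt_budget += weight;          // Add32, then Sw back to the cell
  return *interrupt_budget >= weight;   // Branch(..., ge, budget, weight)
}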
@@ -546,7 +429,6 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
// Mostly copied from code-generator-riscv64.cc
ScratchRegisterScope scope(this);
- Register temp = scope.AcquireScratch();
Label table;
__ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual),
reg, Operand(int64_t(num_labels)));
@@ -555,21 +437,20 @@ void BaselineAssembler::Switch(Register reg, int case_value_base,
DCHECK(is_int32(imm64));
int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
- __ auipc(temp, Hi20); // Read PC + Hi20 into t6
- __ lui(temp, Lo12); // jump PC + Hi20 + Lo12
+ __ auipc(t6, Hi20); // t6 = PC + (Hi20 << 12)
+ __ addi(t6, t6, Lo12); // t6 = PC + Hi20<<12 + Lo12 = table base
- int entry_size_log2 = 2;
- Register temp2 = scope.AcquireScratch();
- __ CalcScaledAddress(temp2, temp, reg, entry_size_log2);
- __ Jump(temp);
+ int entry_size_log2 = 3;
+ __ CalcScaledAddress(t6, t6, reg, entry_size_log2);
+ __ Jump(t6);
{
TurboAssembler::BlockTrampolinePoolScope(masm());
- __ BlockTrampolinePoolFor(num_labels * kInstrSize);
+ __ BlockTrampolinePoolFor(num_labels * kInstrSize * 2);
__ bind(&table);
for (int i = 0; i < num_labels; ++i) {
- __ Branch(labels[i]);
+ __ BranchLong(labels[i]);
}
- DCHECK_EQ(num_labels * kInstrSize, __ InstructionsGeneratedSince(&table));
+ DCHECK_EQ(num_labels * 2, __ InstructionsGeneratedSince(&table));
__ bind(&fallthrough);
}
}
@@ -598,7 +479,7 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->Push(kJSFunctionRegister);
__ CallRuntime(Runtime::kBytecodeBudgetInterruptFromBytecode, 1);
- __ masm()->Pop(kInterpreterAccumulatorRegister, params_size);
+ __ masm()->Pop(params_size, kInterpreterAccumulatorRegister);
__ masm()->SmiUntag(params_size);
__ Bind(&skip_interrupt_label);
@@ -630,6 +511,11 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
#undef __
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+ Register reg) {
+ assembler_->masm()->Assert(eq, AbortReason::kUnexpectedValue, reg,
+ Operand(kInterpreterAccumulatorRegister));
+}
} // namespace baseline
} // namespace internal
} // namespace v8
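The Switch rewrite above computes the jump-table base with auipc/addi and scales the case index by 8 bytes because each entry is now a two-instruction BranchLong. A minimal standalone sketch of that address arithmetic, assuming 4-byte instructions and the Hi20/Lo12 split shown in the hunk (illustrative only, not V8 code):

#include <cstdint>

uintptr_t DispatchTarget(uintptr_t pc_of_auipc, int32_t imm, int case_index) {
  // auipc t6, Hi20; addi t6, t6, Lo12  ==>  t6 = pc + imm (the table base).
  int32_t hi20 = (imm + 0x800) >> 12;  // round so the low part fits in addi
  int32_t lo12 = (imm << 20) >> 20;    // sign-extended low 12 bits
  uintptr_t table =
      pc_of_auipc + (static_cast<intptr_t>(hi20) << 12) + lo12;
  // CalcScaledAddress(t6, t6, reg, 3): each entry is a two-instruction
  // BranchLong (8 bytes), hence entry_size_log2 == 3.
  return table + (static_cast<uintptr_t>(case_index) << 3);
}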
diff --git a/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h b/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h
index fc73105b8e..1fbdaa0761 100644
--- a/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h
+++ b/deps/v8/src/baseline/riscv64/baseline-compiler-riscv64-inl.h
@@ -37,69 +37,35 @@ void BaselineCompiler::PrologueFillFrame() {
const int kLoopUnrollSize = 8;
const int new_target_index = new_target_or_generator_register.index();
const bool has_new_target = new_target_index != kMaxInt;
- // BaselineOutOfLinePrologue already pushed one undefined.
- register_count -= 1;
if (has_new_target) {
- if (new_target_index == 0) {
- // Oops, need to fix up that undefined that BaselineOutOfLinePrologue
- // pushed.
- __ masm()->Sd(kJavaScriptCallNewTargetRegister, MemOperand(sp));
- } else {
- DCHECK_LE(new_target_index, register_count);
- int index = 1;
- for (; index + 2 <= new_target_index; index += 2) {
- __ masm()->Push(kInterpreterAccumulatorRegister,
- kInterpreterAccumulatorRegister);
- }
- if (index == new_target_index) {
- __ masm()->Push(kJavaScriptCallNewTargetRegister,
- kInterpreterAccumulatorRegister);
- } else {
- DCHECK_EQ(index, new_target_index - 1);
- __ masm()->Push(kInterpreterAccumulatorRegister,
- kJavaScriptCallNewTargetRegister);
- }
- // We pushed "index" registers, minus the one the prologue pushed, plus
- // the two registers that included new_target.
- register_count -= (index - 1 + 2);
+ DCHECK_LE(new_target_index, register_count);
+ __ masm()->Add64(sp, sp, Operand(-(kPointerSize * new_target_index)));
+ for (int i = 0; i < new_target_index; i++) {
+ __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
}
+ // Push new_target_or_generator.
+ __ Push(kJavaScriptCallNewTargetRegister);
+ register_count -= new_target_index + 1;
}
if (register_count < 2 * kLoopUnrollSize) {
// If the frame is small enough, just unroll the frame fill completely.
- for (int i = 0; i < register_count; i += 2) {
- __ masm()->Push(kInterpreterAccumulatorRegister,
- kInterpreterAccumulatorRegister);
+ __ masm()->Add64(sp, sp, Operand(-(kPointerSize * register_count)));
+ for (int i = 0; i < register_count; ++i) {
+ __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
}
} else {
- BaselineAssembler::ScratchRegisterScope temps(&basm_);
- Register scratch = temps.AcquireScratch();
-
- // Extract the first few registers to round to the unroll size.
- int first_registers = register_count % kLoopUnrollSize;
- for (int i = 0; i < first_registers; i += 2) {
- __ masm()->Push(kInterpreterAccumulatorRegister,
- kInterpreterAccumulatorRegister);
- }
- __ Move(scratch, register_count / kLoopUnrollSize);
- // We enter the loop unconditionally, so make sure we need to loop at least
- // once.
- DCHECK_GT(register_count / kLoopUnrollSize, 0);
- Label loop;
- __ Bind(&loop);
- for (int i = 0; i < kLoopUnrollSize; i += 2) {
- __ masm()->Push(kInterpreterAccumulatorRegister,
- kInterpreterAccumulatorRegister);
+ __ masm()->Add64(sp, sp, Operand(-(kPointerSize * register_count)));
+ for (int i = 0; i < register_count; ++i) {
+ __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
}
- __ masm()->Branch(&loop, gt, scratch, Operand(1));
}
}
void BaselineCompiler::VerifyFrameSize() {
ASM_CODE_COMMENT(&masm_);
__ masm()->Add64(kScratchReg, sp,
- RoundUp(InterpreterFrameConstants::kFixedFrameSizeFromFp +
- bytecode_->frame_size(),
- 2 * kSystemPointerSize));
+ Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp +
+ bytecode_->frame_size()));
__ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, kScratchReg,
Operand(fp));
}
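The PrologueFillFrame change above replaces paired pushes with one stack-pointer adjustment followed by plain stores. A sketch of that strategy, assuming 8-byte slots and that the accumulator holds the undefined value (illustrative only):

#include <cstdint>

void FillFrame(uint64_t** sp, uint64_t undefined_value, int register_count) {
  *sp -= register_count;           // Add64(sp, sp, -(kPointerSize * count))
  for (int i = 0; i < register_count; ++i) {
    (*sp)[i] = undefined_value;    // Sd(accumulator, MemOperand(sp, i * 8))
  }
}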
diff --git a/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h b/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h
index f18ac84eae..aa9564dcea 100644
--- a/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h
+++ b/deps/v8/src/baseline/x64/baseline-assembler-x64-inl.h
@@ -468,16 +468,21 @@ void BaselineAssembler::EmitReturn(MacroAssembler* masm) {
__ masm()->LeaveFrame(StackFrame::BASELINE);
// Drop receiver + arguments.
- Register return_pc = scratch;
- __ masm()->PopReturnAddressTo(return_pc);
- __ masm()->leaq(rsp, MemOperand(rsp, params_size, times_system_pointer_size,
- kSystemPointerSize));
- __ masm()->PushReturnAddressFrom(return_pc);
+ __ masm()->DropArguments(
+ params_size, scratch, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ masm()->Ret();
}
#undef __
+inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
+ Register reg) {
+ assembler_->masm()->cmp_tagged(reg, kInterpreterAccumulatorRegister);
+ assembler_->masm()->Assert(equal, AbortReason::kUnexpectedValue);
+}
+
} // namespace baseline
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/bigint/bigint-internal.h b/deps/v8/src/bigint/bigint-internal.h
index 4c214153bf..e1e8cf77a0 100644
--- a/deps/v8/src/bigint/bigint-internal.h
+++ b/deps/v8/src/bigint/bigint-internal.h
@@ -22,6 +22,7 @@ constexpr int kNewtonInversionThreshold = 50;
// kBarrettThreshold is defined in bigint.h.
constexpr int kToStringFastThreshold = 43;
+constexpr int kFromStringLargeThreshold = 300;
class ProcessorImpl : public Processor {
public:
@@ -69,6 +70,8 @@ class ProcessorImpl : public Processor {
void FromString(RWDigits Z, FromStringAccumulator* accumulator);
void FromStringClassic(RWDigits Z, FromStringAccumulator* accumulator);
+ void FromStringLarge(RWDigits Z, FromStringAccumulator* accumulator);
+ void FromStringBasePowerOfTwo(RWDigits Z, FromStringAccumulator* accumulator);
bool should_terminate() { return status_ == Status::kInterrupted; }
diff --git a/deps/v8/src/bigint/bigint.h b/deps/v8/src/bigint/bigint.h
index 218bf4616c..47159d0bf4 100644
--- a/deps/v8/src/bigint/bigint.h
+++ b/deps/v8/src/bigint/bigint.h
@@ -262,6 +262,8 @@ class Processor {
// upon return will be set to the actual length of the result string.
Status ToString(char* out, int* out_length, Digits X, int radix, bool sign);
+ // Z := the contents of {accumulator}.
+ // Assume that this leaves {accumulator} in an unusable state.
Status FromString(RWDigits Z, FromStringAccumulator* accumulator);
};
@@ -336,7 +338,7 @@ class FromStringAccumulator {
// So for sufficiently large N, setting max_digits=N here will not actually
// allow parsing BigInts with N digits. We can fix that if/when anyone cares.
explicit FromStringAccumulator(int max_digits)
- : max_digits_(std::max(max_digits - kStackParts, kStackParts)) {}
+ : max_digits_(std::max(max_digits, kStackParts)) {}
// Step 2: Call this method to read all characters.
// {Char} should be a character type, such as uint8_t or uint16_t.
@@ -348,7 +350,7 @@ class FromStringAccumulator {
digit_t radix);
// Step 3: Check if a result is available, and determine its required
- // allocation size.
+ // allocation size (guaranteed to be <= max_digits passed to the constructor).
Result result() { return result_; }
int ResultLength() {
return std::max(stack_parts_used_, static_cast<int>(heap_parts_.size()));
@@ -360,8 +362,12 @@ class FromStringAccumulator {
private:
friend class ProcessorImpl;
- ALWAYS_INLINE bool AddPart(digit_t multiplier, digit_t part,
- bool is_last = false);
+ template <class Char>
+ ALWAYS_INLINE const Char* ParsePowerTwo(const Char* start, const Char* end,
+ digit_t radix);
+
+ ALWAYS_INLINE bool AddPart(digit_t multiplier, digit_t part, bool is_last);
+ ALWAYS_INLINE bool AddPart(digit_t part);
digit_t stack_parts_[kStackParts];
std::vector<digit_t> heap_parts_;
@@ -371,6 +377,7 @@ class FromStringAccumulator {
Result result_{Result::kOk};
int stack_parts_used_{0};
bool inline_everything_{false};
+ uint8_t radix_{0};
};
// The rest of this file is the inlineable implementation of
@@ -403,6 +410,47 @@ static constexpr uint8_t kCharValue[] = {
25, 26, 27, 28, 29, 30, 31, 32, // 112..119
33, 34, 35, 255, 255, 255, 255, 255, // 120..127 'z' == 122
};
+
+// A space- and time-efficient way to map {2,4,8,16,32} to {1,2,3,4,5}.
+static constexpr uint8_t kCharBits[] = {1, 2, 3, 0, 4, 0, 0, 0, 5};
+
+template <class Char>
+const Char* FromStringAccumulator::ParsePowerTwo(const Char* current,
+ const Char* end,
+ digit_t radix) {
+ radix_ = static_cast<uint8_t>(radix);
+ const int char_bits = kCharBits[radix >> 2];
+ int bits_left;
+ bool done = false;
+ do {
+ digit_t part = 0;
+ bits_left = kDigitBits;
+ while (true) {
+ digit_t d; // Numeric value of the current character {c}.
+ uint32_t c = *current;
+ if (c > 127 || (d = bigint::kCharValue[c]) >= radix) {
+ done = true;
+ break;
+ }
+
+ if (bits_left < char_bits) break;
+ bits_left -= char_bits;
+ part = (part << char_bits) | d;
+
+ ++current;
+ if (current == end) {
+ done = true;
+ break;
+ }
+ }
+ if (!AddPart(part)) return current;
+ } while (!done);
+ // We use the unused {last_multiplier_} field to
+ // communicate how many bits are unused in the last part.
+ last_multiplier_ = bits_left;
+ return current;
+}
+
template <class Char>
const Char* FromStringAccumulator::Parse(const Char* start, const Char* end,
digit_t radix) {
@@ -417,12 +465,15 @@ const Char* FromStringAccumulator::Parse(const Char* start, const Char* end,
static constexpr int kInlineThreshold = kStackParts * kDigitBits * 100 / 517;
inline_everything_ = (end - start) <= kInlineThreshold;
#endif
+ if (!inline_everything_ && (radix & (radix - 1)) == 0) {
+ return ParsePowerTwo(start, end, radix);
+ }
bool done = false;
do {
digit_t multiplier = 1;
digit_t part = 0;
while (true) {
- digit_t d;
+ digit_t d; // Numeric value of the current character {c}.
uint32_t c = *current;
if (c > 127 || (d = bigint::kCharValue[c]) >= radix) {
done = true;
@@ -478,6 +529,10 @@ bool FromStringAccumulator::AddPart(digit_t multiplier, digit_t part,
BIGINT_H_DCHECK(max_multiplier_ == 0 || max_multiplier_ == multiplier);
max_multiplier_ = multiplier;
}
+ return AddPart(part);
+}
+
+bool FromStringAccumulator::AddPart(digit_t part) {
if (stack_parts_used_ < kStackParts) {
stack_parts_[stack_parts_used_++] = part;
return true;
@@ -489,7 +544,7 @@ bool FromStringAccumulator::AddPart(digit_t multiplier, digit_t part,
heap_parts_.push_back(stack_parts_[i]);
}
}
- if (static_cast<int>(heap_parts_.size()) >= max_digits_ && !is_last) {
+ if (static_cast<int>(heap_parts_.size()) >= max_digits_) {
result_ = Result::kMaxSizeExceeded;
return false;
}
diff --git a/deps/v8/src/bigint/fromstring.cc b/deps/v8/src/bigint/fromstring.cc
index 0307745cad..a4b34a1a02 100644
--- a/deps/v8/src/bigint/fromstring.cc
+++ b/deps/v8/src/bigint/fromstring.cc
@@ -40,7 +40,6 @@ void ProcessorImpl::FromStringClassic(RWDigits Z,
// Parts are stored on the heap.
for (int i = 1; i < num_heap_parts - 1; i++) {
MultiplySingle(Z, already_set, max_multiplier);
- if (should_terminate()) return;
Add(Z, accumulator->heap_parts_[i]);
already_set.set_len(already_set.len() + 1);
}
@@ -48,6 +47,262 @@ void ProcessorImpl::FromStringClassic(RWDigits Z,
Add(Z, accumulator->heap_parts_.back());
}
+// The fast algorithm: combine parts in a balanced-binary-tree-like order:
+// Multiply-and-add neighboring pairs of parts, then loop, until only one
+// part is left. The benefit is that the multiplications will have inputs of
+// similar sizes, which makes them amenable to fast multiplication algorithms.
+// We have to do more multiplications than the classic algorithm though,
+// because we also have to multiply the multipliers.
+// Optimizations:
+// - We can skip the multiplier for the first part, because we never need it.
+// - Most multipliers are the same; we can avoid repeated multiplications and
+// just copy the previous result. (In theory we could even de-dupe them, but
+// as the parts/multipliers grow, we'll need most of the memory anyway.)
+// Copied results are marked with a * below.
+// - We can re-use memory using a system of three buffers whose usage rotates:
+// - one is considered empty, and is overwritten with the new parts,
+// - one holds the multipliers (and will be "empty" in the next round), and
+// - one initially holds the parts and is overwritten with the new multipliers
+// Parts and multipliers both grow in each iteration, and get fewer, so we
+// use the space of two adjacent old chunks for one new chunk.
+// Since the {heap_parts_} vector has the right size, and so does the
+// result {Z}, we can use that memory, and only need to allocate one scratch
+// vector. If the final result ends up in the wrong bucket, we have to copy it
+// to the correct one.
+// - We don't have to keep track of the positions and sizes of the chunks,
+// because we can deduce their precise placement from the iteration index.
+//
+// Example, assuming digit_t is 4 bits, fitting one decimal digit:
+// Initial state:
+// parts: 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5
+// multipliers: 10 10 10 10 10 10 10 10 10 10 10 10 10 10 10
+// After the first iteration of the outer loop:
+// parts: 12 34 56 78 90 12 34 5
+// multipliers: 100 *100 *100 *100 *100 *100 10
+// After the second iteration:
+// parts: 1234 5678 9012 345
+// multipliers: 10000 *10000 1000
+// After the third iteration:
+// parts: 12345678 9012345
+// multipliers: 10000000
+// And then there's an obvious last iteration.
+void ProcessorImpl::FromStringLarge(RWDigits Z,
+ FromStringAccumulator* accumulator) {
+ int num_parts = static_cast<int>(accumulator->heap_parts_.size());
+ DCHECK(num_parts >= 2); // NOLINT(readability/check)
+ DCHECK(Z.len() >= num_parts);
+ RWDigits parts(accumulator->heap_parts_.data(), num_parts);
+ Storage multipliers_storage(num_parts);
+ RWDigits multipliers(multipliers_storage.get(), num_parts);
+ RWDigits temp(Z, 0, num_parts);
+ // Unrolled and specialized first iteration: part_len == 1, so instead of
+ // Digits sub-vectors we have individual digit_t values, and the multipliers
+ // are known up front.
+ {
+ digit_t max_multiplier = accumulator->max_multiplier_;
+ digit_t last_multiplier = accumulator->last_multiplier_;
+ RWDigits new_parts = temp;
+ RWDigits new_multipliers = parts;
+ int i = 0;
+ for (; i + 1 < num_parts; i += 2) {
+ digit_t p_in = parts[i];
+ digit_t p_in2 = parts[i + 1];
+ digit_t m_in = max_multiplier;
+ digit_t m_in2 = i == num_parts - 2 ? last_multiplier : max_multiplier;
+ // p[j] = p[i] * m[i+1] + p[i+1]
+ digit_t p_high;
+ digit_t p_low = digit_mul(p_in, m_in2, &p_high);
+ digit_t carry;
+ new_parts[i] = digit_add2(p_low, p_in2, &carry);
+ new_parts[i + 1] = p_high + carry;
+ // m[j] = m[i] * m[i+1]
+ if (i > 0) {
+ if (i > 2 && m_in2 != last_multiplier) {
+ new_multipliers[i] = new_multipliers[i - 2];
+ new_multipliers[i + 1] = new_multipliers[i - 1];
+ } else {
+ digit_t m_high;
+ new_multipliers[i] = digit_mul(m_in, m_in2, &m_high);
+ new_multipliers[i + 1] = m_high;
+ }
+ }
+ }
+ // Trailing last part (if {num_parts} was odd).
+ if (i < num_parts) {
+ new_parts[i] = parts[i];
+ new_multipliers[i] = last_multiplier;
+ i += 2;
+ }
+ num_parts = i >> 1;
+ RWDigits new_temp = multipliers;
+ parts = new_parts;
+ multipliers = new_multipliers;
+ temp = new_temp;
+ AddWorkEstimate(num_parts);
+ }
+ int part_len = 2;
+
+ // Remaining iterations.
+ while (num_parts > 1) {
+ RWDigits new_parts = temp;
+ RWDigits new_multipliers = parts;
+ int new_part_len = part_len * 2;
+ int i = 0;
+ for (; i + 1 < num_parts; i += 2) {
+ int start = i * part_len;
+ Digits p_in(parts, start, part_len);
+ Digits p_in2(parts, start + part_len, part_len);
+ Digits m_in(multipliers, start, part_len);
+ Digits m_in2(multipliers, start + part_len, part_len);
+ RWDigits p_out(new_parts, start, new_part_len);
+ RWDigits m_out(new_multipliers, start, new_part_len);
+ // p[j] = p[i] * m[i+1] + p[i+1]
+ Multiply(p_out, p_in, m_in2);
+ if (should_terminate()) return;
+ digit_t overflow = AddAndReturnOverflow(p_out, p_in2);
+ DCHECK(overflow == 0); // NOLINT(readability/check)
+ USE(overflow);
+ // m[j] = m[i] * m[i+1]
+ if (i > 0) {
+ bool copied = false;
+ if (i > 2) {
+ int prev_start = (i - 2) * part_len;
+ Digits m_in_prev(multipliers, prev_start, part_len);
+ Digits m_in2_prev(multipliers, prev_start + part_len, part_len);
+ if (Compare(m_in, m_in_prev) == 0 &&
+ Compare(m_in2, m_in2_prev) == 0) {
+ copied = true;
+ Digits m_out_prev(new_multipliers, prev_start, new_part_len);
+ for (int k = 0; k < new_part_len; k++) m_out[k] = m_out_prev[k];
+ }
+ }
+ if (!copied) {
+ Multiply(m_out, m_in, m_in2);
+ if (should_terminate()) return;
+ }
+ }
+ }
+ // Trailing last part (if {num_parts} was odd).
+ if (i < num_parts) {
+ Digits p_in(parts, i * part_len, part_len);
+ Digits m_in(multipliers, i * part_len, part_len);
+ RWDigits p_out(new_parts, i * part_len, new_part_len);
+ RWDigits m_out(new_multipliers, i * part_len, new_part_len);
+ int k = 0;
+ for (; k < p_in.len(); k++) p_out[k] = p_in[k];
+ for (; k < p_out.len(); k++) p_out[k] = 0;
+ k = 0;
+ for (; k < m_in.len(); k++) m_out[k] = m_in[k];
+ for (; k < m_out.len(); k++) m_out[k] = 0;
+ i += 2;
+ }
+ num_parts = i >> 1;
+ part_len = new_part_len;
+ RWDigits new_temp = multipliers;
+ parts = new_parts;
+ multipliers = new_multipliers;
+ temp = new_temp;
+ }
+ // Copy the result to Z, if it doesn't happen to be there already.
+ if (parts.digits() != Z.digits()) {
+ int i = 0;
+ for (; i < parts.len(); i++) Z[i] = parts[i];
+ // Z might be bigger than we requested; be robust towards that.
+ for (; i < Z.len(); i++) Z[i] = 0;
+ }
+}
+
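The comment's worked example can be reproduced with native integers. A toy model of the balanced combination, assuming numbers small enough for unsigned long long (the real code does the same multiply-and-add on multi-digit buffers in place):

#include <cstdio>
#include <vector>

int main() {
  // Parts for "12345678" parsed two characters at a time, multiplier 100.
  std::vector<unsigned long long> parts = {12, 34, 56, 78};
  std::vector<unsigned long long> mult = {100, 100, 100, 100};
  while (parts.size() > 1) {
    std::vector<unsigned long long> np, nm;
    for (size_t i = 0; i + 1 < parts.size(); i += 2) {
      np.push_back(parts[i] * mult[i + 1] + parts[i + 1]);  // p[j] = p[i]*m[i+1] + p[i+1]
      nm.push_back(mult[i] * mult[i + 1]);                  // m[j] = m[i]*m[i+1]
    }
    if (parts.size() % 2) {  // odd trailing part carries over unchanged
      np.push_back(parts.back());
      nm.push_back(mult.back());
    }
    parts.swap(np);
    mult.swap(nm);
  }
  std::printf("%llu\n", parts[0]);  // 12345678
}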
+// Specialized algorithms for power-of-two radixes. Designed to work with
+// {ParsePowerTwo}: {max_multiplier_} isn't saved, but {radix_} is, and
+// {last_multiplier_} has special meaning, namely the number of unpopulated bits
+// in the last part.
+// For these radixes, {parts} is already a list of correct bit sequences; we
+// just have to put them together in the right way:
+// - The parts are currently in reversed order. The highest-index parts[i]
+// will go into Z[0].
+// - All parts, possibly except for the last, are maximally populated.
+// - A maximally populated part stores a whole number of characters, i.e.
+// the largest multiple of {char_bits} that fits in a digit is populated.
+// - The populated bits in a part are at the low end.
+// - The number of unused bits in the last part is stored in
+// {accumulator->last_multiplier_}.
+//
+// Example: Given the following parts vector, where letters are used to
+// label bits, bit order is big endian (i.e. [00000101] encodes "5"),
+// 'x' means "unpopulated", kDigitBits == 8, radix == 8, and char_bits == 3:
+//
+// parts[0] -> [xxABCDEF][xxGHIJKL][xxMNOPQR][xxxxxSTU] <- parts[3]
+//
+// We have to assemble the following result:
+//
+// Z[0] -> [NOPQRSTU][FGHIJKLM][xxxABCDE] <- Z[2]
+//
+void ProcessorImpl::FromStringBasePowerOfTwo(
+ RWDigits Z, FromStringAccumulator* accumulator) {
+ const int num_parts = accumulator->ResultLength();
+ DCHECK(num_parts >= 1); // NOLINT(readability/check)
+ DCHECK(Z.len() >= num_parts);
+ Digits parts(accumulator->heap_parts_.size() > 0
+ ? accumulator->heap_parts_.data()
+ : accumulator->stack_parts_,
+ num_parts);
+ uint8_t radix = accumulator->radix_;
+ DCHECK(radix == 2 || radix == 4 || radix == 8 || radix == 16 || radix == 32);
+ const int char_bits = BitLength(radix - 1);
+ const int unused_last_part_bits =
+ static_cast<int>(accumulator->last_multiplier_);
+ const int unused_part_bits = kDigitBits % char_bits;
+ const int max_part_bits = kDigitBits - unused_part_bits;
+ int z_index = 0;
+ int part_index = num_parts - 1;
+
+ // If the last part is fully populated, then all parts must be, and we can
+ // simply copy them (in reversed order).
+ if (unused_last_part_bits == 0) {
+ DCHECK(kDigitBits % char_bits == 0); // NOLINT(readability/check)
+ while (part_index >= 0) {
+ Z[z_index++] = parts[part_index--];
+ }
+ for (; z_index < Z.len(); z_index++) Z[z_index] = 0;
+ return;
+ }
+
+ // Otherwise we have to shift the parts' contents around as needed.
+ // Holds the next Z digit that we want to store...
+ digit_t digit = parts[part_index--];
+ // ...and the number of bits (at the right end) we already know.
+ int digit_bits = kDigitBits - unused_last_part_bits;
+ while (part_index >= 0) {
+ // Holds the last part that we read from {parts}...
+ digit_t part;
+ // ...and the number of bits (at the right end) that we haven't used yet.
+ int part_bits;
+ while (digit_bits < kDigitBits) {
+ part = parts[part_index--];
+ part_bits = max_part_bits;
+ digit |= part << digit_bits;
+ int part_shift = kDigitBits - digit_bits;
+ if (part_shift > part_bits) {
+ digit_bits += part_bits;
+ part = 0;
+ part_bits = 0;
+ if (part_index < 0) break;
+ } else {
+ digit_bits = kDigitBits;
+ part >>= part_shift;
+ part_bits -= part_shift;
+ }
+ }
+ Z[z_index++] = digit;
+ digit = part;
+ digit_bits = part_bits;
+ }
+ if (digit_bits > 0) {
+ Z[z_index++] = digit;
+ }
+ for (; z_index < Z.len(); z_index++) Z[z_index] = 0;
+}
+
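The reassembly can be checked with concrete numbers. A standalone sketch, assuming 8-bit digits and radix 8 (char_bits == 3, so each part holds at most two characters and the last part here holds one); it concatenates the populated bits of each part, most significant part first, which is the same job FromStringBasePowerOfTwo does directly into Z's digits:

#include <cstdint>
#include <cstdio>

int main() {
  // Octal "1234567": parts[0] is most significant; populated[i] counts the
  // used bits per part (8 - unused; the last part has 5 unused bits).
  uint8_t parts[] = {012, 034, 056, 07};
  int populated[] = {6, 6, 6, 3};
  unsigned long long value = 0;
  for (int i = 0; i < 4; ++i) {
    value = (value << populated[i]) | parts[i];
  }
  std::printf("0%llo == %llu\n", value, value);  // 01234567 == 342391
}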
void ProcessorImpl::FromString(RWDigits Z, FromStringAccumulator* accumulator) {
if (accumulator->inline_everything_) {
int i = 0;
@@ -57,8 +312,12 @@ void ProcessorImpl::FromString(RWDigits Z, FromStringAccumulator* accumulator) {
for (; i < Z.len(); i++) Z[i] = 0;
} else if (accumulator->stack_parts_used_ == 0) {
for (int i = 0; i < Z.len(); i++) Z[i] = 0;
- } else {
+ } else if (IsPowerOfTwo(accumulator->radix_)) {
+ FromStringBasePowerOfTwo(Z, accumulator);
+ } else if (accumulator->ResultLength() < kFromStringLargeThreshold) {
FromStringClassic(Z, accumulator);
+ } else {
+ FromStringLarge(Z, accumulator);
}
}
diff --git a/deps/v8/src/builtins/accessors.cc b/deps/v8/src/builtins/accessors.cc
index 8d26259204..0d994d2d03 100644
--- a/deps/v8/src/builtins/accessors.cc
+++ b/deps/v8/src/builtins/accessors.cc
@@ -17,7 +17,6 @@
#include "src/objects/contexts.h"
#include "src/objects/field-index-inl.h"
#include "src/objects/js-array-inl.h"
-#include "src/objects/js-regexp-inl.h"
#include "src/objects/module-inl.h"
#include "src/objects/property-details.h"
#include "src/objects/prototype.h"
diff --git a/deps/v8/src/builtins/accessors.h b/deps/v8/src/builtins/accessors.h
index 0148b8e3d1..27ff276821 100644
--- a/deps/v8/src/builtins/accessors.h
+++ b/deps/v8/src/builtins/accessors.h
@@ -5,7 +5,7 @@
#ifndef V8_BUILTINS_ACCESSORS_H_
#define V8_BUILTINS_ACCESSORS_H_
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
#include "src/base/bit-field.h"
#include "src/common/globals.h"
#include "src/objects/property-details.h"
diff --git a/deps/v8/src/builtins/arm/builtins-arm.cc b/deps/v8/src/builtins/arm/builtins-arm.cc
index f45c927e67..1ef63e1096 100644
--- a/deps/v8/src/builtins/arm/builtins-arm.cc
+++ b/deps/v8/src/builtins/arm/builtins-arm.cc
@@ -76,6 +76,36 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
+enum class ArgumentsElementType {
+ kRaw, // Push arguments as they are.
+ kHandle // Dereference arguments before pushing.
+};
+
+void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
+ Register scratch,
+ ArgumentsElementType element_type) {
+ DCHECK(!AreAliased(array, argc, scratch));
+ UseScratchRegisterScope temps(masm);
+ Register counter = scratch;
+ Register value = temps.Acquire();
+ Label loop, entry;
+ if (kJSArgcIncludesReceiver) {
+ __ sub(counter, argc, Operand(kJSArgcReceiverSlots));
+ } else {
+ __ mov(counter, argc);
+ }
+ __ b(&entry);
+ __ bind(&loop);
+ __ ldr(value, MemOperand(array, counter, LSL, kSystemPointerSizeLog2));
+ if (element_type == ArgumentsElementType::kHandle) {
+ __ ldr(value, MemOperand(value));
+ }
+ __ push(value);
+ __ bind(&entry);
+ __ sub(counter, counter, Operand(1), SetCC);
+ __ b(ge, &loop);
+}
+
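A C-level model of Generate_PushArguments, assuming a downward-growing stack (illustrative only): arguments are pushed from the highest index down so they land in stack order, and kHandle adds one dereference because the JS entry trampoline receives an array of handles rather than raw values:

#include <cstdint>

void PushArguments(uintptr_t* array, int argc, bool argc_includes_receiver,
                   bool deref_handles, uintptr_t** sp) {
  int counter = argc_includes_receiver ? argc - 1 : argc;  // drop receiver slot
  while (--counter >= 0) {
    uintptr_t value = array[counter];
    if (deref_handles) value = *reinterpret_cast<uintptr_t*>(value);
    *--(*sp) = value;  // push(value)
  }
}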
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r0 : number of arguments
@@ -106,12 +136,14 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// correct position (including any undefined), instead of delaying this to
// InvokeFunction.
- // Set up pointer to last argument (skip receiver).
+ // Set up pointer to first argument (skip receiver).
__ add(
r4, fp,
Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
// Copy arguments and receiver to the expression stack.
- __ PushArray(r4, r0, r5);
+ // r4: Pointer to start of arguments.
+ // r0: Number of arguments.
+ Generate_PushArguments(masm, r4, r0, r5, ArgumentsElementType::kRaw);
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
@@ -130,7 +162,9 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(scratch, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ Jump(lr);
__ bind(&stack_overflow);
@@ -230,7 +264,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// InvokeFunction.
// Copy arguments to the expression stack.
- __ PushArray(r4, r0, r5);
+ // r4: Pointer to start of arguments.
+ // r0: Number of arguments.
+ Generate_PushArguments(masm, r4, r0, r5, ArgumentsElementType::kRaw);
// Push implicit receiver.
__ Push(r6);
@@ -276,7 +312,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(r1, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ Jump(lr);
__ bind(&check_receiver);
@@ -308,14 +346,32 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ ldr(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ cmp(scratch, Operand(static_cast<int>(CodeKind::BASELINE)));
+ __ Assert(eq, AbortReason::kExpectedBaselineData);
+}
+
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Register sfi_data,
Register scratch1,
Label* is_baseline) {
ASM_CODE_COMMENT(masm);
Label done;
- __ CompareObjectType(sfi_data, scratch1, scratch1, BASELINE_DATA_TYPE);
- __ b(eq, is_baseline);
+ __ CompareObjectType(sfi_data, scratch1, scratch1, CODET_TYPE);
+ if (FLAG_debug_code) {
+ Label not_baseline;
+ __ b(ne, &not_baseline);
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
+ __ b(eq, is_baseline);
+ __ bind(&not_baseline);
+ } else {
+ __ b(eq, is_baseline);
+ }
__ cmp(scratch1, Operand(INTERPRETER_DATA_TYPE));
__ b(ne, &done);
__ ldr(sfi_data,
@@ -383,6 +439,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
__ ldrh(r3,
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
+ if (kJSArgcIncludesReceiver) {
+ __ sub(r3, r3, Operand(kJSArgcReceiverSlots));
+ }
__ ldr(r2,
FieldMemOperand(r1, JSGeneratorObject::kParametersAndRegistersOffset));
{
@@ -705,7 +764,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Check if we have enough stack space to push all arguments + receiver.
// Clobbers r5.
Label enough_stack_space, stack_overflow;
- __ add(r6, r0, Operand(1)); // Add one for receiver.
+ if (kJSArgcIncludesReceiver) {
+ __ mov(r6, r0);
+ } else {
+ __ add(r6, r0, Operand(1)); // Add one for receiver.
+ }
__ StackOverflowCheck(r6, r5, &stack_overflow);
__ b(&enough_stack_space);
__ bind(&stack_overflow);
@@ -715,24 +778,13 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ bind(&enough_stack_space);
- // Copy arguments to the stack in a loop.
+ // Copy arguments to the stack.
// r1: new.target
// r2: function
// r3: receiver
// r0: argc
// r4: argv, i.e. points to first arg
- Label loop, entry;
- __ add(r6, r4, Operand(r0, LSL, kSystemPointerSizeLog2));
- // r6 points past last arg.
- __ b(&entry);
- __ bind(&loop);
- __ ldr(r5, MemOperand(r6, -kSystemPointerSize,
- PreIndex)); // read next parameter
- __ ldr(r5, MemOperand(r5)); // dereference handle
- __ push(r5); // push parameter
- __ bind(&entry);
- __ cmp(r4, r6);
- __ b(ne, &loop);
+ Generate_PushArguments(masm, r4, r0, r5, ArgumentsElementType::kHandle);
// Push the receiver.
__ Push(r3);
@@ -815,7 +867,9 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ ldr(actual_params_size,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ lsl(actual_params_size, actual_params_size, Operand(kPointerSizeLog2));
- __ add(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
+ if (!kJSArgcIncludesReceiver) {
+ __ add(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
+ }
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
@@ -1196,7 +1250,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// stack left to right.
//
// The live registers are:
-// o r0: actual argument count (not including the receiver)
+// o r0: actual argument count
// o r1: the JS function object being called.
// o r3: the incoming new target or generator object
// o cp: our context
@@ -1414,8 +1468,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
&has_optimized_code_or_marker);
// Load the baseline code into the closure.
- __ ldr(r2, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BaselineData::kBaselineCodeOffset));
+ __ mov(r2, kInterpreterBytecodeArrayRegister);
static_assert(kJavaScriptCallCodeStartRegister == r2, "ABI mismatch");
ReplaceClosureCodeWithOptimizedCode(masm, r2, closure);
__ JumpCodeObject(r2);
@@ -1451,7 +1504,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
InterpreterPushArgsMode mode) {
DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r2 : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
@@ -1464,15 +1517,18 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ sub(r0, r0, Operand(1));
}
- __ add(r3, r0, Operand(1)); // Add one for receiver.
-
- __ StackOverflowCheck(r3, r4, &stack_overflow);
-
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- // Don't copy receiver. Argument count is correct.
+ const bool skip_receiver =
+ receiver_mode == ConvertReceiverMode::kNullOrUndefined;
+ if (kJSArgcIncludesReceiver && skip_receiver) {
+ __ sub(r3, r0, Operand(kJSArgcReceiverSlots));
+ } else if (!kJSArgcIncludesReceiver && !skip_receiver) {
+ __ add(r3, r0, Operand(1));
+ } else {
__ mov(r3, r0);
}
+ __ StackOverflowCheck(r3, r4, &stack_overflow);
+
// Push the arguments. r2 and r4 will be modified.
GenerateInterpreterPushArgs(masm, r3, r2, r4);
@@ -1510,7 +1566,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
- // -- r0 : argument count (not including receiver)
+ // -- r0 : argument count
// -- r3 : new target
// -- r1 : constructor to call
// -- r2 : allocation site feedback if available, undefined otherwise.
@@ -1518,17 +1574,20 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
// -----------------------------------
Label stack_overflow;
- __ add(r5, r0, Operand(1)); // Add one for receiver.
-
- __ StackOverflowCheck(r5, r6, &stack_overflow);
+ __ StackOverflowCheck(r0, r6, &stack_overflow);
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
// The spread argument should not be pushed.
__ sub(r0, r0, Operand(1));
}
+ Register argc_without_receiver = r0;
+ if (kJSArgcIncludesReceiver) {
+ argc_without_receiver = r6;
+ __ sub(argc_without_receiver, r0, Operand(kJSArgcReceiverSlots));
+ }
// Push the arguments. r4 and r5 will be modified.
- GenerateInterpreterPushArgs(masm, r0, r4, r5);
+ GenerateInterpreterPushArgs(masm, argc_without_receiver, r4, r5);
// Push a slot for the receiver to be constructed.
__ mov(r5, Operand::Zero());
@@ -1729,10 +1788,13 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// Overwrite the hole inserted by the deoptimizer with the return value from
// the LAZY deopt point. r0 contains the arguments count, the return value
// from LAZY is always the last argument.
- __ add(r0, r0, Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
+ constexpr int return_value_offset =
+ BuiltinContinuationFrameConstants::kFixedSlotCount -
+ kJSArgcReceiverSlots;
+ __ add(r0, r0, Operand(return_value_offset));
__ str(scratch, MemOperand(sp, r0, LSL, kPointerSizeLog2));
// Recover arguments count.
- __ sub(r0, r0, Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
+ __ sub(r0, r0, Operand(return_value_offset));
}
__ ldr(fp, MemOperand(
sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
@@ -1815,7 +1877,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ ldr(r1, FieldMemOperand(r0, Code::kDeoptimizationDataOffset));
+ __ ldr(r1,
+ FieldMemOperand(r0, Code::kDeoptimizationDataOrInterpreterDataOffset));
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
@@ -1857,12 +1920,14 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadRoot(r5, RootIndex::kUndefinedValue);
__ mov(r2, r5);
__ ldr(r1, MemOperand(sp, 0)); // receiver
- __ cmp(r0, Operand(1));
+ __ cmp(r0, Operand(JSParameterCount(1)));
__ ldr(r5, MemOperand(sp, kSystemPointerSize), ge); // thisArg
- __ cmp(r0, Operand(2), ge);
+ __ cmp(r0, Operand(JSParameterCount(2)), ge);
__ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argArray
- __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ r0, r5, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1888,7 +1953,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// arguments to the receiver.
__ bind(&no_arguments);
{
- __ mov(r0, Operand(0));
+ __ mov(r0, Operand(JSParameterCount(0)));
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
}
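The JSParameterCount comparisons above convert user-visible parameter counts into the machine argc under the new convention. A sketch of that relationship; the underscored names are stand-ins for illustration, assuming the receiver occupies one argc slot when counted:

constexpr bool kJSArgcIncludesReceiver_ = true;  // illustrative stand-in
constexpr int kJSArgcReceiverSlots_ = kJSArgcIncludesReceiver_ ? 1 : 0;
constexpr int JSParameterCount_(int param_count_without_receiver) {
  return param_count_without_receiver + kJSArgcReceiverSlots_;
}
static_assert(JSParameterCount_(2) == 3, "argc includes the receiver slot");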
@@ -1902,7 +1967,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// r0: actual number of arguments
{
Label done;
- __ cmp(r0, Operand::Zero());
+ __ cmp(r0, Operand(JSParameterCount(0)));
__ b(ne, &done);
__ PushRoot(RootIndex::kUndefinedValue);
__ add(r0, r0, Operand(1));
@@ -1932,14 +1997,16 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadRoot(r1, RootIndex::kUndefinedValue);
__ mov(r5, r1);
__ mov(r2, r1);
- __ cmp(r0, Operand(1));
+ __ cmp(r0, Operand(JSParameterCount(1)));
__ ldr(r1, MemOperand(sp, kSystemPointerSize), ge); // target
- __ cmp(r0, Operand(2), ge);
+ __ cmp(r0, Operand(JSParameterCount(2)), ge);
__ ldr(r5, MemOperand(sp, 2 * kSystemPointerSize), ge); // thisArgument
- __ cmp(r0, Operand(3), ge);
+ __ cmp(r0, Operand(JSParameterCount(3)), ge);
__ ldr(r2, MemOperand(sp, 3 * kSystemPointerSize), ge); // argumentsList
- __ DropArgumentsAndPushNewReceiver(r0, r5, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ r0, r5, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -1974,15 +2041,17 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ LoadRoot(r1, RootIndex::kUndefinedValue);
__ mov(r2, r1);
__ mov(r4, r1);
- __ cmp(r0, Operand(1));
+ __ cmp(r0, Operand(JSParameterCount(1)));
__ ldr(r1, MemOperand(sp, kSystemPointerSize), ge); // target
__ mov(r3, r1); // new.target defaults to target
- __ cmp(r0, Operand(2), ge);
+ __ cmp(r0, Operand(JSParameterCount(2)), ge);
__ ldr(r2, MemOperand(sp, 2 * kSystemPointerSize), ge); // argumentsList
- __ cmp(r0, Operand(3), ge);
+ __ cmp(r0, Operand(JSParameterCount(3)), ge);
__ ldr(r3, MemOperand(sp, 3 * kSystemPointerSize), ge); // new.target
- __ DropArgumentsAndPushNewReceiver(r0, r4, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ r0, r4, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -2005,13 +2074,55 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+namespace {
+
+// Allocate new stack space for |count| arguments and shift the arguments
+// already on the stack. |pointer_to_new_space_out| points to the first
+// free slot on the stack to copy additional arguments to, and
+// |argc_in_out| is updated to include |count|.
+void Generate_AllocateSpaceAndShiftExistingArguments(
+ MacroAssembler* masm, Register count, Register argc_in_out,
+ Register pointer_to_new_space_out, Register scratch1, Register scratch2) {
+ DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
+ scratch2));
+ UseScratchRegisterScope temps(masm);
+ Register old_sp = scratch1;
+ Register new_space = scratch2;
+ __ mov(old_sp, sp);
+ __ lsl(new_space, count, Operand(kSystemPointerSizeLog2));
+ __ AllocateStackSpace(new_space);
+
+ Register end = scratch2;
+ Register value = temps.Acquire();
+ Register dest = pointer_to_new_space_out;
+ __ mov(dest, sp);
+ __ add(end, old_sp, Operand(argc_in_out, LSL, kSystemPointerSizeLog2));
+ Label loop, done;
+ __ bind(&loop);
+ __ cmp(old_sp, end);
+ if (kJSArgcIncludesReceiver) {
+ __ b(ge, &done);
+ } else {
+ __ b(gt, &done);
+ }
+ __ ldr(value, MemOperand(old_sp, kSystemPointerSize, PostIndex));
+ __ str(value, MemOperand(dest, kSystemPointerSize, PostIndex));
+ __ b(&loop);
+ __ bind(&done);
+
+ // Update total number of arguments.
+ __ add(argc_in_out, argc_in_out, count);
+}
+
+} // namespace
+
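A memory-level model of the shift performed by the helper above, assuming a downward-growing stack (illustrative only): grow the stack by |count| slots, slide the existing entries down, and return the first free slot for the new arguments:

#include <cstdint>
#include <cstring>

uintptr_t* AllocateSpaceAndShift(uintptr_t** sp, int count, int* argc_in_out) {
  uintptr_t* old_sp = *sp;
  *sp -= count;                                         // AllocateStackSpace
  std::memmove(*sp, old_sp, *argc_in_out * sizeof(uintptr_t));
  uintptr_t* first_free = *sp + *argc_in_out;           // pointer_to_new_space_out
  *argc_in_out += count;                                // update total argc
  return first_free;
}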
// static
// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
// -- r1 : target
- // -- r0 : number of parameters on the stack (not including the receiver)
+ // -- r0 : number of parameters on the stack
// -- r2 : arguments list (a FixedArray)
// -- r4 : len (number of elements to push from args)
// -- r3 : new.target (for [[Construct]])
@@ -2042,23 +2153,10 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Move the arguments already in the stack,
// including the receiver and the return address.
- {
- Label copy, check;
- Register num = r5, src = r6, dest = r9; // r7 and r8 are context and root.
- __ mov(src, sp);
- // Update stack pointer.
- __ lsl(scratch, r4, Operand(kSystemPointerSizeLog2));
- __ AllocateStackSpace(scratch);
- __ mov(dest, sp);
- __ mov(num, r0);
- __ b(&check);
- __ bind(&copy);
- __ ldr(scratch, MemOperand(src, kSystemPointerSize, PostIndex));
- __ str(scratch, MemOperand(dest, kSystemPointerSize, PostIndex));
- __ sub(num, num, Operand(1), SetCC);
- __ bind(&check);
- __ b(ge, &copy);
- }
+ // r4: Number of arguments to make room for.
+ // r0: Number of arguments already on the stack.
+ // r9: Points to first free slot on the stack after arguments were shifted.
+ Generate_AllocateSpaceAndShiftExistingArguments(masm, r4, r0, r9, r5, r6);
// Copy arguments onto the stack (thisArgument is already on the stack).
{
@@ -2077,7 +2175,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ add(r6, r6, Operand(1));
__ b(&loop);
__ bind(&done);
- __ add(r0, r0, r6);
}
// Tail-call to the actual Call or Construct builtin.
@@ -2092,7 +2189,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
CallOrConstructMode mode,
Handle<Code> code) {
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r3 : the new.target (for [[Construct]] calls)
// -- r1 : the target to call (can be any Object)
// -- r2 : start index (to support rest parameters)
@@ -2120,12 +2217,14 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Label stack_done, stack_overflow;
__ ldr(r5, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ if (kJSArgcIncludesReceiver) {
+ __ sub(r5, r5, Operand(kJSArgcReceiverSlots));
+ }
__ sub(r5, r5, r2, SetCC);
__ b(le, &stack_done);
{
// ----------- S t a t e -------------
- // -- r0 : the number of arguments already in the stack (not including the
- // receiver)
+ // -- r0 : the number of arguments already in the stack
// -- r1 : the target to call (can be any Object)
// -- r2 : start index (to support rest parameters)
// -- r3 : the new.target (for [[Construct]] calls)
@@ -2145,30 +2244,17 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Move the arguments already in the stack,
// including the receiver and the return address.
- {
- Label copy, check;
- Register num = r8, src = r9,
- dest = r2; // r7 and r10 are context and root.
- __ mov(src, sp);
- // Update stack pointer.
- __ lsl(scratch, r5, Operand(kSystemPointerSizeLog2));
- __ AllocateStackSpace(scratch);
- __ mov(dest, sp);
- __ mov(num, r0);
- __ b(&check);
- __ bind(&copy);
- __ ldr(scratch, MemOperand(src, kSystemPointerSize, PostIndex));
- __ str(scratch, MemOperand(dest, kSystemPointerSize, PostIndex));
- __ sub(num, num, Operand(1), SetCC);
- __ bind(&check);
- __ b(ge, &copy);
- }
+ // r5: Number of arguments to make room for.
+ // r0: Number of arguments already on the stack.
+ // r2: Points to first free slot on the stack after arguments were shifted.
+ Generate_AllocateSpaceAndShiftExistingArguments(masm, r5, r0, r2, scratch,
+ r8);
+
// Copy arguments from the caller frame.
// TODO(victorgomes): Consider using forward order as potentially more cache
// friendly.
{
Label loop;
- __ add(r0, r0, r5);
__ bind(&loop);
{
__ sub(r5, r5, Operand(1), SetCC);
@@ -2191,7 +2277,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : the function to call (checked to be a JSFunction)
// -----------------------------------
__ AssertFunction(r1);
@@ -2216,7 +2302,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ b(ne, &done_convert);
{
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : the function to call (checked to be a JSFunction)
// -- r2 : the shared function info.
// -- cp : the function context.
@@ -2268,7 +2354,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ bind(&done_convert);
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : the function to call (checked to be a JSFunction)
// -- r2 : the shared function info.
// -- cp : the function context.
@@ -2292,7 +2378,7 @@ namespace {
void Generate_PushBoundArguments(MacroAssembler* masm) {
ASM_CODE_COMMENT(masm);
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : target (checked to be a JSBoundFunction)
// -- r3 : new.target (only in case of [[Construct]])
// -----------------------------------
@@ -2306,7 +2392,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ b(eq, &no_bound_arguments);
{
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : target (checked to be a JSBoundFunction)
// -- r2 : the [[BoundArguments]] (implemented as FixedArray)
// -- r3 : new.target (only in case of [[Construct]])
@@ -2370,7 +2456,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(r1);
@@ -2391,7 +2477,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : the target to call (can be any Object).
// -----------------------------------
@@ -2438,7 +2524,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : the constructor to call (checked to be a JSFunction)
// -- r3 : the new target (checked to be a constructor)
// -----------------------------------
@@ -2468,7 +2554,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : the function to call (checked to be a JSBoundFunction)
// -- r3 : the new target (checked to be a constructor)
// -----------------------------------
@@ -2491,7 +2577,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- r0 : the number of arguments (not including the receiver)
+ // -- r0 : the number of arguments
// -- r1 : the constructor to call (can be any Object)
// -- r3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
@@ -2777,12 +2863,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ cmp(cp, Operand(0));
__ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
{
UseScratchRegisterScope temps(masm);
@@ -3504,7 +3584,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
- __ CompareObjectType(code_obj, r3, r3, BASELINE_DATA_TYPE);
+ __ CompareObjectType(code_obj, r3, r3, CODET_TYPE);
__ b(eq, &start_with_baseline);
// Start with bytecode as there is no baseline code.
@@ -3517,13 +3597,13 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Start with baseline code.
__ bind(&start_with_baseline);
} else if (FLAG_debug_code) {
- __ CompareObjectType(code_obj, r3, r3, BASELINE_DATA_TYPE);
+ __ CompareObjectType(code_obj, r3, r3, CODET_TYPE);
__ Assert(eq, AbortReason::kExpectedBaselineData);
}
- // Load baseline code from baseline data.
- __ ldr(code_obj,
- FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+ if (FLAG_debug_code) {
+ AssertCodeIsBaseline(masm, code_obj, r3);
+ }
// Load the feedback vector.
Register feedback_vector = r2;
diff --git a/deps/v8/src/builtins/arm64/builtins-arm64.cc b/deps/v8/src/builtins/arm64/builtins-arm64.cc
index b1f9a63e3c..ac34e17354 100644
--- a/deps/v8/src/builtins/arm64/builtins-arm64.cc
+++ b/deps/v8/src/builtins/arm64/builtins-arm64.cc
@@ -112,10 +112,12 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ SmiTag(x11, argc);
__ Push(x11, padreg);
- // Add a slot for the receiver, and round up to maintain alignment.
+ // Add a slot for the receiver (if not already included), and round up to
+ // maintain alignment.
Register slot_count = x2;
Register slot_count_without_rounding = x12;
- __ Add(slot_count_without_rounding, argc, 2);
+ constexpr int additional_slots = kJSArgcIncludesReceiver ? 1 : 2;
+ __ Add(slot_count_without_rounding, argc, additional_slots);
__ Bic(slot_count, slot_count_without_rounding, 1);
__ Claim(slot_count);
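The arithmetic behind this claim, as a sketch (illustrative only): adding one or two extra slots and then clearing bit 0 with Bic yields an even slot count that still covers every needed slot, keeping sp 16-byte aligned on arm64:

#include <cstdint>

int64_t SlotsToClaim(int64_t argc, bool argc_includes_receiver) {
  int64_t additional = argc_includes_receiver ? 1 : 2;
  int64_t unrounded = argc + additional;  // Add(x12, argc, additional_slots)
  return unrounded & ~int64_t{1};         // Bic(slot_count, x12, 1)
}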
@@ -128,7 +130,8 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Store padding, if needed.
__ Tbnz(slot_count_without_rounding, 0, &already_aligned);
- __ Str(padreg, MemOperand(x2, 1 * kSystemPointerSize));
+ __ Str(padreg,
+ MemOperand(x2, kJSArgcIncludesReceiver ? 0 : kSystemPointerSize));
__ Bind(&already_aligned);
// TODO(victorgomes): When the arguments adaptor is completely removed, we
@@ -148,7 +151,11 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ Add(src, fp,
StandardFrameConstants::kCallerSPOffset +
kSystemPointerSize); // Skip receiver.
- __ Mov(count, argc);
+ if (kJSArgcIncludesReceiver) {
+ __ Sub(count, argc, kJSArgcReceiverSlots);
+ } else {
+ __ Mov(count, argc);
+ }
__ CopyDoubleWords(dst, src, count);
}
@@ -190,7 +197,9 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
- __ DropArguments(x1, TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(x1, kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ Ret();
__ Bind(&stack_overflow);
@@ -311,6 +320,11 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Round the number of arguments down to the next even number, and claim
// slots for the arguments. If the number of arguments was odd, the last
// argument will overwrite one of the receivers pushed above.
+ Register argc_without_receiver = x12;
+ if (kJSArgcIncludesReceiver) {
+ argc_without_receiver = x11;
+ __ Sub(argc_without_receiver, x12, kJSArgcReceiverSlots);
+ }
__ Bic(x10, x12, 1);
// Check if we have enough stack space to push all arguments.
@@ -328,7 +342,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
Register count = x2;
Register dst = x10;
Register src = x11;
- __ Mov(count, x12);
+ __ Mov(count, argc_without_receiver);
__ Poke(x0, 0); // Add the receiver.
__ SlotAddress(dst, 1); // Skip receiver.
__ Add(src, fp,
@@ -374,7 +388,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Leave construct frame.
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
- __ DropArguments(x1, TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(x1, kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ Ret();
// Otherwise we do a smi check and fall through to check if the return value
@@ -414,6 +430,21 @@ void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
__ Unreachable();
}
+static void AssertCodeIsBaselineAllowClobber(MacroAssembler* masm,
+ Register code, Register scratch) {
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ Ldr(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ Cmp(scratch, Operand(static_cast<int>(CodeKind::BASELINE)));
+ __ Assert(eq, AbortReason::kExpectedBaselineData);
+}
+
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ return AssertCodeIsBaselineAllowClobber(masm, code, scratch);
+}
+
// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
// the more general dispatch.
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
@@ -422,8 +453,21 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label* is_baseline) {
ASM_CODE_COMMENT(masm);
Label done;
- __ CompareObjectType(sfi_data, scratch1, scratch1, BASELINE_DATA_TYPE);
- __ B(eq, is_baseline);
+ __ CompareObjectType(sfi_data, scratch1, scratch1, CODET_TYPE);
+ if (FLAG_debug_code) {
+ Label not_baseline;
+ __ B(ne, &not_baseline);
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ __ LoadCodeDataContainerCodeNonBuiltin(scratch1, sfi_data);
+ AssertCodeIsBaselineAllowClobber(masm, scratch1, scratch1);
+ } else {
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
+ }
+ __ B(eq, is_baseline);
+ __ Bind(&not_baseline);
+ } else {
+ __ B(eq, is_baseline);
+ }
__ Cmp(scratch1, INTERPRETER_DATA_TYPE);
__ B(ne, &done);
__ LoadTaggedPointerField(
@@ -485,12 +529,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ Ldrh(w10, FieldMemOperand(
x10, SharedFunctionInfo::kFormalParameterCountOffset));
+ if (kJSArgcIncludesReceiver) {
+ __ Sub(x10, x10, kJSArgcReceiverSlots);
+ }
// Claim slots for arguments and receiver (rounded up to a multiple of two).
__ Add(x11, x10, 2);
__ Bic(x11, x11, 1);
__ Claim(x11);
- // Store padding (which might be replaced by the receiver).
+ // Store padding (which might be replaced by the last argument).
__ Sub(x11, x11, 1);
__ Poke(padreg, Operand(x11, LSL, kSystemPointerSizeLog2));
@@ -855,9 +902,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
masm->isolate()));
__ Ldr(cp, MemOperand(scratch));
- // Claim enough space for the arguments, the receiver and the function,
- // including an optional slot of padding.
- __ Add(slots_to_claim, argc, 3);
+ // Claim enough space for the arguments, the function and the receiver (if
+ // it is not included in argc already), including an optional slot of
+ // padding.
+ constexpr int additional_slots = kJSArgcIncludesReceiver ? 2 : 3;
+ __ Add(slots_to_claim, argc, additional_slots);
__ Bic(slots_to_claim, slots_to_claim, 1);
// Check if we have enough stack space to push all arguments.
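Why the constant is 2 vs 3: the trampoline always claims the function slot and
an alignment slot; the receiver slot is extra only when argc does not already
count it. A sketch reusing the assumed constants from the earlier model:

    int EntryTrampolineSlotsToClaim(int argc) {
      int extra = kJSArgcIncludesReceiver ? 2 : 3;  // function (+ receiver) + padding
      return (argc + extra) & ~1;                   // the Bic clears bit 0
    }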
@@ -880,7 +929,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Poke(receiver, 0);
// Store function on the stack.
__ SlotAddress(scratch, argc);
- __ Str(function, MemOperand(scratch, kSystemPointerSize));
+ __ Str(
+ function,
+ MemOperand(scratch, kJSArgcIncludesReceiver ? 0 : kSystemPointerSize));
// Copy arguments to the stack in a loop, in reverse order.
// x4: argc.
@@ -888,7 +939,12 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
Label loop, done;
// Skip the argument set up if we have no arguments.
- __ Cbz(argc, &done);
+ if (kJSArgcIncludesReceiver) {
+ __ Cmp(argc, JSParameterCount(0));
+ __ B(eq, &done);
+ } else {
+ __ Cbz(argc, &done);
+ }
// scratch has been set to point to the location of the function, which
// marks the end of the argument copy.
@@ -902,7 +958,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ Str(x11, MemOperand(x0, kSystemPointerSize, PostIndex));
// Loop if we've not reached the end of copy marker.
__ Cmp(x0, scratch);
- __ B(le, &loop);
+ if (kJSArgcIncludesReceiver) {
+ __ B(lt, &loop);
+ } else {
+ __ B(le, &loop);
+ }
__ Bind(&done);
@@ -992,7 +1052,9 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ Ldr(actual_params_size,
MemOperand(fp, StandardFrameConstants::kArgCOffset));
__ lsl(actual_params_size, actual_params_size, kSystemPointerSizeLog2);
- __ Add(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
+ if (!kJSArgcIncludesReceiver) {
+ __ Add(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
+ }
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
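With the flag on, argc already pays for the receiver slot, so the extra
kSystemPointerSize is skipped. The same computation as plain arithmetic
(pointer size hard-coded for the sketch):

    int ActualParamsSizeBytes(int argc) {
      int slots = kJSArgcIncludesReceiver ? argc : argc + 1;  // + receiver
      return slots * 8;  // kSystemPointerSize on arm64
    }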
@@ -1378,7 +1440,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// stack left to right.
//
// The live registers are:
-// - x0: actual argument count (not including the receiver)
+// - x0: actual argument count
// - x1: the JS function object being called.
// - x3: the incoming new target or generator object
// - cp: our context.
@@ -1614,9 +1676,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
&has_optimized_code_or_marker);
// Load the baseline code into the closure.
- __ LoadTaggedPointerField(
- x2, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BaselineData::kBaselineCodeOffset));
+ __ Move(x2, kInterpreterBytecodeArrayRegister);
static_assert(kJavaScriptCallCodeStartRegister == x2, "ABI mismatch");
ReplaceClosureCodeWithOptimizedCode(masm, x2, closure);
__ JumpCodeTObject(x2);
@@ -1643,7 +1703,7 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
Register last_arg_addr = x10;
Register stack_addr = x11;
Register slots_to_claim = x12;
- Register slots_to_copy = x13; // May include receiver, unlike num_args.
+ Register slots_to_copy = x13;
DCHECK(!AreAliased(num_args, first_arg_index, last_arg_addr, stack_addr,
slots_to_claim, slots_to_copy));
@@ -1651,15 +1711,17 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
DCHECK(!AreAliased(spread_arg_out, last_arg_addr, stack_addr, slots_to_claim,
slots_to_copy));
- // Add one slot for the receiver.
- __ Add(slots_to_claim, num_args, 1);
-
if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
    // Exclude final spread from the number of arguments; the slots to claim
    // are derived from num_args below.
- __ Sub(slots_to_claim, slots_to_claim, 1);
__ Sub(num_args, num_args, 1);
}
+ // Add receiver (if not already included in argc) and round up to an even
+ // number of slots.
+ constexpr int additional_slots = kJSArgcIncludesReceiver ? 1 : 2;
+ __ Add(slots_to_claim, num_args, additional_slots);
+ __ Bic(slots_to_claim, slots_to_claim, 1);
+
// Add a stack check before pushing arguments.
Label stack_overflow, done;
__ StackOverflowCheck(slots_to_claim, &stack_overflow);
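The reshuffled claim folds the receiver slot and the even-slot rounding into a
single constant. A model of the slot count checked here:

    int PushArgsSlotsToClaim(int num_args, bool with_final_spread) {
      if (with_final_spread) num_args -= 1;         // spread is pushed separately
      int extra = kJSArgcIncludesReceiver ? 1 : 2;  // (receiver +) rounding slot
      return (num_args + extra) & ~1;               // round up to even
    }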
@@ -1669,9 +1731,6 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
__ Unreachable();
__ Bind(&done);
- // Round up to an even number of slots and claim them.
- __ Add(slots_to_claim, slots_to_claim, 1);
- __ Bic(slots_to_claim, slots_to_claim, 1);
__ Claim(slots_to_claim);
{
@@ -1682,15 +1741,16 @@ static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
__ Poke(padreg, Operand(scratch, LSL, kSystemPointerSizeLog2));
}
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- __ Mov(slots_to_copy, num_args);
- __ SlotAddress(stack_addr, 1);
- } else {
- // If we're not given an explicit receiver to store, we'll need to copy it
- // together with the rest of the arguments.
+ const bool skip_receiver =
+ receiver_mode == ConvertReceiverMode::kNullOrUndefined;
+ if (kJSArgcIncludesReceiver && skip_receiver) {
+ __ Sub(slots_to_copy, num_args, kJSArgcReceiverSlots);
+ } else if (!kJSArgcIncludesReceiver && !skip_receiver) {
__ Add(slots_to_copy, num_args, 1);
- __ SlotAddress(stack_addr, 0);
+ } else {
+ __ Mov(slots_to_copy, num_args);
}
+ __ SlotAddress(stack_addr, skip_receiver ? 1 : 0);
__ Sub(last_arg_addr, first_arg_index,
Operand(slots_to_copy, LSL, kSystemPointerSizeLog2));
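The four-way branch above reduces to one formula: normalize num_args to a
receiver-free count, then add the receiver back only when it must be copied
together with the arguments. Sketch:

    int SlotsToCopy(int num_args, bool skip_receiver) {
      int n = kJSArgcIncludesReceiver ? num_args - kJSArgcReceiverSlots : num_args;
      return skip_receiver ? n : n + 1;  // receiver travels with the args if copied
    }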
@@ -1718,7 +1778,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
InterpreterPushArgsMode mode) {
DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x2 : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
@@ -1749,7 +1809,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
- // -- x0 : argument count (not including receiver)
+ // -- x0 : argument count
// -- x3 : new target
// -- x1 : constructor to call
// -- x2 : allocation site feedback if available, undefined otherwise
@@ -1975,16 +2035,16 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
if (java_script_builtin && with_result) {
// Overwrite the hole inserted by the deoptimizer with the return value from
- // the LAZY deopt point. r0 contains the arguments count, the return value
+ // the LAZY deopt point. x0 contains the arguments count; the return value
// from LAZY is always the last argument.
- __ add(x0, x0,
- BuiltinContinuationFrameConstants::kCallerSPOffset /
- kSystemPointerSize);
+ constexpr int return_offset =
+ BuiltinContinuationFrameConstants::kCallerSPOffset /
+ kSystemPointerSize -
+ kJSArgcReceiverSlots;
+ __ add(x0, x0, return_offset);
__ Str(scratch, MemOperand(fp, x0, LSL, kSystemPointerSizeLog2));
// Recover argument count.
- __ sub(x0, x0,
- BuiltinContinuationFrameConstants::kCallerSPOffset /
- kSystemPointerSize);
+ __ sub(x0, x0, return_offset);
}
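The new return_offset folds the receiver correction into the old frame-offset
arithmetic. A sketch with an illustrative kCallerSPOffset value (the real
constant lives in BuiltinContinuationFrameConstants):

    constexpr int kSystemPointerSize = 8;
    constexpr int kCallerSPOffset = 2 * kSystemPointerSize;  // illustrative only
    constexpr int kReturnOffset =
        kCallerSPOffset / kSystemPointerSize - 1;  // - kJSArgcReceiverSlots
    // fp[x0 + kReturnOffset] is the LAZY deopt return-value slot.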
// Load builtin index (stored as a Smi) and use it to get the builtin start
@@ -2078,7 +2138,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadTaggedPointerField(
- x1, FieldMemOperand(x0, Code::kDeoptimizationDataOffset));
+ x1,
+ FieldMemOperand(x0, Code::kDeoptimizationDataOrInterpreterDataOffset));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@@ -2133,14 +2194,16 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ Mov(this_arg, undefined_value);
__ Mov(arg_array, undefined_value);
__ Peek(receiver, 0);
- __ Cmp(argc, Immediate(1));
+ __ Cmp(argc, Immediate(JSParameterCount(1)));
__ B(lt, &done);
__ Peek(this_arg, kSystemPointerSize);
__ B(eq, &done);
__ Peek(arg_array, 2 * kSystemPointerSize);
__ bind(&done);
}
- __ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(argc, kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ PushArgument(this_arg);
// ----------- S t a t e -------------
@@ -2167,7 +2230,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// arguments to the receiver.
__ Bind(&no_arguments);
{
- __ Mov(x0, 0);
+ __ Mov(x0, JSParameterCount(0));
DCHECK_EQ(receiver, x1);
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
@@ -2187,7 +2250,12 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
{
Label non_zero;
Register scratch = x10;
- __ Cbnz(argc, &non_zero);
+ if (kJSArgcIncludesReceiver) {
+ __ Cmp(argc, JSParameterCount(0));
+ __ B(gt, &non_zero);
+ } else {
+ __ Cbnz(argc, &non_zero);
+ }
__ LoadRoot(scratch, RootIndex::kUndefinedValue);
// Overwrite receiver with undefined, which will be the new receiver.
// We do not need to overwrite the padding slot above it with anything.
@@ -2205,8 +2273,15 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
Register copy_from = x10;
Register copy_to = x11;
Register count = x12;
- __ Mov(count, argc); // CopyDoubleWords changes the count argument.
- __ Tbz(argc, 0, &even);
+ UseScratchRegisterScope temps(masm);
+ Register argc_without_receiver = argc;
+ if (kJSArgcIncludesReceiver) {
+ argc_without_receiver = temps.AcquireX();
+ __ Sub(argc_without_receiver, argc, kJSArgcReceiverSlots);
+ }
+ // CopyDoubleWords changes the count argument.
+ __ Mov(count, argc_without_receiver);
+ __ Tbz(argc_without_receiver, 0, &even);
// Shift arguments one slot down on the stack (overwriting the original
// receiver).
@@ -2214,7 +2289,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
__ Sub(copy_to, copy_from, kSystemPointerSize);
__ CopyDoubleWords(copy_to, copy_from, count);
// Overwrite the duplicated remaining last argument.
- __ Poke(padreg, Operand(argc, LSL, kXRegSizeLog2));
+ __ Poke(padreg, Operand(argc_without_receiver, LSL, kXRegSizeLog2));
__ B(&arguments_ready);
// Copy arguments one slot higher in memory, overwriting the original
@@ -2261,17 +2336,19 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ Mov(target, undefined_value);
__ Mov(this_argument, undefined_value);
__ Mov(arguments_list, undefined_value);
- __ Cmp(argc, Immediate(1));
+ __ Cmp(argc, Immediate(JSParameterCount(1)));
__ B(lt, &done);
__ Peek(target, kSystemPointerSize);
__ B(eq, &done);
__ Peek(this_argument, 2 * kSystemPointerSize);
- __ Cmp(argc, Immediate(3));
+ __ Cmp(argc, Immediate(JSParameterCount(3)));
__ B(lt, &done);
__ Peek(arguments_list, 3 * kSystemPointerSize);
__ bind(&done);
}
- __ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(argc, kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ PushArgument(this_argument);
// ----------- S t a t e -------------
@@ -2317,19 +2394,21 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ Mov(target, undefined_value);
__ Mov(arguments_list, undefined_value);
__ Mov(new_target, undefined_value);
- __ Cmp(argc, Immediate(1));
+ __ Cmp(argc, Immediate(JSParameterCount(1)));
__ B(lt, &done);
__ Peek(target, kSystemPointerSize);
__ B(eq, &done);
__ Peek(arguments_list, 2 * kSystemPointerSize);
__ Mov(new_target, target); // new.target defaults to target
- __ Cmp(argc, Immediate(3));
+ __ Cmp(argc, Immediate(JSParameterCount(3)));
__ B(lt, &done);
__ Peek(new_target, 3 * kSystemPointerSize);
__ bind(&done);
}
- __ DropArguments(argc, TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(argc, kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
// Push receiver (undefined).
__ PushArgument(undefined_value);
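Every Cmp threshold in these builtins moves by the receiver slot via
JSParameterCount. The test in plain form, reusing the earlier sketch:

    // "fewer than k user-supplied arguments?" under the new x0 encoding.
    bool FewerThanArgs(int argc /* from x0 */, int k) {
      return argc < JSParameterCount(k);  // argc < k + kJSArgcReceiverSlots
    }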
@@ -2365,19 +2444,25 @@ void Generate_PrepareForCopyingVarargs(MacroAssembler* masm, Register argc,
Register slots_to_copy = x10;
Register slots_to_claim = x12;
- __ Add(slots_to_copy, argc, 1); // Copy with receiver.
+ if (kJSArgcIncludesReceiver) {
+ __ Mov(slots_to_copy, argc);
+ } else {
+ __ Add(slots_to_copy, argc, 1); // Copy with receiver.
+ }
__ Mov(slots_to_claim, len);
__ Tbz(slots_to_claim, 0, &even);
- // Claim space we need. If argc is even, slots_to_claim = len + 1, as we need
- // one extra padding slot. If argc is odd, we know that the original arguments
- // will have a padding slot we can reuse (since len is odd), so
- // slots_to_claim = len - 1.
+ // Claim the space we need. len is odd here. If argc (without receiver) is
+ // even, the original arguments end with a reusable padding slot (argc plus
+ // the receiver is odd), so slots_to_claim = len - 1. If argc (without
+ // receiver) is odd, there is no padding slot to reuse, so we need one
+ // extra: slots_to_claim = len + 1.
{
Register scratch = x11;
__ Add(slots_to_claim, len, 1);
__ And(scratch, argc, 1);
- __ Eor(scratch, scratch, 1);
+ if (!kJSArgcIncludesReceiver) {
+ __ Eor(scratch, scratch, 1);
+ }
__ Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
}
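The And/(Eor)/Sub sequence computes len - 1 or len + 1 from argc's parity; a
direct transcription (this path is only taken when len is odd):

    int VarargsSlotsToClaim(int len, int argc) {
      int scratch = argc & 1;
      if (!kJSArgcIncludesReceiver) scratch ^= 1;  // the Eor dropped above
      return (len + 1) - (scratch << 1);           // len - 1 or len + 1
    }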
@@ -2404,7 +2489,7 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
// -- x1 : target
- // -- x0 : number of parameters on the stack (not including the receiver)
+ // -- x0 : number of parameters on the stack
// -- x2 : arguments list (a FixedArray)
// -- x4 : len (number of elements to push from args)
// -- x3 : new.target (for [[Construct]])
@@ -2455,8 +2540,12 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// scenes and we want to avoid that in a loop.
// TODO(all): Consider using Ldp and Stp.
Register dst = x16;
- __ Add(dst, argc, Immediate(1)); // Consider the receiver as well.
- __ SlotAddress(dst, dst);
+ if (kJSArgcIncludesReceiver) {
+ __ SlotAddress(dst, argc);
+ } else {
+ __ Add(dst, argc, Immediate(1)); // Consider the receiver as well.
+ __ SlotAddress(dst, dst);
+ }
__ Add(argc, argc, len); // Update new argc.
__ Bind(&loop);
__ Sub(len, len, 1);
@@ -2479,7 +2568,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
CallOrConstructMode mode,
Handle<Code> code) {
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x3 : the new.target (for [[Construct]] calls)
// -- x1 : the target to call (can be any Object)
// -- x2 : start index (to support rest parameters)
@@ -2510,6 +2599,9 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Register len = x6;
Label stack_done, stack_overflow;
__ Ldr(len, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ if (kJSArgcIncludesReceiver) {
+ __ Subs(len, len, kJSArgcReceiverSlots);
+ }
__ Subs(len, len, start_index);
__ B(le, &stack_done);
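The forwarded-argument count, with the new receiver strip-off folded in, as a
sketch:

    int ArgsToForward(int caller_argc, int start_index) {
      if (kJSArgcIncludesReceiver) caller_argc -= kJSArgcReceiverSlots;
      return caller_argc - start_index;  // <= 0 means nothing to forward
    }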
// Check for stack overflow.
@@ -2527,8 +2619,12 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
__ lsl(start_index, start_index, kSystemPointerSizeLog2);
__ Add(args_fp, args_fp, start_index);
// Point to the position to copy to.
- __ Add(x10, argc, 1);
- __ SlotAddress(dst, x10);
+ if (kJSArgcIncludesReceiver) {
+ __ SlotAddress(dst, argc);
+ } else {
+ __ Add(x10, argc, 1);
+ __ SlotAddress(dst, x10);
+ }
// Update total number of arguments.
__ Add(argc, argc, len);
__ CopyDoubleWords(dst, args_fp, len);
@@ -2547,7 +2643,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
ASM_LOCATION("Builtins::Generate_CallFunction");
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : the function to call (checked to be a JSFunction)
// -----------------------------------
__ AssertFunction(x1);
@@ -2574,7 +2670,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
&done_convert);
{
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : the function to call (checked to be a JSFunction)
// -- x2 : the shared function info.
// -- cp : the function context.
@@ -2625,7 +2721,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ Bind(&done_convert);
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : the function to call (checked to be a JSFunction)
// -- x2 : the shared function info.
// -- cp : the function context.
@@ -2649,7 +2745,7 @@ namespace {
void Generate_PushBoundArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : target (checked to be a JSBoundFunction)
// -- x3 : new.target (only in case of [[Construct]])
// -----------------------------------
@@ -2666,7 +2762,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ Cbz(bound_argc, &no_bound_arguments);
{
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : target (checked to be a JSBoundFunction)
// -- x2 : the [[BoundArguments]] (implemented as FixedArray)
// -- x3 : new.target (only in case of [[Construct]])
@@ -2698,6 +2794,9 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
Register scratch = x10;
Register receiver = x14;
+ if (kJSArgcIncludesReceiver) {
+ __ Sub(argc, argc, kJSArgcReceiverSlots);
+ }
__ Add(total_argc, argc, bound_argc);
__ Peek(receiver, 0);
@@ -2766,7 +2865,11 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ Cbnz(counter, &loop);
}
// Update argc.
- __ Mov(argc, total_argc);
+ if (kJSArgcIncludesReceiver) {
+ __ Add(argc, total_argc, kJSArgcReceiverSlots);
+ } else {
+ __ Mov(argc, total_argc);
+ }
}
__ Bind(&no_bound_arguments);
}
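Bound-function dispatch keeps total_argc receiver-free internally and re-adds
the slot only when updating x0. End-to-end, as a sketch:

    int ArgcAfterPushingBoundArgs(int argc /* x0 */, int bound_argc) {
      int without_receiver =
          kJSArgcIncludesReceiver ? argc - kJSArgcReceiverSlots : argc;
      int total = without_receiver + bound_argc;
      return kJSArgcIncludesReceiver ? total + kJSArgcReceiverSlots : total;
    }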
@@ -2776,7 +2879,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(x1);
@@ -2799,7 +2902,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : the target to call (can be any Object).
// -----------------------------------
@@ -2848,7 +2951,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : the constructor to call (checked to be a JSFunction)
// -- x3 : the new target (checked to be a constructor)
// -----------------------------------
@@ -2879,7 +2982,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : the function to call (checked to be a JSBoundFunction)
// -- x3 : the new target (checked to be a constructor)
// -----------------------------------
@@ -2908,7 +3011,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- x0 : the number of arguments (not including the receiver)
+ // -- x0 : the number of arguments
// -- x1 : the constructor to call (can be any Object)
// -- x3 : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
@@ -3250,12 +3353,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Bind(&not_js_frame);
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-
{
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
UseScratchRegisterScope temps(masm);
@@ -4032,7 +4129,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
- __ CompareObjectType(code_obj, x3, x3, BASELINE_DATA_TYPE);
+ __ CompareObjectType(code_obj, x3, x3, CODET_TYPE);
__ B(eq, &start_with_baseline);
// Start with bytecode as there is no baseline code.
@@ -4045,16 +4142,16 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Start with baseline code.
__ bind(&start_with_baseline);
} else if (FLAG_debug_code) {
- __ CompareObjectType(code_obj, x3, x3, BASELINE_DATA_TYPE);
+ __ CompareObjectType(code_obj, x3, x3, CODET_TYPE);
__ Assert(eq, AbortReason::kExpectedBaselineData);
}
- // Load baseline code from baseline data.
- __ LoadTaggedPointerField(
- code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
__ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
}
+ if (FLAG_debug_code) {
+ AssertCodeIsBaseline(masm, code_obj, x3);
+ }
// Load the feedback vector.
Register feedback_vector = x2;
diff --git a/deps/v8/src/builtins/array-concat.tq b/deps/v8/src/builtins/array-concat.tq
index 5eb66e6ce8..6fad3e6683 100644
--- a/deps/v8/src/builtins/array-concat.tq
+++ b/deps/v8/src/builtins/array-concat.tq
@@ -43,7 +43,7 @@ ArrayPrototypeConcat(
// TODO(victorgomes): Implement slow path ArrayConcat in Torque.
tail ArrayConcat(
context, LoadTargetFromFrame(), Undefined,
- Convert<int32>(arguments.length));
+ Convert<int32>(arguments.actual_count));
}
} // namespace array
diff --git a/deps/v8/src/builtins/array-shift.tq b/deps/v8/src/builtins/array-shift.tq
index ed1087a85a..ea62b1c7a8 100644
--- a/deps/v8/src/builtins/array-shift.tq
+++ b/deps/v8/src/builtins/array-shift.tq
@@ -103,7 +103,7 @@ transitioning javascript builtin ArrayPrototypeShift(
} label Runtime {
tail ArrayShift(
context, LoadTargetFromFrame(), Undefined,
- Convert<int32>(arguments.length));
+ Convert<int32>(arguments.actual_count));
}
}
}
diff --git a/deps/v8/src/builtins/array-unshift.tq b/deps/v8/src/builtins/array-unshift.tq
index 7afeeb0627..69938ccaea 100644
--- a/deps/v8/src/builtins/array-unshift.tq
+++ b/deps/v8/src/builtins/array-unshift.tq
@@ -89,7 +89,7 @@ transitioning javascript builtin ArrayPrototypeUnshift(
tail ArrayUnshift(
context, LoadTargetFromFrame(), Undefined,
- Convert<int32>(arguments.length));
+ Convert<int32>(arguments.actual_count));
} label Slow {
return GenericArrayUnshift(context, receiver, arguments);
}
diff --git a/deps/v8/src/builtins/builtins-array-gen.cc b/deps/v8/src/builtins/builtins-array-gen.cc
index 48eb954f83..75c3c194b9 100644
--- a/deps/v8/src/builtins/builtins-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-array-gen.cc
@@ -45,13 +45,13 @@ void ArrayBuiltinsAssembler::TypedArrayMapResultGenerator() {
// See tc39.github.io/ecma262/#sec-%typedarray%.prototype.map.
TNode<Object> ArrayBuiltinsAssembler::TypedArrayMapProcessor(
TNode<Object> k_value, TNode<UintPtrT> k) {
- // 8. c. Let mapped_value be ? Call(callbackfn, T, « kValue, k, O »).
+ // 7c. Let mapped_value be ? Call(callbackfn, T, « kValue, k, O »).
TNode<Number> k_number = ChangeUintPtrToTagged(k);
TNode<Object> mapped_value =
Call(context(), callbackfn(), this_arg(), k_value, k_number, o());
Label fast(this), slow(this), done(this), detached(this, Label::kDeferred);
- // 8. d. Perform ? Set(A, Pk, mapped_value, true).
+ // 7d. Perform ? Set(A, Pk, mapped_value, true).
// Since we know that A is a TypedArray, this always ends up in
// #sec-integer-indexed-exotic-objects-set-p-v-receiver and then
// tc39.github.io/ecma262/#sec-integerindexedelementset .
@@ -59,9 +59,9 @@ TNode<Object> ArrayBuiltinsAssembler::TypedArrayMapProcessor(
BIND(&fast);
// #sec-integerindexedelementset
- // 5. If arrayTypeName is "BigUint64Array" or "BigInt64Array", let
+ // 2. If arrayTypeName is "BigUint64Array" or "BigInt64Array", let
// numValue be ? ToBigInt(v).
- // 6. Otherwise, let numValue be ? ToNumber(value).
+ // 3. Otherwise, let numValue be ? ToNumber(value).
TNode<Object> num_value;
if (source_elements_kind_ == BIGINT64_ELEMENTS ||
source_elements_kind_ == BIGUINT64_ELEMENTS) {
@@ -175,24 +175,15 @@ void ArrayBuiltinsAssembler::GenerateIteratingTypedArrayBuiltinBody(
size_t i = 0;
for (auto it = labels.begin(); it != labels.end(); ++i, ++it) {
BIND(&*it);
- Label done(this);
source_elements_kind_ = static_cast<ElementsKind>(elements_kinds[i]);
- // TODO(turbofan): Silently cancelling the loop on buffer detachment is a
- // spec violation. Should go to &throw_detached and throw a TypeError
- // instead.
- VisitAllTypedArrayElements(array_buffer, processor, &done, direction,
- typed_array);
- Goto(&done);
- // No exception, return success
- BIND(&done);
+ VisitAllTypedArrayElements(array_buffer, processor, direction, typed_array);
ReturnFromBuiltin(a_.value());
}
}
void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
TNode<JSArrayBuffer> array_buffer, const CallResultProcessor& processor,
- Label* detached, ForEachDirection direction,
- TNode<JSTypedArray> typed_array) {
+ ForEachDirection direction, TNode<JSTypedArray> typed_array) {
VariableList list({&a_, &k_}, zone());
TNode<UintPtrT> start = UintPtrConstant(0);
@@ -208,12 +199,28 @@ void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
BuildFastLoop<UintPtrT>(
list, start, end,
[&](TNode<UintPtrT> index) {
- GotoIf(IsDetachedBuffer(array_buffer), detached);
- TNode<RawPtrT> data_ptr = LoadJSTypedArrayDataPtr(typed_array);
- TNode<Numeric> value = LoadFixedTypedArrayElementAsTagged(
- data_ptr, index, source_elements_kind_);
- k_ = index;
- a_ = processor(this, value, index);
+ TVARIABLE(Object, value);
+ Label detached(this, Label::kDeferred);
+ Label process(this);
+ GotoIf(IsDetachedBuffer(array_buffer), &detached);
+ {
+ TNode<RawPtrT> data_ptr = LoadJSTypedArrayDataPtr(typed_array);
+ value = LoadFixedTypedArrayElementAsTagged(data_ptr, index,
+ source_elements_kind_);
+ Goto(&process);
+ }
+
+ BIND(&detached);
+ {
+ value = UndefinedConstant();
+ Goto(&process);
+ }
+
+ BIND(&process);
+ {
+ k_ = index;
+ a_ = processor(this, value.value(), index);
+ }
},
incr, advance_mode);
}
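Behavioral note on this hunk: detaching the buffer mid-iteration no longer
exits the loop early; the element load is replaced by undefined and the
callback still runs for the remaining indices. A rough, self-contained model
(std::optional stands in for undefined; none of these names are CSA APIs):

    #include <cstddef>
    #include <functional>
    #include <optional>

    void VisitAllElements(
        std::size_t length, const std::function<bool()>& detached,
        const std::function<double(std::size_t)>& load,
        const std::function<void(std::optional<double>, std::size_t)>& process) {
      for (std::size_t k = 0; k < length; ++k) {
        // Detachment is re-checked per element, as in the BuildFastLoop body.
        std::optional<double> value;
        if (!detached()) value = load(k);
        process(value, k);
      }
    }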
@@ -621,9 +628,9 @@ void ArrayIncludesIndexofAssembler::Generate(SearchVariant variant,
Label is_smi(this), is_nonsmi(this), done(this);
// If no fromIndex was passed, default to 0.
- GotoIf(
- IntPtrLessThanOrEqual(args.GetLength(), IntPtrConstant(kFromIndexArg)),
- &done);
+ GotoIf(IntPtrLessThanOrEqual(args.GetLengthWithoutReceiver(),
+ IntPtrConstant(kFromIndexArg)),
+ &done);
TNode<Object> start_from = args.AtIndex(kFromIndexArg);
// Handle Smis and undefined here and everything else in runtime.
@@ -1774,11 +1781,13 @@ void ArrayBuiltinsAssembler::GenerateDispatchToArrayStub(
base::Optional<TNode<AllocationSite>> allocation_site) {
CodeStubArguments args(this, argc);
Label check_one_case(this), fallthrough(this);
- GotoIfNot(IntPtrEqual(args.GetLength(), IntPtrConstant(0)), &check_one_case);
+ GotoIfNot(IntPtrEqual(args.GetLengthWithoutReceiver(), IntPtrConstant(0)),
+ &check_one_case);
CreateArrayDispatchNoArgument(context, target, argc, mode, allocation_site);
BIND(&check_one_case);
- GotoIfNot(IntPtrEqual(args.GetLength(), IntPtrConstant(1)), &fallthrough);
+ GotoIfNot(IntPtrEqual(args.GetLengthWithoutReceiver(), IntPtrConstant(1)),
+ &fallthrough);
CreateArrayDispatchSingleArgument(context, target, argc, mode,
allocation_site);
diff --git a/deps/v8/src/builtins/builtins-array-gen.h b/deps/v8/src/builtins/builtins-array-gen.h
index 96833d9dea..1f169632bf 100644
--- a/deps/v8/src/builtins/builtins-array-gen.h
+++ b/deps/v8/src/builtins/builtins-array-gen.h
@@ -104,7 +104,7 @@ class ArrayBuiltinsAssembler : public CodeStubAssembler {
private:
void VisitAllTypedArrayElements(TNode<JSArrayBuffer> array_buffer,
const CallResultProcessor& processor,
- Label* detached, ForEachDirection direction,
+ ForEachDirection direction,
TNode<JSTypedArray> typed_array);
TNode<Object> callbackfn_;
diff --git a/deps/v8/src/builtins/builtins-async-function-gen.cc b/deps/v8/src/builtins/builtins-async-function-gen.cc
index 3e87252673..c5b4eb9041 100644
--- a/deps/v8/src/builtins/builtins-async-function-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-function-gen.cc
@@ -84,9 +84,8 @@ TF_BUILTIN(AsyncFunctionEnter, AsyncFunctionBuiltinsAssembler) {
// Compute the number of registers and parameters.
TNode<SharedFunctionInfo> shared = LoadObjectField<SharedFunctionInfo>(
closure, JSFunction::kSharedFunctionInfoOffset);
- TNode<IntPtrT> formal_parameter_count =
- ChangeInt32ToIntPtr(LoadObjectField<Uint16T>(
- shared, SharedFunctionInfo::kFormalParameterCountOffset));
+ TNode<IntPtrT> formal_parameter_count = ChangeInt32ToIntPtr(
+ LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(shared));
TNode<BytecodeArray> bytecode_array =
LoadSharedFunctionInfoBytecodeArray(shared);
TNode<IntPtrT> frame_size = ChangeInt32ToIntPtr(LoadObjectField<Uint32T>(
diff --git a/deps/v8/src/builtins/builtins-async-iterator-gen.cc b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
index 11dd73cd4a..f4af61b1a0 100644
--- a/deps/v8/src/builtins/builtins-async-iterator-gen.cc
+++ b/deps/v8/src/builtins/builtins-async-iterator-gen.cc
@@ -137,9 +137,9 @@ void AsyncFromSyncBuiltinsAssembler::Generate_AsyncFromSyncIteratorMethod(
{
Label has_sent_value(this), no_sent_value(this), merge(this);
ScopedExceptionHandler handler(this, &reject_promise, &var_exception);
- Branch(
- IntPtrGreaterThan(args->GetLength(), IntPtrConstant(kValueOrReasonArg)),
- &has_sent_value, &no_sent_value);
+ Branch(IntPtrGreaterThan(args->GetLengthWithoutReceiver(),
+ IntPtrConstant(kValueOrReasonArg)),
+ &has_sent_value, &no_sent_value);
BIND(&has_sent_value);
{
iter_result = Call(context, method, sync_iterator, sent_value);
diff --git a/deps/v8/src/builtins/builtins-call-gen.cc b/deps/v8/src/builtins/builtins-call-gen.cc
index 54d2c74802..78003e71bd 100644
--- a/deps/v8/src/builtins/builtins-call-gen.cc
+++ b/deps/v8/src/builtins/builtins-call-gen.cc
@@ -274,7 +274,8 @@ void CallOrConstructBuiltinsAssembler::CallOrConstructWithArrayLike(
BIND(&if_done);
{
Label if_not_double(this), if_double(this);
- TNode<Int32T> args_count = Int32Constant(0); // args already on the stack
+ TNode<Int32T> args_count =
+ Int32Constant(i::JSParameterCount(0)); // args already on the stack
TNode<Int32T> length = var_length.value();
{
@@ -737,8 +738,8 @@ void CallOrConstructBuiltinsAssembler::CallFunctionTemplate(
TNode<RawPtrT> callback = LoadForeignForeignAddressPtr(foreign);
TNode<Object> call_data =
LoadObjectField<Object>(call_handler_info, CallHandlerInfo::kDataOffset);
- TailCallStub(CodeFactory::CallApiCallback(isolate()), context, callback, argc,
- call_data, holder);
+ TailCallStub(CodeFactory::CallApiCallback(isolate()), context, callback,
+ args.GetLengthWithoutReceiver(), call_data, holder);
}
TF_BUILTIN(CallFunctionTemplate_CheckAccess, CallOrConstructBuiltinsAssembler) {
diff --git a/deps/v8/src/builtins/builtins-constructor-gen.cc b/deps/v8/src/builtins/builtins-constructor-gen.cc
index 0d677da854..23d7747491 100644
--- a/deps/v8/src/builtins/builtins-constructor-gen.cc
+++ b/deps/v8/src/builtins/builtins-constructor-gen.cc
@@ -589,10 +589,7 @@ TNode<HeapObject> ConstructorBuiltinsAssembler::CreateShallowObjectLiteral(
BIND(&if_copy_elements);
CSA_ASSERT(this, Word32BinaryNot(
IsFixedCOWArrayMap(LoadMap(boilerplate_elements))));
- ExtractFixedArrayFlags flags;
- flags |= ExtractFixedArrayFlag::kAllFixedArrays;
- flags |= ExtractFixedArrayFlag::kNewSpaceAllocationOnly;
- flags |= ExtractFixedArrayFlag::kDontCopyCOW;
+ auto flags = ExtractFixedArrayFlag::kAllFixedArrays;
var_elements = CloneFixedArray(boilerplate_elements, flags);
Goto(&done);
BIND(&done);
diff --git a/deps/v8/src/builtins/builtins-dataview.cc b/deps/v8/src/builtins/builtins-dataview.cc
index 3ae331f5d7..465de8e982 100644
--- a/deps/v8/src/builtins/builtins-dataview.cc
+++ b/deps/v8/src/builtins/builtins-dataview.cc
@@ -102,7 +102,6 @@ BUILTIN(DataViewConstructor) {
// 13. Set O's [[ByteOffset]] internal slot to offset.
Handle<JSDataView>::cast(result)->set_byte_offset(view_byte_offset);
- Handle<JSDataView>::cast(result)->AllocateExternalPointerEntries(isolate);
Handle<JSDataView>::cast(result)->set_data_pointer(
isolate,
static_cast<uint8_t*>(array_buffer->backing_store()) + view_byte_offset);
diff --git a/deps/v8/src/builtins/builtins-date.cc b/deps/v8/src/builtins/builtins-date.cc
index 1de6357cf8..32c1f4b059 100644
--- a/deps/v8/src/builtins/builtins-date.cc
+++ b/deps/v8/src/builtins/builtins-date.cc
@@ -24,85 +24,6 @@ namespace internal {
namespace {
-// ES6 section 20.3.1.1 Time Values and Time Range
-const double kMinYear = -1000000.0;
-const double kMaxYear = -kMinYear;
-const double kMinMonth = -10000000.0;
-const double kMaxMonth = -kMinMonth;
-
-// 20.3.1.2 Day Number and Time within Day
-const double kMsPerDay = 86400000.0;
-
-// ES6 section 20.3.1.11 Hours, Minutes, Second, and Milliseconds
-const double kMsPerSecond = 1000.0;
-const double kMsPerMinute = 60000.0;
-const double kMsPerHour = 3600000.0;
-
-// ES6 section 20.3.1.14 MakeDate (day, time)
-double MakeDate(double day, double time) {
- if (std::isfinite(day) && std::isfinite(time)) {
- return time + day * kMsPerDay;
- }
- return std::numeric_limits<double>::quiet_NaN();
-}
-
-// ES6 section 20.3.1.13 MakeDay (year, month, date)
-double MakeDay(double year, double month, double date) {
- if ((kMinYear <= year && year <= kMaxYear) &&
- (kMinMonth <= month && month <= kMaxMonth) && std::isfinite(date)) {
- int y = FastD2I(year);
- int m = FastD2I(month);
- y += m / 12;
- m %= 12;
- if (m < 0) {
- m += 12;
- y -= 1;
- }
- DCHECK_LE(0, m);
- DCHECK_LT(m, 12);
-
- // kYearDelta is an arbitrary number such that:
- // a) kYearDelta = -1 (mod 400)
- // b) year + kYearDelta > 0 for years in the range defined by
- // ECMA 262 - 15.9.1.1, i.e. upto 100,000,000 days on either side of
- // Jan 1 1970. This is required so that we don't run into integer
- // division of negative numbers.
- // c) there shouldn't be an overflow for 32-bit integers in the following
- // operations.
- static const int kYearDelta = 399999;
- static const int kBaseDay =
- 365 * (1970 + kYearDelta) + (1970 + kYearDelta) / 4 -
- (1970 + kYearDelta) / 100 + (1970 + kYearDelta) / 400;
- int day_from_year = 365 * (y + kYearDelta) + (y + kYearDelta) / 4 -
- (y + kYearDelta) / 100 + (y + kYearDelta) / 400 -
- kBaseDay;
- if ((y % 4 != 0) || (y % 100 == 0 && y % 400 != 0)) {
- static const int kDayFromMonth[] = {0, 31, 59, 90, 120, 151,
- 181, 212, 243, 273, 304, 334};
- day_from_year += kDayFromMonth[m];
- } else {
- static const int kDayFromMonth[] = {0, 31, 60, 91, 121, 152,
- 182, 213, 244, 274, 305, 335};
- day_from_year += kDayFromMonth[m];
- }
- return static_cast<double>(day_from_year - 1) + DoubleToInteger(date);
- }
- return std::numeric_limits<double>::quiet_NaN();
-}
-
-// ES6 section 20.3.1.12 MakeTime (hour, min, sec, ms)
-double MakeTime(double hour, double min, double sec, double ms) {
- if (std::isfinite(hour) && std::isfinite(min) && std::isfinite(sec) &&
- std::isfinite(ms)) {
- double const h = DoubleToInteger(hour);
- double const m = DoubleToInteger(min);
- double const s = DoubleToInteger(sec);
- double const milli = DoubleToInteger(ms);
- return h * kMsPerHour + m * kMsPerMinute + s * kMsPerSecond + milli;
- }
- return std::numeric_limits<double>::quiet_NaN();
-}
-
const char* kShortWeekDays[] = {"Sun", "Mon", "Tue", "Wed",
"Thu", "Fri", "Sat"};
const char* kShortMonths[] = {"Jan", "Feb", "Mar", "Apr", "May", "Jun",
diff --git a/deps/v8/src/builtins/builtins-definitions.h b/deps/v8/src/builtins/builtins-definitions.h
index 70eb349dab..0c89b0e45a 100644
--- a/deps/v8/src/builtins/builtins-definitions.h
+++ b/deps/v8/src/builtins/builtins-definitions.h
@@ -41,20 +41,28 @@ namespace internal {
TFC(EphemeronKeyBarrierIgnoreFP, WriteBarrier) \
\
/* TSAN support for stores in generated code.*/ \
- IF_TSAN(TFC, TSANRelaxedStore8IgnoreFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore8SaveFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore16IgnoreFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore16SaveFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore32IgnoreFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore32SaveFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore64IgnoreFP, TSANRelaxedStore) \
- IF_TSAN(TFC, TSANRelaxedStore64SaveFP, TSANRelaxedStore) \
+ IF_TSAN(TFC, TSANRelaxedStore8IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore8SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore16IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore16SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore32IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore32SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore64IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANRelaxedStore64SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore8IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore8SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore16IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore16SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore32IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore32SaveFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore64IgnoreFP, TSANStore) \
+ IF_TSAN(TFC, TSANSeqCstStore64SaveFP, TSANStore) \
\
/* TSAN support for loads in generated code.*/ \
- IF_TSAN(TFC, TSANRelaxedLoad32IgnoreFP, TSANRelaxedLoad) \
- IF_TSAN(TFC, TSANRelaxedLoad32SaveFP, TSANRelaxedLoad) \
- IF_TSAN(TFC, TSANRelaxedLoad64IgnoreFP, TSANRelaxedLoad) \
- IF_TSAN(TFC, TSANRelaxedLoad64SaveFP, TSANRelaxedLoad) \
+ IF_TSAN(TFC, TSANRelaxedLoad32IgnoreFP, TSANLoad) \
+ IF_TSAN(TFC, TSANRelaxedLoad32SaveFP, TSANLoad) \
+ IF_TSAN(TFC, TSANRelaxedLoad64IgnoreFP, TSANLoad) \
+ IF_TSAN(TFC, TSANRelaxedLoad64SaveFP, TSANLoad) \
\
/* Adaptor for CPP builtin */ \
TFC(AdaptorWithBuiltinExitFrame, CppBuiltinAdaptor) \
@@ -302,7 +310,7 @@ namespace internal {
CPP(Illegal) \
CPP(StrictPoisonPillThrower) \
CPP(UnsupportedThrower) \
- TFJ(ReturnReceiver, 0, kReceiver) \
+ TFJ(ReturnReceiver, kJSArgcReceiverSlots, kReceiver) \
\
/* Array */ \
TFC(ArrayConstructor, JSTrampoline) \
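Every TFJ argc literal in this file shifts by the receiver slot; for example,
ReturnReceiver above goes from 0 to kJSArgcReceiverSlots. The rewrite rule, as
arithmetic on the assumed constants from the earlier sketch:

    // Declared TFJ argc under the new convention.
    constexpr int TfjArgc(int js_params_without_receiver) {
      return js_params_without_receiver + kJSArgcReceiverSlots;
    }
    static_assert(TfjArgc(0) == kJSArgcReceiverSlots, "ReturnReceiver");
    static_assert(TfjArgc(1) == kJSArgcReceiverSlots + 1, "DatePrototypeToPrimitive");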
@@ -373,13 +381,13 @@ namespace internal {
TFS(CloneFastJSArrayFillingHoles, kSource) \
TFS(ExtractFastJSArray, kSource, kBegin, kCount) \
/* ES6 #sec-array.prototype.entries */ \
- TFJ(ArrayPrototypeEntries, 0, kReceiver) \
+ TFJ(ArrayPrototypeEntries, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-array.prototype.keys */ \
- TFJ(ArrayPrototypeKeys, 0, kReceiver) \
+ TFJ(ArrayPrototypeKeys, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-array.prototype.values */ \
- TFJ(ArrayPrototypeValues, 0, kReceiver) \
+ TFJ(ArrayPrototypeValues, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-%arrayiteratorprototype%.next */ \
- TFJ(ArrayIteratorPrototypeNext, 0, kReceiver) \
+ TFJ(ArrayIteratorPrototypeNext, kJSArgcReceiverSlots, kReceiver) \
/* https://tc39.github.io/proposal-flatMap/#sec-FlattenIntoArray */ \
TFS(FlattenIntoArray, kTarget, kSource, kSourceLength, kStart, kDepth) \
TFS(FlatMapIntoArray, kTarget, kSource, kSourceLength, kStart, kDepth, \
@@ -404,8 +412,10 @@ namespace internal {
TFC(AsyncFunctionLazyDeoptContinuation, AsyncFunctionStackParameter) \
TFS(AsyncFunctionAwaitCaught, kAsyncFunctionObject, kValue) \
TFS(AsyncFunctionAwaitUncaught, kAsyncFunctionObject, kValue) \
- TFJ(AsyncFunctionAwaitRejectClosure, 1, kReceiver, kSentError) \
- TFJ(AsyncFunctionAwaitResolveClosure, 1, kReceiver, kSentValue) \
+ TFJ(AsyncFunctionAwaitRejectClosure, kJSArgcReceiverSlots + 1, kReceiver, \
+ kSentError) \
+ TFJ(AsyncFunctionAwaitResolveClosure, kJSArgcReceiverSlots + 1, kReceiver, \
+ kSentValue) \
\
/* BigInt */ \
CPP(BigIntConstructor) \
@@ -471,45 +481,45 @@ namespace internal {
/* ES #sec-date-constructor */ \
CPP(DateConstructor) \
/* ES6 #sec-date.prototype.getdate */ \
- TFJ(DatePrototypeGetDate, 0, kReceiver) \
+ TFJ(DatePrototypeGetDate, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getday */ \
- TFJ(DatePrototypeGetDay, 0, kReceiver) \
+ TFJ(DatePrototypeGetDay, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getfullyear */ \
- TFJ(DatePrototypeGetFullYear, 0, kReceiver) \
+ TFJ(DatePrototypeGetFullYear, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.gethours */ \
- TFJ(DatePrototypeGetHours, 0, kReceiver) \
+ TFJ(DatePrototypeGetHours, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getmilliseconds */ \
- TFJ(DatePrototypeGetMilliseconds, 0, kReceiver) \
+ TFJ(DatePrototypeGetMilliseconds, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getminutes */ \
- TFJ(DatePrototypeGetMinutes, 0, kReceiver) \
+ TFJ(DatePrototypeGetMinutes, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getmonth */ \
- TFJ(DatePrototypeGetMonth, 0, kReceiver) \
+ TFJ(DatePrototypeGetMonth, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getseconds */ \
- TFJ(DatePrototypeGetSeconds, 0, kReceiver) \
+ TFJ(DatePrototypeGetSeconds, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.gettime */ \
- TFJ(DatePrototypeGetTime, 0, kReceiver) \
+ TFJ(DatePrototypeGetTime, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.gettimezoneoffset */ \
- TFJ(DatePrototypeGetTimezoneOffset, 0, kReceiver) \
+ TFJ(DatePrototypeGetTimezoneOffset, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getutcdate */ \
- TFJ(DatePrototypeGetUTCDate, 0, kReceiver) \
+ TFJ(DatePrototypeGetUTCDate, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getutcday */ \
- TFJ(DatePrototypeGetUTCDay, 0, kReceiver) \
+ TFJ(DatePrototypeGetUTCDay, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getutcfullyear */ \
- TFJ(DatePrototypeGetUTCFullYear, 0, kReceiver) \
+ TFJ(DatePrototypeGetUTCFullYear, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getutchours */ \
- TFJ(DatePrototypeGetUTCHours, 0, kReceiver) \
+ TFJ(DatePrototypeGetUTCHours, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getutcmilliseconds */ \
- TFJ(DatePrototypeGetUTCMilliseconds, 0, kReceiver) \
+ TFJ(DatePrototypeGetUTCMilliseconds, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getutcminutes */ \
- TFJ(DatePrototypeGetUTCMinutes, 0, kReceiver) \
+ TFJ(DatePrototypeGetUTCMinutes, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getutcmonth */ \
- TFJ(DatePrototypeGetUTCMonth, 0, kReceiver) \
+ TFJ(DatePrototypeGetUTCMonth, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.getutcseconds */ \
- TFJ(DatePrototypeGetUTCSeconds, 0, kReceiver) \
+ TFJ(DatePrototypeGetUTCSeconds, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype.valueof */ \
- TFJ(DatePrototypeValueOf, 0, kReceiver) \
+ TFJ(DatePrototypeValueOf, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-date.prototype-@@toprimitive */ \
- TFJ(DatePrototypeToPrimitive, 1, kReceiver, kHint) \
+ TFJ(DatePrototypeToPrimitive, kJSArgcReceiverSlots + 1, kReceiver, kHint) \
CPP(DatePrototypeGetYear) \
CPP(DatePrototypeSetYear) \
CPP(DateNow) \
@@ -578,9 +588,9 @@ namespace internal {
CPP(GlobalUnescape) \
CPP(GlobalEval) \
/* ES6 #sec-isfinite-number */ \
- TFJ(GlobalIsFinite, 1, kReceiver, kNumber) \
+ TFJ(GlobalIsFinite, kJSArgcReceiverSlots + 1, kReceiver, kNumber) \
/* ES6 #sec-isnan-number */ \
- TFJ(GlobalIsNaN, 1, kReceiver, kNumber) \
+ TFJ(GlobalIsNaN, kJSArgcReceiverSlots + 1, kReceiver, kNumber) \
\
/* JSON */ \
CPP(JsonParse) \
@@ -643,23 +653,23 @@ namespace internal {
/* Map */ \
TFS(FindOrderedHashMapEntry, kTable, kKey) \
TFJ(MapConstructor, kDontAdaptArgumentsSentinel) \
- TFJ(MapPrototypeSet, 2, kReceiver, kKey, kValue) \
- TFJ(MapPrototypeDelete, 1, kReceiver, kKey) \
- TFJ(MapPrototypeGet, 1, kReceiver, kKey) \
- TFJ(MapPrototypeHas, 1, kReceiver, kKey) \
+ TFJ(MapPrototypeSet, kJSArgcReceiverSlots + 2, kReceiver, kKey, kValue) \
+ TFJ(MapPrototypeDelete, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
+ TFJ(MapPrototypeGet, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
+ TFJ(MapPrototypeHas, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
CPP(MapPrototypeClear) \
/* ES #sec-map.prototype.entries */ \
- TFJ(MapPrototypeEntries, 0, kReceiver) \
+ TFJ(MapPrototypeEntries, kJSArgcReceiverSlots, kReceiver) \
/* ES #sec-get-map.prototype.size */ \
- TFJ(MapPrototypeGetSize, 0, kReceiver) \
+ TFJ(MapPrototypeGetSize, kJSArgcReceiverSlots, kReceiver) \
/* ES #sec-map.prototype.forEach */ \
TFJ(MapPrototypeForEach, kDontAdaptArgumentsSentinel) \
/* ES #sec-map.prototype.keys */ \
- TFJ(MapPrototypeKeys, 0, kReceiver) \
+ TFJ(MapPrototypeKeys, kJSArgcReceiverSlots, kReceiver) \
/* ES #sec-map.prototype.values */ \
- TFJ(MapPrototypeValues, 0, kReceiver) \
+ TFJ(MapPrototypeValues, kJSArgcReceiverSlots, kReceiver) \
/* ES #sec-%mapiteratorprototype%.next */ \
- TFJ(MapIteratorPrototypeNext, 0, kReceiver) \
+ TFJ(MapIteratorPrototypeNext, kJSArgcReceiverSlots, kReceiver) \
TFS(MapIteratorToList, kSource) \
\
/* ES #sec-number-constructor */ \
@@ -731,28 +741,30 @@ namespace internal {
CPP(ObjectDefineProperties) \
CPP(ObjectDefineProperty) \
CPP(ObjectDefineSetter) \
- TFJ(ObjectEntries, 1, kReceiver, kObject) \
+ TFJ(ObjectEntries, kJSArgcReceiverSlots + 1, kReceiver, kObject) \
CPP(ObjectFreeze) \
TFJ(ObjectGetOwnPropertyDescriptor, kDontAdaptArgumentsSentinel) \
CPP(ObjectGetOwnPropertyDescriptors) \
- TFJ(ObjectGetOwnPropertyNames, 1, kReceiver, kObject) \
+ TFJ(ObjectGetOwnPropertyNames, kJSArgcReceiverSlots + 1, kReceiver, kObject) \
CPP(ObjectGetOwnPropertySymbols) \
- TFJ(ObjectHasOwn, 2, kReceiver, kObject, kKey) \
- TFJ(ObjectIs, 2, kReceiver, kLeft, kRight) \
+ TFJ(ObjectHasOwn, kJSArgcReceiverSlots + 2, kReceiver, kObject, kKey) \
+ TFJ(ObjectIs, kJSArgcReceiverSlots + 2, kReceiver, kLeft, kRight) \
CPP(ObjectIsFrozen) \
CPP(ObjectIsSealed) \
- TFJ(ObjectKeys, 1, kReceiver, kObject) \
+ TFJ(ObjectKeys, kJSArgcReceiverSlots + 1, kReceiver, kObject) \
CPP(ObjectLookupGetter) \
CPP(ObjectLookupSetter) \
/* ES6 #sec-object.prototype.hasownproperty */ \
- TFJ(ObjectPrototypeHasOwnProperty, 1, kReceiver, kKey) \
- TFJ(ObjectPrototypeIsPrototypeOf, 1, kReceiver, kValue) \
+ TFJ(ObjectPrototypeHasOwnProperty, kJSArgcReceiverSlots + 1, kReceiver, \
+ kKey) \
+ TFJ(ObjectPrototypeIsPrototypeOf, kJSArgcReceiverSlots + 1, kReceiver, \
+ kValue) \
CPP(ObjectPrototypePropertyIsEnumerable) \
CPP(ObjectPrototypeGetProto) \
CPP(ObjectPrototypeSetProto) \
CPP(ObjectSeal) \
TFS(ObjectToString, kReceiver) \
- TFJ(ObjectValues, 1, kReceiver, kObject) \
+ TFJ(ObjectValues, kJSArgcReceiverSlots + 1, kReceiver, kObject) \
\
/* instanceof */ \
TFC(OrdinaryHasInstance, Compare) \
@@ -784,14 +796,16 @@ namespace internal {
CPP(RegExpCapture8Getter) \
CPP(RegExpCapture9Getter) \
/* ES #sec-regexp-pattern-flags */ \
- TFJ(RegExpConstructor, 2, kReceiver, kPattern, kFlags) \
+ TFJ(RegExpConstructor, kJSArgcReceiverSlots + 2, kReceiver, kPattern, \
+ kFlags) \
CPP(RegExpInputGetter) \
CPP(RegExpInputSetter) \
CPP(RegExpLastMatchGetter) \
CPP(RegExpLastParenGetter) \
CPP(RegExpLeftContextGetter) \
/* ES #sec-regexp.prototype.compile */ \
- TFJ(RegExpPrototypeCompile, 2, kReceiver, kPattern, kFlags) \
+ TFJ(RegExpPrototypeCompile, kJSArgcReceiverSlots + 2, kReceiver, kPattern, \
+ kFlags) \
CPP(RegExpPrototypeToString) \
CPP(RegExpRightContextGetter) \
\
@@ -803,20 +817,20 @@ namespace internal {
\
/* Set */ \
TFJ(SetConstructor, kDontAdaptArgumentsSentinel) \
- TFJ(SetPrototypeHas, 1, kReceiver, kKey) \
- TFJ(SetPrototypeAdd, 1, kReceiver, kKey) \
- TFJ(SetPrototypeDelete, 1, kReceiver, kKey) \
+ TFJ(SetPrototypeHas, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
+ TFJ(SetPrototypeAdd, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
+ TFJ(SetPrototypeDelete, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
CPP(SetPrototypeClear) \
/* ES #sec-set.prototype.entries */ \
- TFJ(SetPrototypeEntries, 0, kReceiver) \
+ TFJ(SetPrototypeEntries, kJSArgcReceiverSlots, kReceiver) \
/* ES #sec-get-set.prototype.size */ \
- TFJ(SetPrototypeGetSize, 0, kReceiver) \
+ TFJ(SetPrototypeGetSize, kJSArgcReceiverSlots, kReceiver) \
/* ES #sec-set.prototype.foreach */ \
TFJ(SetPrototypeForEach, kDontAdaptArgumentsSentinel) \
/* ES #sec-set.prototype.values */ \
- TFJ(SetPrototypeValues, 0, kReceiver) \
+ TFJ(SetPrototypeValues, kJSArgcReceiverSlots, kReceiver) \
/* ES #sec-%setiteratorprototype%.next */ \
- TFJ(SetIteratorPrototypeNext, 0, kReceiver) \
+ TFJ(SetIteratorPrototypeNext, kJSArgcReceiverSlots, kReceiver) \
TFS(SetOrSetIteratorToList, kSource) \
\
/* SharedArrayBuffer */ \
@@ -825,16 +839,18 @@ namespace internal {
/* https://tc39.es/proposal-resizablearraybuffer/ */ \
CPP(SharedArrayBufferPrototypeGrow) \
\
- TFJ(AtomicsLoad, 2, kReceiver, kArray, kIndex) \
- TFJ(AtomicsStore, 3, kReceiver, kArray, kIndex, kValue) \
- TFJ(AtomicsExchange, 3, kReceiver, kArray, kIndex, kValue) \
- TFJ(AtomicsCompareExchange, 4, kReceiver, kArray, kIndex, kOldValue, \
- kNewValue) \
- TFJ(AtomicsAdd, 3, kReceiver, kArray, kIndex, kValue) \
- TFJ(AtomicsSub, 3, kReceiver, kArray, kIndex, kValue) \
- TFJ(AtomicsAnd, 3, kReceiver, kArray, kIndex, kValue) \
- TFJ(AtomicsOr, 3, kReceiver, kArray, kIndex, kValue) \
- TFJ(AtomicsXor, 3, kReceiver, kArray, kIndex, kValue) \
+ TFJ(AtomicsLoad, kJSArgcReceiverSlots + 2, kReceiver, kArray, kIndex) \
+ TFJ(AtomicsStore, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, \
+ kValue) \
+ TFJ(AtomicsExchange, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, \
+ kValue) \
+ TFJ(AtomicsCompareExchange, kJSArgcReceiverSlots + 4, kReceiver, kArray, \
+ kIndex, kOldValue, kNewValue) \
+ TFJ(AtomicsAdd, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, kValue) \
+ TFJ(AtomicsSub, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, kValue) \
+ TFJ(AtomicsAnd, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, kValue) \
+ TFJ(AtomicsOr, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, kValue) \
+ TFJ(AtomicsXor, kJSArgcReceiverSlots + 3, kReceiver, kArray, kIndex, kValue) \
CPP(AtomicsNotify) \
CPP(AtomicsIsLockFree) \
CPP(AtomicsWait) \
@@ -848,11 +864,12 @@ namespace internal {
/* ES6 #sec-string.prototype.lastindexof */ \
CPP(StringPrototypeLastIndexOf) \
/* ES #sec-string.prototype.matchAll */ \
- TFJ(StringPrototypeMatchAll, 1, kReceiver, kRegexp) \
+ TFJ(StringPrototypeMatchAll, kJSArgcReceiverSlots + 1, kReceiver, kRegexp) \
/* ES6 #sec-string.prototype.localecompare */ \
CPP(StringPrototypeLocaleCompare) \
/* ES6 #sec-string.prototype.replace */ \
- TFJ(StringPrototypeReplace, 2, kReceiver, kSearch, kReplace) \
+ TFJ(StringPrototypeReplace, kJSArgcReceiverSlots + 2, kReceiver, kSearch, \
+ kReplace) \
/* ES6 #sec-string.prototype.split */ \
TFJ(StringPrototypeSplit, kDontAdaptArgumentsSentinel) \
/* ES6 #sec-string.raw */ \
@@ -868,15 +885,15 @@ namespace internal {
\
/* TypedArray */ \
/* ES #sec-typedarray-constructors */ \
- TFJ(TypedArrayBaseConstructor, 0, kReceiver) \
+ TFJ(TypedArrayBaseConstructor, kJSArgcReceiverSlots, kReceiver) \
TFJ(TypedArrayConstructor, kDontAdaptArgumentsSentinel) \
CPP(TypedArrayPrototypeBuffer) \
/* ES6 #sec-get-%typedarray%.prototype.bytelength */ \
- TFJ(TypedArrayPrototypeByteLength, 0, kReceiver) \
+ TFJ(TypedArrayPrototypeByteLength, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-get-%typedarray%.prototype.byteoffset */ \
- TFJ(TypedArrayPrototypeByteOffset, 0, kReceiver) \
+ TFJ(TypedArrayPrototypeByteOffset, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-get-%typedarray%.prototype.length */ \
- TFJ(TypedArrayPrototypeLength, 0, kReceiver) \
+ TFJ(TypedArrayPrototypeLength, kJSArgcReceiverSlots, kReceiver) \
/* ES6 #sec-%typedarray%.prototype.copywithin */ \
CPP(TypedArrayPrototypeCopyWithin) \
/* ES6 #sec-%typedarray%.prototype.fill */ \
@@ -890,7 +907,7 @@ namespace internal {
/* ES6 #sec-%typedarray%.prototype.reverse */ \
CPP(TypedArrayPrototypeReverse) \
/* ES6 #sec-get-%typedarray%.prototype-@@tostringtag */ \
- TFJ(TypedArrayPrototypeToStringTag, 0, kReceiver) \
+ TFJ(TypedArrayPrototypeToStringTag, kJSArgcReceiverSlots, kReceiver) \
/* ES6 %TypedArray%.prototype.map */ \
TFJ(TypedArrayPrototypeMap, kDontAdaptArgumentsSentinel) \
\
@@ -908,16 +925,16 @@ namespace internal {
/* WeakMap */ \
TFJ(WeakMapConstructor, kDontAdaptArgumentsSentinel) \
TFS(WeakMapLookupHashIndex, kTable, kKey) \
- TFJ(WeakMapGet, 1, kReceiver, kKey) \
- TFJ(WeakMapPrototypeHas, 1, kReceiver, kKey) \
- TFJ(WeakMapPrototypeSet, 2, kReceiver, kKey, kValue) \
- TFJ(WeakMapPrototypeDelete, 1, kReceiver, kKey) \
+ TFJ(WeakMapGet, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
+ TFJ(WeakMapPrototypeHas, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
+ TFJ(WeakMapPrototypeSet, kJSArgcReceiverSlots + 2, kReceiver, kKey, kValue) \
+ TFJ(WeakMapPrototypeDelete, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
\
/* WeakSet */ \
TFJ(WeakSetConstructor, kDontAdaptArgumentsSentinel) \
- TFJ(WeakSetPrototypeHas, 1, kReceiver, kKey) \
- TFJ(WeakSetPrototypeAdd, 1, kReceiver, kValue) \
- TFJ(WeakSetPrototypeDelete, 1, kReceiver, kValue) \
+ TFJ(WeakSetPrototypeHas, kJSArgcReceiverSlots + 1, kReceiver, kKey) \
+ TFJ(WeakSetPrototypeAdd, kJSArgcReceiverSlots + 1, kReceiver, kValue) \
+ TFJ(WeakSetPrototypeDelete, kJSArgcReceiverSlots + 1, kReceiver, kValue) \
\
/* WeakSet / WeakMap Helpers */ \
TFS(WeakCollectionDelete, kCollection, kKey) \
@@ -948,12 +965,18 @@ namespace internal {
/* specific to Async Generators. Internal / Not exposed to JS code. */ \
TFS(AsyncGeneratorAwaitCaught, kAsyncGeneratorObject, kValue) \
TFS(AsyncGeneratorAwaitUncaught, kAsyncGeneratorObject, kValue) \
- TFJ(AsyncGeneratorAwaitResolveClosure, 1, kReceiver, kValue) \
- TFJ(AsyncGeneratorAwaitRejectClosure, 1, kReceiver, kValue) \
- TFJ(AsyncGeneratorYieldResolveClosure, 1, kReceiver, kValue) \
- TFJ(AsyncGeneratorReturnClosedResolveClosure, 1, kReceiver, kValue) \
- TFJ(AsyncGeneratorReturnClosedRejectClosure, 1, kReceiver, kValue) \
- TFJ(AsyncGeneratorReturnResolveClosure, 1, kReceiver, kValue) \
+ TFJ(AsyncGeneratorAwaitResolveClosure, kJSArgcReceiverSlots + 1, kReceiver, \
+ kValue) \
+ TFJ(AsyncGeneratorAwaitRejectClosure, kJSArgcReceiverSlots + 1, kReceiver, \
+ kValue) \
+ TFJ(AsyncGeneratorYieldResolveClosure, kJSArgcReceiverSlots + 1, kReceiver, \
+ kValue) \
+ TFJ(AsyncGeneratorReturnClosedResolveClosure, kJSArgcReceiverSlots + 1, \
+ kReceiver, kValue) \
+ TFJ(AsyncGeneratorReturnClosedRejectClosure, kJSArgcReceiverSlots + 1, \
+ kReceiver, kValue) \
+ TFJ(AsyncGeneratorReturnResolveClosure, kJSArgcReceiverSlots + 1, kReceiver, \
+ kValue) \
\
/* Async-from-Sync Iterator */ \
\
@@ -966,7 +989,7 @@ namespace internal {
/* #sec-%asyncfromsynciteratorprototype%.return */ \
TFJ(AsyncFromSyncIteratorPrototypeReturn, kDontAdaptArgumentsSentinel) \
/* #sec-async-iterator-value-unwrap-functions */ \
- TFJ(AsyncIteratorValueUnwrap, 1, kReceiver, kValue) \
+ TFJ(AsyncIteratorValueUnwrap, kJSArgcReceiverSlots + 1, kReceiver, kValue) \
\
/* CEntry */ \
ASM(CEntry_Return1_DontSaveFPRegs_ArgvOnStack_NoBuiltinExit, Dummy) \
@@ -1053,6 +1076,8 @@ namespace internal {
CPP(DisplayNamesSupportedLocalesOf) \
/* ecma402 #sec-intl.getcanonicallocales */ \
CPP(IntlGetCanonicalLocales) \
+ /* ecma402 #sec-intl.supportedvaluesof */ \
+ CPP(IntlSupportedValuesOf) \
/* ecma402 #sec-intl-listformat-constructor */ \
CPP(ListFormatConstructor) \
/* ecma402 #sec-intl-list-format.prototype.format */ \
@@ -1156,7 +1181,7 @@ namespace internal {
/* ecma402 #sup-string.prototype.tolocaleuppercase */ \
CPP(StringPrototypeToLocaleUpperCase) \
/* ES #sec-string.prototype.tolowercase */ \
- TFJ(StringPrototypeToLowerCaseIntl, 0, kReceiver) \
+ TFJ(StringPrototypeToLowerCaseIntl, kJSArgcReceiverSlots, kReceiver) \
/* ES #sec-string.prototype.touppercase */ \
CPP(StringPrototypeToUpperCaseIntl) \
TFS(StringToLowerCaseIntl, kString) \
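All of the TFJ argc edits above follow one convention switch: JS argument counts now carry the receiver slot. A minimal sketch of the constants involved, assuming definitions along the lines of src/common/globals.h:

constexpr bool kJSArgcIncludesReceiver = true;  // build-time switch (assumed)
constexpr int kJSArgcReceiverSlots = kJSArgcIncludesReceiver ? 1 : 0;

With the switch on, a builtin taking N user-visible arguments declares kJSArgcReceiverSlots + N instead of a bare N, which is exactly the rewrite applied to the TFJ entries above.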
diff --git a/deps/v8/src/builtins/builtins-descriptors.h b/deps/v8/src/builtins/builtins-descriptors.h
index c2eb44debe..12f7f58ec5 100644
--- a/deps/v8/src/builtins/builtins-descriptors.h
+++ b/deps/v8/src/builtins/builtins-descriptors.h
@@ -14,19 +14,20 @@ namespace v8 {
namespace internal {
// Define interface descriptors for builtins with JS linkage.
-#define DEFINE_TFJ_INTERFACE_DESCRIPTOR(Name, Argc, ...) \
- struct Builtin_##Name##_InterfaceDescriptor { \
- enum ParameterIndices { \
- kJSTarget = compiler::CodeAssembler::kTargetParameterIndex, \
- ##__VA_ARGS__, \
- kJSNewTarget, \
- kJSActualArgumentsCount, \
- kContext, \
- kParameterCount, \
- }; \
- static_assert((Argc) == static_cast<uint16_t>(kParameterCount - 4), \
- "Inconsistent set of arguments"); \
- static_assert(kJSTarget == -1, "Unexpected kJSTarget index value"); \
+#define DEFINE_TFJ_INTERFACE_DESCRIPTOR(Name, Argc, ...) \
+ struct Builtin_##Name##_InterfaceDescriptor { \
+ enum ParameterIndices { \
+ kJSTarget = compiler::CodeAssembler::kTargetParameterIndex, \
+ ##__VA_ARGS__, \
+ kJSNewTarget, \
+ kJSActualArgumentsCount, \
+ kContext, \
+ kParameterCount, \
+ }; \
+ static_assert((Argc) == static_cast<uint16_t>(kParameterCount - 4 + \
+ kJSArgcReceiverSlots), \
+ "Inconsistent set of arguments"); \
+ static_assert(kJSTarget == -1, "Unexpected kJSTarget index value"); \
};
// Define interface descriptors for builtins with StubCall linkage.
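A worked instance of the updated assert, using the WeakMapGet entry from the previous file (enum values derived by expanding the macro by hand; a sketch, not generated code):

// TFJ(WeakMapGet, kJSArgcReceiverSlots + 1, kReceiver, kKey) yields
// kJSTarget=-1, kReceiver=0, kKey=1, kJSNewTarget=2,
// kJSActualArgumentsCount=3, kContext=4, kParameterCount=5, so:
static_assert(kJSArgcReceiverSlots + 1 ==
                  static_cast<uint16_t>(5 - 4 + kJSArgcReceiverSlots),
              "Inconsistent set of arguments");

The - 4 term strips kJSNewTarget, kJSActualArgumentsCount, kContext, and the receiver parameter; the added kJSArgcReceiverSlots accounts for the receiver now being counted in Argc.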
diff --git a/deps/v8/src/builtins/builtins-generator-gen.cc b/deps/v8/src/builtins/builtins-generator-gen.cc
index eb557b1ca1..ff39350725 100644
--- a/deps/v8/src/builtins/builtins-generator-gen.cc
+++ b/deps/v8/src/builtins/builtins-generator-gen.cc
@@ -219,11 +219,10 @@ TF_BUILTIN(SuspendGeneratorBaseline, GeneratorBuiltinsAssembler) {
TNode<JSFunction> closure = LoadJSGeneratorObjectFunction(generator);
auto sfi = LoadJSFunctionSharedFunctionInfo(closure);
- TNode<IntPtrT> formal_parameter_count = Signed(
- ChangeUint32ToWord(LoadSharedFunctionInfoFormalParameterCount(sfi)));
- CSA_ASSERT(this, Word32BinaryNot(IntPtrEqual(
- formal_parameter_count,
- IntPtrConstant(kDontAdaptArgumentsSentinel))));
+ CSA_ASSERT(this,
+ Word32BinaryNot(IsSharedFunctionInfoDontAdaptArguments(sfi)));
+ TNode<IntPtrT> formal_parameter_count = Signed(ChangeUint32ToWord(
+ LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(sfi)));
TNode<FixedArray> parameters_and_registers =
LoadJSGeneratorObjectParametersAndRegisters(generator);
@@ -274,11 +273,10 @@ TF_BUILTIN(ResumeGeneratorBaseline, GeneratorBuiltinsAssembler) {
auto generator = Parameter<JSGeneratorObject>(Descriptor::kGeneratorObject);
TNode<JSFunction> closure = LoadJSGeneratorObjectFunction(generator);
auto sfi = LoadJSFunctionSharedFunctionInfo(closure);
- TNode<IntPtrT> formal_parameter_count = Signed(
- ChangeUint32ToWord(LoadSharedFunctionInfoFormalParameterCount(sfi)));
- CSA_ASSERT(this, Word32BinaryNot(IntPtrEqual(
- formal_parameter_count,
- IntPtrConstant(kDontAdaptArgumentsSentinel))));
+ CSA_ASSERT(this,
+ Word32BinaryNot(IsSharedFunctionInfoDontAdaptArguments(sfi)));
+ TNode<IntPtrT> formal_parameter_count = Signed(ChangeUint32ToWord(
+ LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(sfi)));
TNode<FixedArray> parameters_and_registers =
LoadJSGeneratorObjectParametersAndRegisters(generator);
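Both generator hunks swap a raw field load plus a manual sentinel check for named helpers. A plain C++ model of what those helpers are assumed to do (the sentinel value and slot count are assumptions, not confirmed by this diff):

#include <cstdint>
constexpr uint16_t kDontAdaptArgumentsSentinelRaw = 0xFFFF;  // assumed value
constexpr int kJSArgcReceiverSlots = 1;                      // assumed
// The raw SFI field now stores the formal parameter count including the
// receiver; these model the two views the hunks switch between.
constexpr bool IsDontAdaptArguments(uint16_t raw_count) {
  return raw_count == kDontAdaptArgumentsSentinelRaw;
}
constexpr int FormalParameterCountWithoutReceiver(uint16_t raw_count) {
  return raw_count - kJSArgcReceiverSlots;
}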
diff --git a/deps/v8/src/builtins/builtins-internal-gen.cc b/deps/v8/src/builtins/builtins-internal-gen.cc
index 49ad4b4e7c..03f9fb932a 100644
--- a/deps/v8/src/builtins/builtins-internal-gen.cc
+++ b/deps/v8/src/builtins/builtins-internal-gen.cc
@@ -439,10 +439,9 @@ class TSANRelaxedStoreCodeStubAssembler : public CodeStubAssembler {
void GenerateTSANRelaxedStore(SaveFPRegsMode fp_mode, int size) {
TNode<ExternalReference> function = GetExternalReference(size);
- auto address =
- UncheckedParameter<IntPtrT>(TSANRelaxedStoreDescriptor::kAddress);
+ auto address = UncheckedParameter<IntPtrT>(TSANStoreDescriptor::kAddress);
TNode<IntPtrT> value = BitcastTaggedToWord(
- UncheckedParameter<Object>(TSANRelaxedStoreDescriptor::kValue));
+ UncheckedParameter<Object>(TSANStoreDescriptor::kValue));
CallCFunctionWithCallerSavedRegisters(
function, MachineType::Int32(), fp_mode,
std::make_pair(MachineType::IntPtr(), address),
@@ -483,6 +482,73 @@ TF_BUILTIN(TSANRelaxedStore64SaveFP, TSANRelaxedStoreCodeStubAssembler) {
GenerateTSANRelaxedStore(SaveFPRegsMode::kSave, kInt64Size);
}
+class TSANSeqCstStoreCodeStubAssembler : public CodeStubAssembler {
+ public:
+ explicit TSANSeqCstStoreCodeStubAssembler(compiler::CodeAssemblerState* state)
+ : CodeStubAssembler(state) {}
+
+ TNode<ExternalReference> GetExternalReference(int size) {
+ if (size == kInt8Size) {
+ return ExternalConstant(
+ ExternalReference::tsan_seq_cst_store_function_8_bits());
+ } else if (size == kInt16Size) {
+ return ExternalConstant(
+ ExternalReference::tsan_seq_cst_store_function_16_bits());
+ } else if (size == kInt32Size) {
+ return ExternalConstant(
+ ExternalReference::tsan_seq_cst_store_function_32_bits());
+ } else {
+ CHECK_EQ(size, kInt64Size);
+ return ExternalConstant(
+ ExternalReference::tsan_seq_cst_store_function_64_bits());
+ }
+ }
+
+ void GenerateTSANSeqCstStore(SaveFPRegsMode fp_mode, int size) {
+ TNode<ExternalReference> function = GetExternalReference(size);
+ auto address = UncheckedParameter<IntPtrT>(TSANStoreDescriptor::kAddress);
+ TNode<IntPtrT> value = BitcastTaggedToWord(
+ UncheckedParameter<Object>(TSANStoreDescriptor::kValue));
+ CallCFunctionWithCallerSavedRegisters(
+ function, MachineType::Int32(), fp_mode,
+ std::make_pair(MachineType::IntPtr(), address),
+ std::make_pair(MachineType::IntPtr(), value));
+ Return(UndefinedConstant());
+ }
+};
+
+TF_BUILTIN(TSANSeqCstStore8IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
+ GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt8Size);
+}
+
+TF_BUILTIN(TSANSeqCstStore8SaveFP, TSANSeqCstStoreCodeStubAssembler) {
+ GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt8Size);
+}
+
+TF_BUILTIN(TSANSeqCstStore16IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
+ GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt16Size);
+}
+
+TF_BUILTIN(TSANSeqCstStore16SaveFP, TSANSeqCstStoreCodeStubAssembler) {
+ GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt16Size);
+}
+
+TF_BUILTIN(TSANSeqCstStore32IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
+ GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt32Size);
+}
+
+TF_BUILTIN(TSANSeqCstStore32SaveFP, TSANSeqCstStoreCodeStubAssembler) {
+ GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt32Size);
+}
+
+TF_BUILTIN(TSANSeqCstStore64IgnoreFP, TSANSeqCstStoreCodeStubAssembler) {
+ GenerateTSANSeqCstStore(SaveFPRegsMode::kIgnore, kInt64Size);
+}
+
+TF_BUILTIN(TSANSeqCstStore64SaveFP, TSANSeqCstStoreCodeStubAssembler) {
+ GenerateTSANSeqCstStore(SaveFPRegsMode::kSave, kInt64Size);
+}
+
class TSANRelaxedLoadCodeStubAssembler : public CodeStubAssembler {
public:
explicit TSANRelaxedLoadCodeStubAssembler(compiler::CodeAssemblerState* state)
@@ -501,8 +567,7 @@ class TSANRelaxedLoadCodeStubAssembler : public CodeStubAssembler {
void GenerateTSANRelaxedLoad(SaveFPRegsMode fp_mode, int size) {
TNode<ExternalReference> function = GetExternalReference(size);
- auto address =
- UncheckedParameter<IntPtrT>(TSANRelaxedLoadDescriptor::kAddress);
+ auto address = UncheckedParameter<IntPtrT>(TSANLoadDescriptor::kAddress);
CallCFunctionWithCallerSavedRegisters(
function, MachineType::Int32(), fp_mode,
std::make_pair(MachineType::IntPtr(), address));
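The new seq-cst builtins mirror the relaxed ones, differing only in the memory order of the C-side hook reached through the external reference. A hedged sketch of such a hook (name and signature assumed, not V8's actual symbol):

#include <atomic>
#include <cstdint>
// Under TSAN, V8 routes in-heap accesses through C functions so the race
// detector observes them with the intended memory order.
extern "C" int v8_tsan_seq_cst_store_32_bits(intptr_t address, intptr_t value) {
  reinterpret_cast<std::atomic<int32_t>*>(address)->store(
      static_cast<int32_t>(value), std::memory_order_seq_cst);
  return 0;
}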
@@ -888,21 +953,23 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
auto actual_argc =
UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
+ CodeStubArguments args(this, actual_argc);
- TVARIABLE(Int32T, pushed_argc, actual_argc);
+ TVARIABLE(Int32T, pushed_argc,
+ TruncateIntPtrToInt32(args.GetLengthWithReceiver()));
TNode<SharedFunctionInfo> shared = LoadJSFunctionSharedFunctionInfo(target);
- TNode<Int32T> formal_count =
- UncheckedCast<Int32T>(LoadSharedFunctionInfoFormalParameterCount(shared));
+ TNode<Int32T> formal_count = UncheckedCast<Int32T>(
+ LoadSharedFunctionInfoFormalParameterCountWithReceiver(shared));
// The number of arguments pushed is the maximum of the actual arguments
// count and the formal parameters count, except when the formal parameters
// count is the sentinel.
Label check_argc(this), update_argc(this), done_argc(this);
- Branch(Word32Equal(formal_count, Int32Constant(kDontAdaptArgumentsSentinel)),
- &done_argc, &check_argc);
+ Branch(IsSharedFunctionInfoDontAdaptArguments(shared), &done_argc,
+ &check_argc);
BIND(&check_argc);
Branch(Int32GreaterThan(formal_count, pushed_argc.value()), &update_argc,
&done_argc);
@@ -915,7 +982,7 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
// including the receiver and the extra arguments.
TNode<Int32T> argc = Int32Add(
pushed_argc.value(),
- Int32Constant(BuiltinExitFrameConstants::kNumExtraArgsWithReceiver));
+ Int32Constant(BuiltinExitFrameConstants::kNumExtraArgsWithoutReceiver));
const bool builtin_exit_frame = true;
TNode<Code> code =
@@ -1053,9 +1120,7 @@ void Builtins::Generate_MemMove(MacroAssembler* masm) {
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
- V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
- V8_TARGET_ARCH_MIPS
+#if ENABLE_SPARKPLUG
void Builtins::Generate_BaselineLeaveFrame(MacroAssembler* masm) {
EmitReturnBaseline(masm);
}
@@ -1241,17 +1306,17 @@ TF_BUILTIN(InstantiateAsmJs, CodeStubAssembler) {
GotoIf(TaggedIsSmi(maybe_result_or_smi_zero), &tailcall_to_function);
TNode<SharedFunctionInfo> shared = LoadJSFunctionSharedFunctionInfo(function);
- TNode<Int32T> parameter_count =
- UncheckedCast<Int32T>(LoadSharedFunctionInfoFormalParameterCount(shared));
+ TNode<Int32T> parameter_count = UncheckedCast<Int32T>(
+ LoadSharedFunctionInfoFormalParameterCountWithReceiver(shared));
// This builtin intercepts a call to {function}, where the number of arguments
// pushed is the maximum of actual arguments count and formal parameters
// count.
Label argc_lt_param_count(this), argc_ge_param_count(this);
- Branch(IntPtrLessThan(args.GetLength(), ChangeInt32ToIntPtr(parameter_count)),
+ Branch(IntPtrLessThan(args.GetLengthWithReceiver(),
+ ChangeInt32ToIntPtr(parameter_count)),
&argc_lt_param_count, &argc_ge_param_count);
BIND(&argc_lt_param_count);
- PopAndReturn(Int32Add(parameter_count, Int32Constant(1)),
- maybe_result_or_smi_zero);
+ PopAndReturn(parameter_count, maybe_result_or_smi_zero);
BIND(&argc_ge_param_count);
args.PopAndReturn(maybe_result_or_smi_zero);
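Both builtins in this file implement the same argument-count rule spelled out in the comments above. As plain C++ (a sketch; under the new convention every count here includes the receiver):

#include <algorithm>
int PushedArgc(int actual_argc, int formal_parameter_count, bool dont_adapt) {
  if (dont_adapt) return actual_argc;  // sentinel: keep the actuals as-is
  return std::max(actual_argc, formal_parameter_count);
}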
diff --git a/deps/v8/src/builtins/builtins-intl.cc b/deps/v8/src/builtins/builtins-intl.cc
index c3711898c3..cff87636cb 100644
--- a/deps/v8/src/builtins/builtins-intl.cc
+++ b/deps/v8/src/builtins/builtins-intl.cc
@@ -236,7 +236,7 @@ Handle<JSFunction> CreateBoundFunction(Isolate* isolate,
Handle<SharedFunctionInfo> info =
isolate->factory()->NewSharedFunctionInfoForBuiltin(
isolate->factory()->empty_string(), builtin, kNormalFunction);
- info->set_internal_formal_parameter_count(len);
+ info->set_internal_formal_parameter_count(JSParameterCount(len));
info->set_length(len);
return Factory::JSFunctionBuilder{isolate, info, context}
@@ -576,6 +576,13 @@ BUILTIN(IntlGetCanonicalLocales) {
Intl::GetCanonicalLocales(isolate, locales));
}
+BUILTIN(IntlSupportedValuesOf) {
+ HandleScope scope(isolate);
+ Handle<Object> locales = args.atOrUndefined(isolate, 1);
+
+ RETURN_RESULT_OR_FAILURE(isolate, Intl::SupportedValuesOf(isolate, locales));
+}
+
BUILTIN(ListFormatConstructor) {
HandleScope scope(isolate);
diff --git a/deps/v8/src/builtins/builtins-lazy-gen.cc b/deps/v8/src/builtins/builtins-lazy-gen.cc
index 6ee50ac737..4fb5de7eb5 100644
--- a/deps/v8/src/builtins/builtins-lazy-gen.cc
+++ b/deps/v8/src/builtins/builtins-lazy-gen.cc
@@ -156,8 +156,7 @@ void LazyBuiltinsAssembler::CompileLazy(TNode<JSFunction> function) {
TVARIABLE(Code, code);
// Check if we have baseline code.
- GotoIf(InstanceTypeEqual(sfi_data_type.value(), BASELINE_DATA_TYPE),
- &baseline);
+ GotoIf(InstanceTypeEqual(sfi_data_type.value(), CODET_TYPE), &baseline);
code = sfi_code;
Goto(&tailcall_code);
diff --git a/deps/v8/src/builtins/builtins-object-gen.cc b/deps/v8/src/builtins/builtins-object-gen.cc
index 68112e5bff..558b582789 100644
--- a/deps/v8/src/builtins/builtins-object-gen.cc
+++ b/deps/v8/src/builtins/builtins-object-gen.cc
@@ -436,7 +436,9 @@ TF_BUILTIN(ObjectAssign, ObjectBuiltinsAssembler) {
Label done(this);
// 2. If only one argument was passed, return |to|.
- GotoIf(UintPtrLessThanOrEqual(args.GetLength(), IntPtrConstant(1)), &done);
+ GotoIf(UintPtrLessThanOrEqual(args.GetLengthWithoutReceiver(),
+ IntPtrConstant(1)),
+ &done);
// 3. Let sources be the List of argument values starting with the
// second argument.
@@ -1242,9 +1244,8 @@ TF_BUILTIN(CreateGeneratorObject, ObjectBuiltinsAssembler) {
TNode<BytecodeArray> bytecode_array =
LoadSharedFunctionInfoBytecodeArray(shared);
- TNode<IntPtrT> formal_parameter_count =
- ChangeInt32ToIntPtr(LoadObjectField<Uint16T>(
- shared, SharedFunctionInfo::kFormalParameterCountOffset));
+ TNode<IntPtrT> formal_parameter_count = ChangeInt32ToIntPtr(
+ LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(shared));
TNode<IntPtrT> frame_size = ChangeInt32ToIntPtr(
LoadObjectField<Int32T>(bytecode_array, BytecodeArray::kFrameSizeOffset));
TNode<IntPtrT> size =
diff --git a/deps/v8/src/builtins/builtins-proxy-gen.cc b/deps/v8/src/builtins/builtins-proxy-gen.cc
index 16304a56a5..9442b64d06 100644
--- a/deps/v8/src/builtins/builtins-proxy-gen.cc
+++ b/deps/v8/src/builtins/builtins-proxy-gen.cc
@@ -121,10 +121,10 @@ TF_BUILTIN(CallProxy, ProxiesCodeStubAssembler) {
TNode<Object> receiver = args.GetReceiver();
// 7. Let argArray be CreateArrayFromList(argumentsList).
- TNode<JSArray> array =
- EmitFastNewAllArguments(UncheckedCast<Context>(context),
- UncheckedCast<RawPtrT>(LoadFramePointer()),
- UncheckedCast<IntPtrT>(argc_ptr));
+ TNode<JSArray> array = EmitFastNewAllArguments(
+ UncheckedCast<Context>(context),
+ UncheckedCast<RawPtrT>(LoadFramePointer()),
+ UncheckedCast<IntPtrT>(args.GetLengthWithoutReceiver()));
// 8. Return Call(trap, handler, «target, thisArgument, argArray»).
TNode<Object> result = Call(context, trap, handler, target, receiver, array);
@@ -174,10 +174,10 @@ TF_BUILTIN(ConstructProxy, ProxiesCodeStubAssembler) {
CodeStubArguments args(this, argc_ptr);
// 7. Let argArray be CreateArrayFromList(argumentsList).
- TNode<JSArray> array =
- EmitFastNewAllArguments(UncheckedCast<Context>(context),
- UncheckedCast<RawPtrT>(LoadFramePointer()),
- UncheckedCast<IntPtrT>(argc_ptr));
+ TNode<JSArray> array = EmitFastNewAllArguments(
+ UncheckedCast<Context>(context),
+ UncheckedCast<RawPtrT>(LoadFramePointer()),
+ UncheckedCast<IntPtrT>(args.GetLengthWithoutReceiver()));
// 8. Let newObj be ? Call(trap, handler, « target, argArray, newTarget »).
TNode<Object> new_obj =
diff --git a/deps/v8/src/builtins/builtins-regexp-gen.cc b/deps/v8/src/builtins/builtins-regexp-gen.cc
index 535188c567..6e4307b404 100644
--- a/deps/v8/src/builtins/builtins-regexp-gen.cc
+++ b/deps/v8/src/builtins/builtins-regexp-gen.cc
@@ -18,6 +18,7 @@
#include "src/objects/js-regexp-string-iterator.h"
#include "src/objects/js-regexp.h"
#include "src/objects/regexp-match-info.h"
+#include "src/regexp/regexp-flags.h"
namespace v8 {
namespace internal {
@@ -1041,23 +1042,16 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
CAST(LoadObjectField(CAST(regexp), JSRegExp::kFlagsOffset));
var_flags = SmiUntag(flags_smi);
-#define CASE_FOR_FLAG(FLAG) \
- do { \
- Label next(this); \
- GotoIfNot(IsSetWord(var_flags.value(), FLAG), &next); \
- var_length = Uint32Add(var_length.value(), Uint32Constant(1)); \
- Goto(&next); \
- BIND(&next); \
- } while (false)
+#define CASE_FOR_FLAG(Lower, Camel, ...) \
+ do { \
+ Label next(this); \
+ GotoIfNot(IsSetWord(var_flags.value(), JSRegExp::k##Camel), &next); \
+ var_length = Uint32Add(var_length.value(), Uint32Constant(1)); \
+ Goto(&next); \
+ BIND(&next); \
+ } while (false);
- CASE_FOR_FLAG(JSRegExp::kHasIndices);
- CASE_FOR_FLAG(JSRegExp::kGlobal);
- CASE_FOR_FLAG(JSRegExp::kIgnoreCase);
- CASE_FOR_FLAG(JSRegExp::kLinear);
- CASE_FOR_FLAG(JSRegExp::kMultiline);
- CASE_FOR_FLAG(JSRegExp::kDotAll);
- CASE_FOR_FLAG(JSRegExp::kUnicode);
- CASE_FOR_FLAG(JSRegExp::kSticky);
+ REGEXP_FLAG_LIST(CASE_FOR_FLAG)
#undef CASE_FOR_FLAG
} else {
DCHECK(!is_fastpath);
@@ -1123,26 +1117,19 @@ TNode<String> RegExpBuiltinsAssembler::FlagsGetter(TNode<Context> context,
TVARIABLE(IntPtrT, var_offset,
IntPtrConstant(SeqOneByteString::kHeaderSize - kHeapObjectTag));
-#define CASE_FOR_FLAG(FLAG, CHAR) \
- do { \
- Label next(this); \
- GotoIfNot(IsSetWord(var_flags.value(), FLAG), &next); \
- const TNode<Int32T> value = Int32Constant(CHAR); \
- StoreNoWriteBarrier(MachineRepresentation::kWord8, string, \
- var_offset.value(), value); \
- var_offset = IntPtrAdd(var_offset.value(), int_one); \
- Goto(&next); \
- BIND(&next); \
- } while (false)
-
- CASE_FOR_FLAG(JSRegExp::kHasIndices, 'd');
- CASE_FOR_FLAG(JSRegExp::kGlobal, 'g');
- CASE_FOR_FLAG(JSRegExp::kIgnoreCase, 'i');
- CASE_FOR_FLAG(JSRegExp::kLinear, 'l');
- CASE_FOR_FLAG(JSRegExp::kMultiline, 'm');
- CASE_FOR_FLAG(JSRegExp::kDotAll, 's');
- CASE_FOR_FLAG(JSRegExp::kUnicode, 'u');
- CASE_FOR_FLAG(JSRegExp::kSticky, 'y');
+#define CASE_FOR_FLAG(Lower, Camel, LowerCamel, Char, ...) \
+ do { \
+ Label next(this); \
+ GotoIfNot(IsSetWord(var_flags.value(), JSRegExp::k##Camel), &next); \
+ const TNode<Int32T> value = Int32Constant(Char); \
+ StoreNoWriteBarrier(MachineRepresentation::kWord8, string, \
+ var_offset.value(), value); \
+ var_offset = IntPtrAdd(var_offset.value(), int_one); \
+ Goto(&next); \
+ BIND(&next); \
+ } while (false);
+
+ REGEXP_FLAG_LIST(CASE_FOR_FLAG)
#undef CASE_FOR_FLAG
if (is_fastpath) {
@@ -1391,29 +1378,12 @@ TNode<BoolT> RegExpBuiltinsAssembler::SlowFlagGetter(TNode<Context> context,
switch (flag) {
case JSRegExp::kNone:
UNREACHABLE();
- case JSRegExp::kGlobal:
- name = isolate()->factory()->global_string();
- break;
- case JSRegExp::kIgnoreCase:
- name = isolate()->factory()->ignoreCase_string();
- break;
- case JSRegExp::kMultiline:
- name = isolate()->factory()->multiline_string();
- break;
- case JSRegExp::kDotAll:
- UNREACHABLE(); // Never called for dotAll.
- case JSRegExp::kSticky:
- name = isolate()->factory()->sticky_string();
- break;
- case JSRegExp::kUnicode:
- name = isolate()->factory()->unicode_string();
- break;
- case JSRegExp::kHasIndices:
- name = isolate()->factory()->has_indices_string();
- break;
- case JSRegExp::kLinear:
- name = isolate()->factory()->linear_string();
- break;
+#define V(Lower, Camel, LowerCamel, Char, Bit) \
+ case JSRegExp::k##Camel: \
+ name = isolate()->factory()->LowerCamel##_string(); \
+ break;
+ REGEXP_FLAG_LIST(V)
+#undef V
}
TNode<Object> value = GetProperty(context, regexp, name);
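All three hunks above replace hand-written per-flag cases with the REGEXP_FLAG_LIST X-macro from src/regexp/regexp-flags.h. A self-contained illustration of the pattern (the two demo entries are examples, not the real flag list):

// Each entry supplies (lower, Camel, lowerCamel, char, bit); a call site
// names the fields it needs and swallows the rest via trailing parameters.
#define DEMO_REGEXP_FLAG_LIST(V)                 \
  V(global, Global, global, 'g', 0)              \
  V(ignore_case, IgnoreCase, ignoreCase, 'i', 1)

#define COUNT_FLAG(Lower, Camel, LowerCamel, Char, Bit) +1
constexpr int kDemoFlagCount = 0 DEMO_REGEXP_FLAG_LIST(COUNT_FLAG);
#undef COUNT_FLAG
static_assert(kDemoFlagCount == 2, "one increment per list entry");

Adding a flag then means touching the list once instead of every switch and getter.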
diff --git a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
index fa536792ed..ff0b5d4722 100644
--- a/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
+++ b/deps/v8/src/builtins/builtins-sharedarraybuffer-gen.cc
@@ -204,26 +204,28 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
arraysize(case_labels));
BIND(&i8);
- Return(SmiFromInt32(AtomicLoad<Int8T>(backing_store, index_word)));
+ Return(SmiFromInt32(AtomicLoad<Int8T>(AtomicMemoryOrder::kSeqCst,
+ backing_store, index_word)));
BIND(&u8);
- Return(SmiFromInt32(AtomicLoad<Uint8T>(backing_store, index_word)));
+ Return(SmiFromInt32(AtomicLoad<Uint8T>(AtomicMemoryOrder::kSeqCst,
+ backing_store, index_word)));
BIND(&i16);
- Return(
- SmiFromInt32(AtomicLoad<Int16T>(backing_store, WordShl(index_word, 1))));
+ Return(SmiFromInt32(AtomicLoad<Int16T>(
+ AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 1))));
BIND(&u16);
- Return(
- SmiFromInt32(AtomicLoad<Uint16T>(backing_store, WordShl(index_word, 1))));
+ Return(SmiFromInt32(AtomicLoad<Uint16T>(
+ AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 1))));
BIND(&i32);
- Return(ChangeInt32ToTagged(
- AtomicLoad<Int32T>(backing_store, WordShl(index_word, 2))));
+ Return(ChangeInt32ToTagged(AtomicLoad<Int32T>(
+ AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 2))));
BIND(&u32);
- Return(ChangeUint32ToTagged(
- AtomicLoad<Uint32T>(backing_store, WordShl(index_word, 2))));
+ Return(ChangeUint32ToTagged(AtomicLoad<Uint32T>(
+ AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 2))));
#if V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6
BIND(&i64);
Goto(&u64);
@@ -235,12 +237,12 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
}
#else
BIND(&i64);
- Return(BigIntFromSigned64(
- AtomicLoad64<AtomicInt64>(backing_store, WordShl(index_word, 3))));
+ Return(BigIntFromSigned64(AtomicLoad64<AtomicInt64>(
+ AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 3))));
BIND(&u64);
- Return(BigIntFromUnsigned64(
- AtomicLoad64<AtomicUint64>(backing_store, WordShl(index_word, 3))));
+ Return(BigIntFromUnsigned64(AtomicLoad64<AtomicUint64>(
+ AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 3))));
#endif
// This shouldn't happen, we've already validated the type.
@@ -307,18 +309,18 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
arraysize(case_labels));
BIND(&u8);
- AtomicStore(MachineRepresentation::kWord8, backing_store, index_word,
- value_word32);
+ AtomicStore(MachineRepresentation::kWord8, AtomicMemoryOrder::kSeqCst,
+ backing_store, index_word, value_word32);
Return(value_integer);
BIND(&u16);
- AtomicStore(MachineRepresentation::kWord16, backing_store,
- WordShl(index_word, 1), value_word32);
+ AtomicStore(MachineRepresentation::kWord16, AtomicMemoryOrder::kSeqCst,
+ backing_store, WordShl(index_word, 1), value_word32);
Return(value_integer);
BIND(&u32);
- AtomicStore(MachineRepresentation::kWord32, backing_store,
- WordShl(index_word, 2), value_word32);
+ AtomicStore(MachineRepresentation::kWord32, AtomicMemoryOrder::kSeqCst,
+ backing_store, WordShl(index_word, 2), value_word32);
Return(value_integer);
BIND(&u64);
@@ -340,7 +342,8 @@ TF_BUILTIN(AtomicsStore, SharedArrayBufferBuiltinsAssembler) {
TVARIABLE(UintPtrT, var_high);
BigIntToRawBytes(value_bigint, &var_low, &var_high);
TNode<UintPtrT> high = Is64() ? TNode<UintPtrT>() : var_high.value();
- AtomicStore64(backing_store, WordShl(index_word, 3), var_low.value(), high);
+ AtomicStore64(AtomicMemoryOrder::kSeqCst, backing_store,
+ WordShl(index_word, 3), var_low.value(), high);
Return(value_bigint);
#endif
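The CSA atomics now take an explicit AtomicMemoryOrder at every call site. The same distinction in standard C++, for reference (a semantic sketch, not V8 code):

#include <atomic>
#include <cstdint>
// Atomics.load and Atomics.store require sequential consistency; the order
// that used to be implicit in AtomicLoad/AtomicStore is now spelled out.
int32_t LoadSeqCst(std::atomic<int32_t>* cell) {
  return cell->load(std::memory_order_seq_cst);
}
void StoreSeqCst(std::atomic<int32_t>* cell, int32_t v) {
  cell->store(v, std::memory_order_seq_cst);
}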
diff --git a/deps/v8/src/builtins/builtins-string-gen.cc b/deps/v8/src/builtins/builtins-string-gen.cc
index 61c1d8d387..0ce2fd0f17 100644
--- a/deps/v8/src/builtins/builtins-string-gen.cc
+++ b/deps/v8/src/builtins/builtins-string-gen.cc
@@ -792,12 +792,12 @@ TF_BUILTIN(StringFromCharCode, StringBuiltinsAssembler) {
CodeStubArguments arguments(this, argc);
TNode<Uint32T> unsigned_argc =
- Unsigned(TruncateIntPtrToInt32(arguments.GetLength()));
+ Unsigned(TruncateIntPtrToInt32(arguments.GetLengthWithoutReceiver()));
// Check if we have exactly one argument (plus the implicit receiver), i.e.
// if the parent frame is not an arguments adaptor frame.
Label if_oneargument(this), if_notoneargument(this);
- Branch(IntPtrEqual(arguments.GetLength(), IntPtrConstant(1)), &if_oneargument,
- &if_notoneargument);
+ Branch(IntPtrEqual(arguments.GetLengthWithoutReceiver(), IntPtrConstant(1)),
+ &if_oneargument, &if_notoneargument);
BIND(&if_oneargument);
{
diff --git a/deps/v8/src/builtins/builtins-string.tq b/deps/v8/src/builtins/builtins-string.tq
index 4111155fd2..663ba86cdb 100644
--- a/deps/v8/src/builtins/builtins-string.tq
+++ b/deps/v8/src/builtins/builtins-string.tq
@@ -32,7 +32,7 @@ transitioning macro ToStringImpl(context: Context, o: JSAny): String {
ThrowTypeError(MessageTemplate::kSymbolToString);
}
case (JSAny): {
- return runtime::ToString(context, o);
+ return runtime::ToString(context, result);
}
}
}
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.cc b/deps/v8/src/builtins/builtins-typed-array-gen.cc
index a76650d052..0fd0c32340 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.cc
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.cc
@@ -65,9 +65,8 @@ TNode<JSArrayBuffer> TypedArrayBuiltinsAssembler::AllocateEmptyOnHeapBuffer(
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kByteLengthOffset,
byte_length);
- InitializeExternalPointerField(buffer, JSArrayBuffer::kBackingStoreOffset,
- PointerConstant(nullptr),
- kArrayBufferBackingStoreTag);
+ StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kBackingStoreOffset,
+ PointerConstant(nullptr));
StoreObjectFieldNoWriteBarrier(buffer, JSArrayBuffer::kExtensionOffset,
IntPtrConstant(0));
for (int offset = JSArrayBuffer::kHeaderSize;
@@ -404,12 +403,6 @@ void TypedArrayBuiltinsAssembler::DispatchTypedArrayByElementsKind(
BIND(&next);
}
-void TypedArrayBuiltinsAssembler::AllocateJSTypedArrayExternalPointerEntry(
- TNode<JSTypedArray> holder) {
- InitializeExternalPointerField(
- holder, IntPtrConstant(JSTypedArray::kExternalPointerOffset));
-}
-
void TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
TNode<JSTypedArray> holder, TNode<ByteArray> base, TNode<UintPtrT> offset) {
offset = UintPtrAdd(UintPtrConstant(ByteArray::kHeaderSize - kHeapObjectTag),
diff --git a/deps/v8/src/builtins/builtins-typed-array-gen.h b/deps/v8/src/builtins/builtins-typed-array-gen.h
index bb8a15ef02..a309f67286 100644
--- a/deps/v8/src/builtins/builtins-typed-array-gen.h
+++ b/deps/v8/src/builtins/builtins-typed-array-gen.h
@@ -83,7 +83,6 @@ class TypedArrayBuiltinsAssembler : public CodeStubAssembler {
void DispatchTypedArrayByElementsKind(
TNode<Word32T> elements_kind, const TypedArraySwitchCase& case_function);
- void AllocateJSTypedArrayExternalPointerEntry(TNode<JSTypedArray> holder);
void SetJSTypedArrayOnHeapDataPtr(TNode<JSTypedArray> holder,
TNode<ByteArray> base,
TNode<UintPtrT> offset);
diff --git a/deps/v8/src/builtins/console.tq b/deps/v8/src/builtins/console.tq
index c0daa19b6d..483b5422d8 100644
--- a/deps/v8/src/builtins/console.tq
+++ b/deps/v8/src/builtins/console.tq
@@ -12,7 +12,8 @@ javascript builtin FastConsoleAssert(
if (ToBoolean(arguments[0])) {
return Undefined;
} else {
- tail ConsoleAssert(target, newTarget, Convert<int32>(arguments.length));
+ tail ConsoleAssert(
+ target, newTarget, Convert<int32>(arguments.actual_count));
}
}
}
diff --git a/deps/v8/src/builtins/convert.tq b/deps/v8/src/builtins/convert.tq
index c1c73d0060..2849b782c8 100644
--- a/deps/v8/src/builtins/convert.tq
+++ b/deps/v8/src/builtins/convert.tq
@@ -180,6 +180,9 @@ Convert<uint8, intptr>(i: intptr): uint8 {
Convert<int8, intptr>(i: intptr): int8 {
return %RawDownCast<int8>(TruncateIntPtrToInt32(i) << 24 >> 24);
}
+Convert<uint16, uint32>(i: uint32): uint16 {
+ return %RawDownCast<uint16>(i & 0xFFFF);
+}
Convert<int32, uint8>(i: uint8): int32 {
return Signed(Convert<uint32>(i));
}
diff --git a/deps/v8/src/builtins/frame-arguments.tq b/deps/v8/src/builtins/frame-arguments.tq
index 5f25c97dc3..9dd26e2327 100644
--- a/deps/v8/src/builtins/frame-arguments.tq
+++ b/deps/v8/src/builtins/frame-arguments.tq
@@ -6,7 +6,11 @@
struct Arguments {
const frame: FrameWithArguments;
const base: RawPtr;
+ // length is the number of arguments without the receiver.
const length: intptr;
+ // actual_count is the actual number of arguments on the stack (depending
+ // on kJSArgcIncludesReceiver, it may or may not include the receiver).
+ const actual_count: intptr;
}
extern operator '[]' macro GetArgumentValue(Arguments, intptr): JSAny;
@@ -45,8 +49,8 @@ macro GetFrameWithArgumentsInfo(implicit context: Context)():
const f: JSFunction = frame.function;
const shared: SharedFunctionInfo = f.shared_function_info;
- const formalParameterCount: bint =
- Convert<bint>(Convert<int32>(shared.formal_parameter_count));
+ const formalParameterCount: bint = Convert<bint>(Convert<int32>(
+ LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(shared)));
// TODO(victorgomes): When removing the v8_disable_arguments_adaptor flag,
// FrameWithArgumentsInfo can be simplified, since the frame field already
// contains the argument count.
diff --git a/deps/v8/src/builtins/frames.tq b/deps/v8/src/builtins/frames.tq
index 03336bd464..3e959a094f 100644
--- a/deps/v8/src/builtins/frames.tq
+++ b/deps/v8/src/builtins/frames.tq
@@ -66,8 +66,12 @@ operator '.caller' macro LoadCallerFromFrame(f: Frame): Frame {
const kStandardFrameArgCOffset: constexpr int31
generates 'StandardFrameConstants::kArgCOffset';
+const kJSArgcReceiverSlots: constexpr int31
+ generates 'kJSArgcReceiverSlots';
+
operator '.argument_count' macro LoadArgCFromFrame(f: Frame): intptr {
- return LoadIntptrFromFrame(f, kStandardFrameArgCOffset);
+ return LoadIntptrFromFrame(f, kStandardFrameArgCOffset) -
+ kJSArgcReceiverSlots;
}
type ContextOrFrameType = Context|FrameType;
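The receiver-slot bookkeeping in the two Torque files above reduces to simple arithmetic. A worked example as C++ (assuming kJSArgcReceiverSlots == 1, i.e. the receiver is included in the frame's argc):

constexpr int kJSArgcReceiverSlots = 1;  // assumed
// A call f(1, 2, 3) stores argc = 4 in the frame's argc slot; the
// '.argument_count' operator above strips the receiver slot back off.
constexpr int UserVisibleArgc(int frame_argc) {
  return frame_argc - kJSArgcReceiverSlots;
}
static_assert(UserVisibleArgc(4) == 3, "receiver slot excluded");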
diff --git a/deps/v8/src/builtins/function.tq b/deps/v8/src/builtins/function.tq
index e6ce7edfef..682fdce4ba 100644
--- a/deps/v8/src/builtins/function.tq
+++ b/deps/v8/src/builtins/function.tq
@@ -38,7 +38,7 @@ transitioning javascript builtin
FastFunctionPrototypeBind(
js-implicit context: NativeContext, receiver: JSAny, newTarget: JSAny,
target: JSFunction)(...arguments): JSAny {
- const argc: intptr = arguments.length;
+ const argc: intptr = arguments.actual_count;
try {
typeswitch (receiver) {
case (fn: JSFunction|JSBoundFunction): {
diff --git a/deps/v8/src/builtins/ia32/builtins-ia32.cc b/deps/v8/src/builtins/ia32/builtins-ia32.cc
index 7a8875fee9..63aba94fe9 100644
--- a/deps/v8/src/builtins/ia32/builtins-ia32.cc
+++ b/deps/v8/src/builtins/ia32/builtins-ia32.cc
@@ -78,6 +78,36 @@ static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
namespace {
+enum class ArgumentsElementType {
+ kRaw, // Push arguments as they are.
+ kHandle // Dereference arguments before pushing.
+};
+
+void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
+ Register scratch1, Register scratch2,
+ ArgumentsElementType element_type) {
+ DCHECK(!AreAliased(array, argc, scratch1, scratch2));
+ Register counter = scratch1;
+ Label loop, entry;
+ if (kJSArgcIncludesReceiver) {
+ __ lea(counter, Operand(argc, -kJSArgcReceiverSlots));
+ } else {
+ __ mov(counter, argc);
+ }
+ __ jmp(&entry);
+ __ bind(&loop);
+ Operand value(array, counter, times_system_pointer_size, 0);
+ if (element_type == ArgumentsElementType::kHandle) {
+ DCHECK(scratch2 != no_reg);
+ __ mov(scratch2, value);
+ value = Operand(scratch2, 0);
+ }
+ __ Push(value);
+ __ bind(&entry);
+ __ dec(counter);
+ __ j(greater_equal, &loop, Label::kNear);
+}
+
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- eax: number of arguments
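The Generate_PushArguments helper introduced above walks the argument array from the highest index down and, for kHandle, dereferences each slot before pushing. The same logic as plain C++ (an analogy, not the assembler):

#include <cstdint>
#include <vector>
enum class ArgumentsElementType { kRaw, kHandle };
void PushArguments(std::vector<intptr_t>& stack, const intptr_t* array,
                   int argc_without_receiver, ArgumentsElementType type) {
  for (int i = argc_without_receiver - 1; i >= 0; --i) {  // highest first
    intptr_t value = array[i];
    if (type == ArgumentsElementType::kHandle) {
      value = *reinterpret_cast<const intptr_t*>(value);  // deref the handle
    }
    stack.push_back(value);
  }
}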
@@ -109,7 +139,10 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ lea(esi, Operand(ebp, StandardFrameConstants::kCallerSPOffset +
kSystemPointerSize));
// Copy arguments to the expression stack.
- __ PushArray(esi, eax, ecx);
+ // esi: Pointer to start of arguments.
+ // eax: Number of arguments.
+ Generate_PushArguments(masm, esi, eax, ecx, no_reg,
+ ArgumentsElementType::kRaw);
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
@@ -130,7 +163,9 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ ret(0);
__ bind(&stack_overflow);
@@ -237,7 +272,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// InvokeFunction.
// Copy arguments to the expression stack.
- __ PushArray(edi, eax, ecx);
+ // edi: Pointer to start of arguments.
+ // eax: Number of arguments.
+ Generate_PushArguments(masm, edi, eax, ecx, no_reg,
+ ArgumentsElementType::kRaw);
// Push implicit receiver.
__ movd(ecx, xmm0);
@@ -282,7 +320,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Remove caller arguments from the stack and return.
__ DropArguments(edx, ecx, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ ret(0);
// Otherwise we do a smi check and fall through to check if the return value
@@ -497,17 +537,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ bind(&enough_stack_space);
- // Copy arguments to the stack in a loop.
- Label loop, entry;
- __ Move(ecx, eax);
- __ jmp(&entry, Label::kNear);
- __ bind(&loop);
- // Push the parameter from argv.
- __ mov(scratch2, Operand(scratch1, ecx, times_system_pointer_size, 0));
- __ push(Operand(scratch2, 0)); // dereference handle
- __ bind(&entry);
- __ dec(ecx);
- __ j(greater_equal, &loop);
+ // Copy arguments to the stack.
+ // scratch1 (edx): Pointer to start of arguments.
+ // eax: Number of arguments.
+ Generate_PushArguments(masm, scratch1, eax, ecx, scratch2,
+ ArgumentsElementType::kHandle);
// Load the previous frame pointer to access C arguments
__ mov(scratch2, Operand(ebp, 0));
@@ -562,6 +596,16 @@ static void GetSharedFunctionInfoBytecode(MacroAssembler* masm,
__ bind(&done);
}
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ mov(scratch, FieldOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ cmp(scratch, Immediate(static_cast<int>(CodeKind::BASELINE)));
+ __ Assert(equal, AbortReason::kExpectedBaselineData);
+}
+
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Register sfi_data,
Register scratch1,
@@ -570,8 +614,16 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label done;
__ LoadMap(scratch1, sfi_data);
- __ CmpInstanceType(scratch1, BASELINE_DATA_TYPE);
- __ j(equal, is_baseline);
+ __ CmpInstanceType(scratch1, CODET_TYPE);
+ if (FLAG_debug_code) {
+ Label not_baseline;
+ __ j(not_equal, &not_baseline);
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
+ __ j(equal, is_baseline);
+ __ bind(&not_baseline);
+ } else {
+ __ j(equal, is_baseline);
+ }
__ CmpInstanceType(scratch1, INTERPRETER_DATA_TYPE);
__ j(not_equal, &done, Label::kNear);
@@ -641,6 +693,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ mov(ecx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ movzx_w(ecx, FieldOperand(
ecx, SharedFunctionInfo::kFormalParameterCountOffset));
+ if (kJSArgcIncludesReceiver) {
+ __ dec(ecx);
+ }
__ mov(ebx,
FieldOperand(edx, JSGeneratorObject::kParametersAndRegistersOffset));
{
@@ -677,7 +732,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ bind(&is_baseline);
__ Pop(eax);
- __ CmpObjectType(ecx, BASELINE_DATA_TYPE, ecx);
+ __ CmpObjectType(ecx, CODET_TYPE, ecx);
__ Assert(equal, AbortReason::kMissingBytecodeArray);
__ bind(&ok);
@@ -757,7 +812,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
__ mov(actual_params_size, Operand(ebp, StandardFrameConstants::kArgCOffset));
__ lea(actual_params_size,
Operand(actual_params_size, times_system_pointer_size,
- kSystemPointerSize));
+ kJSArgcIncludesReceiver ? 0 : kSystemPointerSize));
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
@@ -1008,7 +1063,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
// stack left to right.
//
// The live registers are:
-// o eax: actual argument count (not including the receiver)
+// o eax: actual argument count
// o edi: the JS function object being called
// o edx: the incoming new target or generator object
// o esi: our context
@@ -1257,7 +1312,6 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
// Load the baseline code into the closure.
__ movd(ecx, xmm2);
- __ mov(ecx, FieldOperand(ecx, BaselineData::kBaselineCodeOffset));
static_assert(kJavaScriptCallCodeStartRegister == ecx, "ABI mismatch");
__ push(edx); // Spill.
__ push(ecx);
@@ -1303,7 +1357,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
InterpreterPushArgsMode mode) {
DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- ecx : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
@@ -1321,19 +1375,22 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Add a stack check before pushing the arguments.
__ StackOverflowCheck(eax, scratch, &stack_overflow, true);
-
__ movd(xmm0, eax); // Spill number of arguments.
// Compute the expected number of arguments.
- __ mov(scratch, eax);
+ int argc_modification = kJSArgcIncludesReceiver ? 0 : 1;
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ argc_modification -= 1;
+ }
+ if (argc_modification != 0) {
+ __ lea(scratch, Operand(eax, argc_modification));
+ } else {
+ __ mov(scratch, eax);
+ }
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(eax);
- if (receiver_mode != ConvertReceiverMode::kNullOrUndefined) {
- __ add(scratch, Immediate(1)); // Add one for receiver.
- }
-
// Find the address of the last argument.
__ shl(scratch, kSystemPointerSizeLog2);
__ neg(scratch);
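The argc_modification computation above reads more easily as plain arithmetic (a sketch; the result feeds the stack-pointer offset computed next):

// Number of argument slots about to be pushed for the call.
int SlotsToPush(int argc, bool argc_includes_receiver,
                bool receiver_is_null_or_undefined) {
  int slots = argc + (argc_includes_receiver ? 0 : 1);  // add receiver slot
  // An undefined receiver is materialized separately, so it is not copied.
  if (receiver_is_null_or_undefined) slots -= 1;
  return slots;
}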
@@ -1385,9 +1442,10 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
Label* stack_overflow) {
// We have to move return address and the temporary registers above it
// before we can copy arguments onto the stack. To achieve this:
- // Step 1: Increment the stack pointer by num_args + 1 (for receiver).
- // Step 2: Move the return address and values around it to the top of stack.
- // Step 3: Copy the arguments into the correct locations.
+ // Step 1: Increment the stack pointer by num_args + 1 for the receiver (if
+ // it is not already included in argc).
+ // Step 2: Move the return address and values around it to the top of stack.
+ // Step 3: Copy the arguments into the correct locations.
// current stack =====> required stack layout
// | | | return addr | (2) <-- esp (1)
// | | | addtl. slot |
@@ -1402,8 +1460,10 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
// Step 1 - Update the stack pointer.
+ constexpr int receiver_offset =
+ kJSArgcIncludesReceiver ? 0 : kSystemPointerSize;
__ lea(scratch1,
- Operand(num_args, times_system_pointer_size, kSystemPointerSize));
+ Operand(num_args, times_system_pointer_size, receiver_offset));
__ AllocateStackSpace(scratch1);
// Step 2 move return_address and slots around it to the correct locations.
@@ -1412,7 +1472,7 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
// extra slot for receiver, so no extra checks are required to avoid copy.
for (int i = 0; i < num_slots_to_move + 1; i++) {
__ mov(scratch1, Operand(esp, num_args, times_system_pointer_size,
- (i + 1) * kSystemPointerSize));
+ i * kSystemPointerSize + receiver_offset));
__ mov(Operand(esp, i * kSystemPointerSize), scratch1);
}
@@ -1434,7 +1494,11 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
__ bind(&loop_check);
__ inc(scratch1);
__ cmp(scratch1, eax);
- __ j(less_equal, &loop_header, Label::kNear);
+ if (kJSArgcIncludesReceiver) {
+ __ j(less, &loop_header, Label::kNear);
+ } else {
+ __ j(less_equal, &loop_header, Label::kNear);
+ }
}
} // anonymous namespace
@@ -1443,7 +1507,7 @@ void Generate_InterpreterPushZeroAndArgsAndReturnAddress(
void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- ecx : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order
// as they are to be pushed onto the stack.
@@ -1832,7 +1896,8 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// the LAZY deopt point. eax contains the arguments count, the return value
// from LAZY is always the last argument.
__ movd(Operand(esp, eax, times_system_pointer_size,
- BuiltinContinuationFrameConstants::kFixedFrameSize),
+ BuiltinContinuationFrameConstants::kFixedFrameSize -
+ (kJSArgcIncludesReceiver ? kSystemPointerSize : 0)),
xmm0);
}
__ mov(
@@ -1894,23 +1959,29 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
Label no_arg_array, no_this_arg;
StackArgumentsAccessor args(eax);
// Spill receiver to allow the usage of edi as a scratch register.
- __ movd(xmm0, args[0]);
+ __ movd(xmm0, args.GetReceiverOperand());
__ LoadRoot(edx, RootIndex::kUndefinedValue);
__ mov(edi, edx);
- __ test(eax, eax);
- __ j(zero, &no_this_arg, Label::kNear);
+ if (kJSArgcIncludesReceiver) {
+ __ cmp(eax, Immediate(JSParameterCount(0)));
+ __ j(equal, &no_this_arg, Label::kNear);
+ } else {
+ __ test(eax, eax);
+ __ j(zero, &no_this_arg, Label::kNear);
+ }
{
__ mov(edi, args[1]);
- __ cmp(eax, Immediate(1));
+ __ cmp(eax, Immediate(JSParameterCount(1)));
__ j(equal, &no_arg_array, Label::kNear);
__ mov(edx, args[2]);
__ bind(&no_arg_array);
}
__ bind(&no_this_arg);
- __ DropArgumentsAndPushNewReceiver(eax, edi, ecx,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ eax, edi, ecx, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
// Restore receiver to edi.
__ movd(edi, xmm0);
@@ -1940,7 +2011,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// arguments to the receiver.
__ bind(&no_arguments);
{
- __ Move(eax, 0);
+ __ Move(eax, JSParameterCount(0));
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
}
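The JSParameterCount comparisons above lean on a small helper; a sketch of its assumed shape:

constexpr int JSParameterCount(int param_count_without_receiver) {
  return param_count_without_receiver + kJSArgcReceiverSlots;
}
// With kJSArgcReceiverSlots == 1, "no user arguments" means eax == 1, which
// is why the old 'test eax, eax' zero checks become comparisons against
// JSParameterCount(0).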
@@ -1954,7 +2025,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// ...
// esp[8 * n] : Argument n-1
// esp[8 * (n + 1)] : Argument n
- // eax contains the number of arguments, n, not counting the receiver.
+ // eax contains the number of arguments, n.
// 1. Get the callable to call (passed as receiver) from the stack.
{
@@ -1969,8 +2040,13 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 3. Make sure we have at least one argument.
{
Label done;
- __ test(eax, eax);
- __ j(not_zero, &done, Label::kNear);
+ if (kJSArgcIncludesReceiver) {
+ __ cmp(eax, Immediate(JSParameterCount(0)));
+ __ j(greater, &done, Label::kNear);
+ } else {
+ __ test(eax, eax);
+ __ j(not_zero, &done, Label::kNear);
+ }
__ PushRoot(RootIndex::kUndefinedValue);
__ inc(eax);
__ bind(&done);
@@ -2004,12 +2080,12 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadRoot(edi, RootIndex::kUndefinedValue);
__ mov(edx, edi);
__ mov(ecx, edi);
- __ cmp(eax, Immediate(1));
+ __ cmp(eax, Immediate(JSParameterCount(1)));
__ j(below, &done, Label::kNear);
__ mov(edi, args[1]); // target
__ j(equal, &done, Label::kNear);
__ mov(ecx, args[2]); // thisArgument
- __ cmp(eax, Immediate(3));
+ __ cmp(eax, Immediate(JSParameterCount(3)));
__ j(below, &done, Label::kNear);
__ mov(edx, args[3]); // argumentsList
__ bind(&done);
@@ -2017,9 +2093,10 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
// Spill argumentsList to use edx as a scratch register.
__ movd(xmm0, edx);
- __ DropArgumentsAndPushNewReceiver(eax, ecx, edx,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ eax, ecx, edx, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
// Restore argumentsList.
__ movd(edx, xmm0);
@@ -2061,13 +2138,13 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ LoadRoot(edi, RootIndex::kUndefinedValue);
__ mov(edx, edi);
__ mov(ecx, edi);
- __ cmp(eax, Immediate(1));
+ __ cmp(eax, Immediate(JSParameterCount(1)));
__ j(below, &done, Label::kNear);
__ mov(edi, args[1]); // target
__ mov(edx, edi);
__ j(equal, &done, Label::kNear);
__ mov(ecx, args[2]); // argumentsList
- __ cmp(eax, Immediate(3));
+ __ cmp(eax, Immediate(JSParameterCount(3)));
__ j(below, &done, Label::kNear);
__ mov(edx, args[3]); // new.target
__ bind(&done);
@@ -2078,7 +2155,8 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ DropArgumentsAndPushNewReceiver(
eax, masm->RootAsOperand(RootIndex::kUndefinedValue), ecx,
TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
// Restore argumentsList.
__ movd(ecx, xmm0);
@@ -2105,6 +2183,59 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+namespace {
+
+// Allocate new stack space for |count| arguments and shift the arguments
+// already on the stack. |pointer_to_new_space_out| points to the first free
+// slot on the stack to copy additional arguments to, and |argc_in_out| is
+// updated to include |count|.
+void Generate_AllocateSpaceAndShiftExistingArguments(
+ MacroAssembler* masm, Register count, Register argc_in_out,
+ Register pointer_to_new_space_out, Register scratch1, Register scratch2) {
+ DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
+ scratch2));
+ // Use pointer_to_new_space_out as scratch until we set it to the correct
+ // value at the end.
+ Register old_esp = pointer_to_new_space_out;
+ Register new_space = scratch1;
+ __ mov(old_esp, esp);
+
+ __ lea(new_space, Operand(count, times_system_pointer_size, 0));
+ __ AllocateStackSpace(new_space);
+
+ if (!kJSArgcIncludesReceiver) {
+ __ inc(argc_in_out);
+ }
+ Register current = scratch1;
+ Register value = scratch2;
+
+ Label loop, entry;
+ __ mov(current, 0);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ mov(value, Operand(old_esp, current, times_system_pointer_size, 0));
+ __ mov(Operand(esp, current, times_system_pointer_size, 0), value);
+ __ inc(current);
+ __ bind(&entry);
+ __ cmp(current, argc_in_out);
+ __ j(less_equal, &loop, Label::kNear);
+
+ // Point to the next free slot above the shifted arguments (argc slots, plus
+ // one more for the return address).
+ __ lea(
+ pointer_to_new_space_out,
+ Operand(esp, argc_in_out, times_system_pointer_size, kSystemPointerSize));
+ // Update the total number of arguments.
+ if (kJSArgcIncludesReceiver) {
+ __ add(argc_in_out, count);
+ } else {
+ // Also subtract the receiver again.
+ __ lea(argc_in_out, Operand(argc_in_out, count, times_1, -1));
+ }
+}
+
+} // namespace
+
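An array model of the new helper, with the top of the stack at index 0 (a sketch of the intent, not the ia32 code):

#include <cstddef>
#include <cstdint>
#include <vector>
// Make room for |count| slots, slide the return address plus the existing
// arguments to the new top, and report where the freed slots begin.
size_t AllocateSpaceAndShift(std::vector<intptr_t>& stack, size_t count,
                             size_t& argc_with_receiver) {
  size_t live = argc_with_receiver + 1;    // +1 for the return address
  stack.insert(stack.begin(), count, 0);   // AllocateStackSpace
  for (size_t i = 0; i < live; ++i) stack[i] = stack[i + count];
  argc_with_receiver += count;             // update the total argument count
  return live;  // index of the first free slot above the shifted block
}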
// static
// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
@@ -2112,17 +2243,15 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// ----------- S t a t e -------------
// -- edi : target
// -- esi : context for the Call / Construct builtin
- // -- eax : number of parameters on the stack (not including the receiver)
+ // -- eax : number of parameters on the stack
// -- ecx : len (number of elements to push from args)
- // -- ecx : new.target (checked to be constructor or undefined)
+ // -- edx : new.target (checked to be constructor or undefined)
// -- esp[4] : arguments list (a FixedArray)
// -- esp[0] : return address.
// -----------------------------------
- // We need to preserve eax, edi, esi and ebx.
- __ movd(xmm0, edx);
- __ movd(xmm1, edi);
- __ movd(xmm2, eax);
+ __ movd(xmm0, edx); // Spill new.target.
+ __ movd(xmm1, edi); // Spill target.
__ movd(xmm3, esi); // Spill the context.
const Register kArgumentsList = esi;
@@ -2157,32 +2286,15 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ StackOverflowCheck(kArgumentsLength, edx, &stack_overflow);
__ movd(xmm4, kArgumentsList); // Spill the arguments list.
-
// Move the arguments already in the stack,
// including the receiver and the return address.
- {
- Label copy, check;
- Register src = edx, current = edi, tmp = esi;
- // Update stack pointer.
- __ mov(src, esp);
- __ lea(tmp, Operand(kArgumentsLength, times_system_pointer_size, 0));
- __ AllocateStackSpace(tmp);
- // Include return address and receiver.
- __ add(eax, Immediate(2));
- __ mov(current, Immediate(0));
- __ jmp(&check);
- // Loop.
- __ bind(&copy);
- __ mov(tmp, Operand(src, current, times_system_pointer_size, 0));
- __ mov(Operand(esp, current, times_system_pointer_size, 0), tmp);
- __ inc(current);
- __ bind(&check);
- __ cmp(current, eax);
- __ j(less, &copy);
- __ lea(edx, Operand(esp, eax, times_system_pointer_size, 0));
- }
-
+ // kArgumentsLength (ecx): Number of arguments to make room for.
+ // eax: Number of arguments already on the stack.
+ // edx: Points to first free slot on the stack after arguments were shifted.
+ Generate_AllocateSpaceAndShiftExistingArguments(masm, kArgumentsLength, eax,
+ edx, edi, esi);
__ movd(kArgumentsList, xmm4); // Recover arguments list.
+ __ movd(xmm2, eax); // Spill argument count.
// Push additional arguments onto the stack.
{
@@ -2207,12 +2319,9 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Restore eax, edi and edx.
__ movd(esi, xmm3); // Restore the context.
- __ movd(eax, xmm2);
- __ movd(edi, xmm1);
- __ movd(edx, xmm0);
-
- // Compute the actual parameter count.
- __ add(eax, kArgumentsLength);
+ __ movd(eax, xmm2); // Restore argument count.
+ __ movd(edi, xmm1); // Restore target.
+ __ movd(edx, xmm0); // Restore new.target.
// Tail-call to the actual Call or Construct builtin.
__ Jump(code, RelocInfo::CODE_TARGET);
@@ -2227,7 +2336,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
CallOrConstructMode mode,
Handle<Code> code) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edi : the target to call (can be any Object)
// -- esi : context for the Call / Construct builtin
// -- edx : the new target (for [[Construct]] calls)
@@ -2261,12 +2370,14 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Label stack_done, stack_overflow;
__ mov(edx, Operand(ebp, StandardFrameConstants::kArgCOffset));
+ if (kJSArgcIncludesReceiver) {
+ __ dec(edx);
+ }
__ sub(edx, ecx);
__ j(less_equal, &stack_done);
{
// ----------- S t a t e -------------
- // -- eax : the number of arguments already in the stack (not including the
- // receiver)
+ // -- eax : the number of arguments already in the stack
// -- ecx : start index (to support rest parameters)
// -- edx : number of arguments to copy, i.e. arguments count - start index
// -- edi : the target to call (can be any Object)
@@ -2284,31 +2395,11 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Move the arguments already in the stack,
// including the receiver and the return address.
- {
- Label copy, check;
- Register src = esi, current = edi;
- // Update stack pointer.
- __ mov(src, esp);
- __ lea(scratch, Operand(edx, times_system_pointer_size, 0));
- __ AllocateStackSpace(scratch);
- // Include return address and receiver.
- __ add(eax, Immediate(2));
- __ Move(current, 0);
- __ jmp(&check);
- // Loop.
- __ bind(&copy);
- __ mov(scratch, Operand(src, current, times_system_pointer_size, 0));
- __ mov(Operand(esp, current, times_system_pointer_size, 0), scratch);
- __ inc(current);
- __ bind(&check);
- __ cmp(current, eax);
- __ j(less, &copy);
- __ lea(esi, Operand(esp, eax, times_system_pointer_size, 0));
- }
-
- // Update total number of arguments.
- __ sub(eax, Immediate(2));
- __ add(eax, edx);
+ // edx: Number of arguments to make room for.
+ // eax: Number of arguments already on the stack.
+ // esi: Points to first free slot on the stack after arguments were shifted.
+ Generate_AllocateSpaceAndShiftExistingArguments(masm, edx, eax, esi, ebx,
+ edi);
// Point to the first argument to copy (skipping receiver).
__ lea(ecx, Operand(ecx, times_system_pointer_size,
@@ -2350,7 +2441,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edi : the function to call (checked to be a JSFunction)
// -----------------------------------
StackArgumentsAccessor args(eax);
@@ -2376,7 +2467,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ j(not_zero, &done_convert);
{
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edx : the shared function info.
// -- edi : the function to call (checked to be a JSFunction)
// -- esi : the function context.
@@ -2434,7 +2525,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ bind(&done_convert);
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edx : the shared function info.
// -- edi : the function to call (checked to be a JSFunction)
// -- esi : the function context.
@@ -2456,7 +2547,7 @@ namespace {
void Generate_PushBoundArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edx : new.target (only in case of [[Construct]])
// -- edi : target (checked to be a JSBoundFunction)
// -----------------------------------
@@ -2471,7 +2562,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ j(zero, &no_bound_arguments);
{
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- xmm0 : new.target (only in case of [[Construct]])
// -- edi : target (checked to be a JSBoundFunction)
// -- ecx : the [[BoundArguments]] (implemented as FixedArray)
@@ -2539,7 +2630,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edi : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(edi);
@@ -2561,7 +2652,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edi : the target to call (can be any Object).
// -----------------------------------
StackArgumentsAccessor args(eax);
@@ -2618,7 +2709,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edx : the new target (checked to be a constructor)
// -- edi : the constructor to call (checked to be a JSFunction)
// -----------------------------------
@@ -2650,7 +2741,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edx : the new target (checked to be a constructor)
// -- edi : the constructor to call (checked to be a JSBoundFunction)
// -----------------------------------
@@ -2677,7 +2768,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- eax : the number of arguments (not including the receiver)
+ // -- eax : the number of arguments
// -- edx : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -- edi : the constructor to call (can be any Object)
@@ -2768,7 +2859,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
}
// Load deoptimization data from the code object.
- __ mov(ecx, Operand(eax, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ __ mov(ecx, Operand(eax, Code::kDeoptimizationDataOrInterpreterDataOffset -
+ kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
__ mov(ecx, Operand(ecx, FixedArray::OffsetOfElementAt(
@@ -4125,8 +4217,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
- __ CmpObjectType(code_obj, BASELINE_DATA_TYPE,
- kInterpreterBytecodeOffsetRegister);
+ __ CmpObjectType(code_obj, CODET_TYPE, kInterpreterBytecodeOffsetRegister);
__ j(equal, &start_with_baseline);
// Start with bytecode as there is no baseline code.
@@ -4139,13 +4230,13 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ bind(&start_with_baseline);
} else if (FLAG_debug_code) {
- __ CmpObjectType(code_obj, BASELINE_DATA_TYPE,
- kInterpreterBytecodeOffsetRegister);
+ __ CmpObjectType(code_obj, CODET_TYPE, kInterpreterBytecodeOffsetRegister);
__ Assert(equal, AbortReason::kExpectedBaselineData);
}
- // Load baseline code from baseline data.
- __ mov(code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset));
+ if (FLAG_debug_code) {
+ AssertCodeIsBaseline(masm, code_obj, ecx);
+ }
// Load the feedback vector.
Register feedback_vector = ecx;
diff --git a/deps/v8/src/builtins/loong64/builtins-loong64.cc b/deps/v8/src/builtins/loong64/builtins-loong64.cc
new file mode 100644
index 0000000000..714353fc96
--- /dev/null
+++ b/deps/v8/src/builtins/loong64/builtins-loong64.cc
@@ -0,0 +1,3755 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/api/api-arguments.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/interface-descriptors-inl.h"
+#include "src/debug/debug.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
+#include "src/logging/counters.h"
+// For interpreter_entry_return_pc_offset. TODO(jkummerow): Drop.
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/codegen/macro-assembler-inl.h"
+#include "src/codegen/register-configuration.h"
+#include "src/heap/heap-inl.h"
+#include "src/objects/cell.h"
+#include "src/objects/foreign.h"
+#include "src/objects/heap-number.h"
+#include "src/objects/js-generator.h"
+#include "src/objects/objects-inl.h"
+#include "src/objects/smi.h"
+#include "src/runtime/runtime.h"
+
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-linkage.h"
+#include "src/wasm/wasm-objects.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm, Address address) {
+ __ li(kJavaScriptCallExtraArg1Register, ExternalReference::Create(address));
+ __ Jump(BUILTIN_CODE(masm->isolate(), AdaptorWithBuiltinExitFrame),
+ RelocInfo::CODE_TARGET);
+}
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm,
+ Runtime::FunctionId function_id) {
+ // ----------- S t a t e -------------
+ // -- a0 : actual argument count
+ // -- a1 : target function (preserved for callee)
+ // -- a3 : new target (preserved for callee)
+ // -----------------------------------
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ // Push a copy of the target function, the new target and the actual
+ // argument count.
+ // Push function as parameter to the runtime call.
+ __ SmiTag(kJavaScriptCallArgCountRegister);
+ __ Push(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
+ kJavaScriptCallArgCountRegister, kJavaScriptCallTargetRegister);
+
+ __ CallRuntime(function_id, 1);
+ __ LoadCodeObjectEntry(a2, a0);
+ // Restore target function, new target and actual argument count.
+ __ Pop(kJavaScriptCallTargetRegister, kJavaScriptCallNewTargetRegister,
+ kJavaScriptCallArgCountRegister);
+ __ SmiUntag(kJavaScriptCallArgCountRegister);
+ }
+
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Jump(a2);
+}
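+
+// Note on the Smi round-trip above (illustrative, assuming full 64-bit Smis
+// without pointer compression): the raw argument count is tagged before being
+// pushed so that a GC triggered inside the runtime call never mistakes it for
+// a heap pointer. SmiTag is then just a shift, e.g.
+//   argc = 2  --SmiTag-->  2 << 32 = 0x200000000  --SmiUntag-->  2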
+
+namespace {
+
+void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : number of arguments
+ // -- a1 : constructor function
+ // -- a3 : new target
+ // -- cp : context
+ // -- ra : return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // Enter a construct frame.
+ {
+ FrameScope scope(masm, StackFrame::CONSTRUCT);
+
+ // Preserve the incoming parameters on the stack.
+ __ SmiTag(a0);
+ __ Push(cp, a0);
+ __ SmiUntag(a0);
+
+ // Set up pointer to last argument (skip receiver).
+ __ Add_d(
+ t2, fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
+ // Copy arguments and receiver to the expression stack.
+ __ PushArray(t2, a0, t3, t0);
+ // The receiver for the builtin/api call.
+ __ PushRoot(RootIndex::kTheHoleValue);
+
+ // Call the function.
+ // a0: number of arguments (untagged)
+ // a1: constructor function
+ // a3: new target
+ __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
+
+ // Restore context from the frame.
+ __ Ld_d(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ // Restore smi-tagged arguments count from the frame.
+ __ Ld_d(t3, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ // Leave construct frame.
+ }
+
+ // Remove caller arguments from the stack and return.
+ __ SmiScale(t3, t3, kPointerSizeLog2);
+ __ Add_d(sp, sp, t3);
+ __ Add_d(sp, sp, kPointerSize);
+ __ Ret();
+}
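+
+// Worked example for the epilogue above (illustrative, assuming
+// kPointerSize == 8): for new F(x, y) the frame stores the smi-tagged count
+// 2; SmiScale turns it into a byte offset (2 * 8 = 16), and the extra
+// kPointerSize drops the receiver slot, so sp advances by 24 bytes in total.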
+
+} // namespace
+
+// The construct stub for ES5 constructor functions and ES6 class constructors.
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0: number of arguments (untagged)
+ // -- a1: constructor function
+ // -- a3: new target
+ // -- cp: context
+ // -- ra: return address
+ // -- sp[...]: constructor arguments
+ // -----------------------------------
+
+ // Enter a construct frame.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ Label post_instantiation_deopt_entry, not_create_implicit_receiver;
+ __ EnterFrame(StackFrame::CONSTRUCT);
+
+ // Preserve the incoming parameters on the stack.
+ __ SmiTag(a0);
+ __ Push(cp, a0, a1);
+ __ PushRoot(RootIndex::kUndefinedValue);
+ __ Push(a3);
+
+ // ----------- S t a t e -------------
+ // -- sp[0*kPointerSize]: new target
+ // -- sp[1*kPointerSize]: padding
+ // -- a1 and sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments (tagged)
+ // -- sp[4*kPointerSize]: context
+ // -----------------------------------
+
+ __ Ld_d(t2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_wu(t2, FieldMemOperand(t2, SharedFunctionInfo::kFlagsOffset));
+ __ DecodeField<SharedFunctionInfo::FunctionKindBits>(t2);
+ __ JumpIfIsInRange(t2, kDefaultDerivedConstructor, kDerivedConstructor,
+ &not_create_implicit_receiver);
+
+ // If not derived class constructor: Allocate the new receiver object.
+ __ IncrementCounter(masm->isolate()->counters()->constructed_objects(), 1, t2,
+ t3);
+ __ Call(BUILTIN_CODE(masm->isolate(), FastNewObject), RelocInfo::CODE_TARGET);
+ __ Branch(&post_instantiation_deopt_entry);
+
+ // Else: use TheHoleValue as receiver for constructor call
+ __ bind(&not_create_implicit_receiver);
+ __ LoadRoot(a0, RootIndex::kTheHoleValue);
+
+ // ----------- S t a t e -------------
+ // -- a0: receiver
+ // -- Slot 4 / sp[0*kPointerSize]: new target
+ // -- Slot 3 / sp[1*kPointerSize]: padding
+ // -- Slot 2 / sp[2*kPointerSize]: constructor function
+ // -- Slot 1 / sp[3*kPointerSize]: number of arguments (tagged)
+ // -- Slot 0 / sp[4*kPointerSize]: context
+ // -----------------------------------
+ // Deoptimizer enters here.
+ masm->isolate()->heap()->SetConstructStubCreateDeoptPCOffset(
+ masm->pc_offset());
+ __ bind(&post_instantiation_deopt_entry);
+
+ // Restore new target.
+ __ Pop(a3);
+
+ // Push the allocated receiver to the stack.
+ __ Push(a0);
+
+ // We need two copies because we may have to return the original one
+ // and the calling conventions dictate that the called function pops the
+  // receiver. The second copy is pushed after the arguments; we save it in
+  // a6, since a0 will hold the return value of the call.
+ __ mov(a6, a0);
+
+ // Set up pointer to last argument.
+ __ Add_d(
+ t2, fp,
+ Operand(StandardFrameConstants::kCallerSPOffset + kSystemPointerSize));
+
+ // ----------- S t a t e -------------
+  //  --      a3: new target
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: implicit receiver
+ // -- sp[2*kPointerSize]: padding
+ // -- sp[3*kPointerSize]: constructor function
+ // -- sp[4*kPointerSize]: number of arguments (tagged)
+ // -- sp[5*kPointerSize]: context
+ // -----------------------------------
+
+ // Restore constructor function and argument count.
+ __ Ld_d(a1, MemOperand(fp, ConstructFrameConstants::kConstructorOffset));
+ __ Ld_d(a0, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ __ SmiUntag(a0);
+
+ Label stack_overflow;
+ __ StackOverflowCheck(a0, t0, t1, &stack_overflow);
+
+ // TODO(victorgomes): When the arguments adaptor is completely removed, we
+ // should get the formal parameter count and copy the arguments in its
+ // correct position (including any undefined), instead of delaying this to
+ // InvokeFunction.
+
+ // Copy arguments and receiver to the expression stack.
+ __ PushArray(t2, a0, t0, t1);
+ // We need two copies because we may have to return the original one
+ // and the calling conventions dictate that the called function pops the
+  // receiver. The second copy is pushed after the arguments.
+ __ Push(a6);
+
+ // Call the function.
+ __ InvokeFunctionWithNewTarget(a1, a3, a0, InvokeType::kCall);
+
+ // ----------- S t a t e -------------
+  //  --      a0: constructor result
+ // -- sp[0*kPointerSize]: implicit receiver
+ // -- sp[1*kPointerSize]: padding
+ // -- sp[2*kPointerSize]: constructor function
+ // -- sp[3*kPointerSize]: number of arguments
+ // -- sp[4*kPointerSize]: context
+ // -----------------------------------
+
+ // Store offset of return address for deoptimizer.
+ masm->isolate()->heap()->SetConstructStubInvokeDeoptPCOffset(
+ masm->pc_offset());
+
+ // If the result is an object (in the ECMA sense), we should get rid
+ // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+ // on page 74.
+ Label use_receiver, do_throw, leave_and_return, check_receiver;
+
+ // If the result is undefined, we jump out to using the implicit receiver.
+ __ JumpIfNotRoot(a0, RootIndex::kUndefinedValue, &check_receiver);
+
+ // Otherwise we do a smi check and fall through to check if the return value
+ // is a valid receiver.
+
+ // Throw away the result of the constructor invocation and use the
+ // on-stack receiver as the result.
+ __ bind(&use_receiver);
+ __ Ld_d(a0, MemOperand(sp, 0 * kPointerSize));
+ __ JumpIfRoot(a0, RootIndex::kTheHoleValue, &do_throw);
+
+ __ bind(&leave_and_return);
+ // Restore smi-tagged arguments count from the frame.
+ __ Ld_d(a1, MemOperand(fp, ConstructFrameConstants::kLengthOffset));
+ // Leave construct frame.
+ __ LeaveFrame(StackFrame::CONSTRUCT);
+
+ // Remove caller arguments from the stack and return.
+ __ SmiScale(a4, a1, kPointerSizeLog2);
+ __ Add_d(sp, sp, a4);
+ __ Add_d(sp, sp, kPointerSize);
+ __ Ret();
+
+ __ bind(&check_receiver);
+ __ JumpIfSmi(a0, &use_receiver);
+
+ // If the type of the result (stored in its map) is less than
+ // FIRST_JS_RECEIVER_TYPE, it is not an object in the ECMA sense.
+ __ GetObjectType(a0, t2, t2);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ Branch(&leave_and_return, greater_equal, t2,
+ Operand(FIRST_JS_RECEIVER_TYPE));
+ __ Branch(&use_receiver);
+
+ __ bind(&do_throw);
+ // Restore the context from the frame.
+ __ Ld_d(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowConstructorReturnedNonObject);
+ __ break_(0xCC);
+
+ __ bind(&stack_overflow);
+ // Restore the context from the frame.
+ __ Ld_d(cp, MemOperand(fp, ConstructFrameConstants::kContextOffset));
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ break_(0xCC);
+}
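+
+// JS-level behaviour of the receiver check above (illustrative):
+//   function A() { return 42; }      // new A() yields the implicit receiver
+//   function B() { return {x: 1}; }  // new B() yields {x: 1}
+// Only a result at or above FIRST_JS_RECEIVER_TYPE replaces the receiver;
+// primitives fall back to the on-stack receiver.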
+
+void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
+ Generate_JSBuiltinsConstructStubHelper(masm);
+}
+
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ Ld_d(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
+ Operand(static_cast<int>(CodeKind::BASELINE)));
+}
+
+// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
+// the more general dispatch.
+static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
+ Register sfi_data,
+ Register scratch1,
+ Label* is_baseline) {
+ Label done;
+
+ __ GetObjectType(sfi_data, scratch1, scratch1);
+ if (FLAG_debug_code) {
+ Label not_baseline;
+ __ Branch(&not_baseline, ne, scratch1, Operand(CODET_TYPE));
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
+ __ Branch(is_baseline);
+ __ bind(&not_baseline);
+ } else {
+ __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
+ }
+ __ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
+ __ Ld_d(sfi_data,
+ FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
+
+ __ bind(&done);
+}
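+
+// Dispatch summary for the helper above (illustrative): sfi_data can be one
+// of three things here:
+//   CODET_TYPE            -> baseline code exists, branch to is_baseline;
+//   INTERPRETER_DATA_TYPE -> unwrap the inner BytecodeArray;
+//   anything else         -> assumed to already be a BytecodeArray.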
+
+// static
+void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the value to pass to the generator
+ // -- a1 : the JSGeneratorObject to resume
+ // -- ra : return address
+ // -----------------------------------
+ // Store input value into generator object.
+ __ St_d(a0, FieldMemOperand(a1, JSGeneratorObject::kInputOrDebugPosOffset));
+ __ RecordWriteField(a1, JSGeneratorObject::kInputOrDebugPosOffset, a0,
+ kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore);
+ // Check that a1 is still valid, RecordWrite might have clobbered it.
+ __ AssertGeneratorObject(a1);
+
+ // Load suspended function and context.
+ __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ __ Ld_d(cp, FieldMemOperand(a4, JSFunction::kContextOffset));
+
+ // Flood function if we are stepping.
+ Label prepare_step_in_if_stepping, prepare_step_in_suspended_generator;
+ Label stepping_prepared;
+ ExternalReference debug_hook =
+ ExternalReference::debug_hook_on_function_call_address(masm->isolate());
+ __ li(a5, debug_hook);
+ __ Ld_b(a5, MemOperand(a5, 0));
+ __ Branch(&prepare_step_in_if_stepping, ne, a5, Operand(zero_reg));
+
+ // Flood function if we need to continue stepping in the suspended generator.
+ ExternalReference debug_suspended_generator =
+ ExternalReference::debug_suspended_generator_address(masm->isolate());
+ __ li(a5, debug_suspended_generator);
+ __ Ld_d(a5, MemOperand(a5, 0));
+ __ Branch(&prepare_step_in_suspended_generator, eq, a1, Operand(a5));
+ __ bind(&stepping_prepared);
+
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ Label stack_overflow;
+ __ LoadStackLimit(kScratchReg,
+ MacroAssembler::StackLimitKind::kRealStackLimit);
+ __ Branch(&stack_overflow, lo, sp, Operand(kScratchReg));
+
+ // ----------- S t a t e -------------
+ // -- a1 : the JSGeneratorObject to resume
+ // -- a4 : generator function
+ // -- cp : generator context
+ // -- ra : return address
+ // -----------------------------------
+
+ // Push holes for arguments to generator function. Since the parser forced
+ // context allocation for any variables in generators, the actual argument
+ // values have already been copied into the context and these dummy values
+ // will never be used.
+ __ Ld_d(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_hu(
+ a3, FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ Ld_d(t1, FieldMemOperand(
+ a1, JSGeneratorObject::kParametersAndRegistersOffset));
+ {
+ Label done_loop, loop;
+ __ bind(&loop);
+ __ Sub_d(a3, a3, Operand(1));
+ __ Branch(&done_loop, lt, a3, Operand(zero_reg));
+ __ Alsl_d(kScratchReg, a3, t1, kPointerSizeLog2, t7);
+ __ Ld_d(kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
+ __ Push(kScratchReg);
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ // Push receiver.
+ __ Ld_d(kScratchReg,
+ FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
+ __ Push(kScratchReg);
+ }
+
+ // Underlying function needs to have bytecode available.
+ if (FLAG_debug_code) {
+ Label is_baseline;
+ __ Ld_d(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_d(a3, FieldMemOperand(a3, SharedFunctionInfo::kFunctionDataOffset));
+ GetSharedFunctionInfoBytecodeOrBaseline(masm, a3, t5, &is_baseline);
+ __ GetObjectType(a3, a3, a3);
+ __ Assert(eq, AbortReason::kMissingBytecodeArray, a3,
+ Operand(BYTECODE_ARRAY_TYPE));
+ __ bind(&is_baseline);
+ }
+
+ // Resume (Ignition/TurboFan) generator object.
+ {
+ __ Ld_d(a0, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_hu(a0, FieldMemOperand(
+ a0, SharedFunctionInfo::kFormalParameterCountOffset));
+ // We abuse new.target both to indicate that this is a resume call and to
+ // pass in the generator object. In ordinary calls, new.target is always
+ // undefined because generator functions are non-constructable.
+ __ Move(a3, a1);
+ __ Move(a1, a4);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kCodeOffset));
+ __ JumpCodeObject(a2);
+ }
+
+ __ bind(&prepare_step_in_if_stepping);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1, a4);
+ // Push hole as receiver since we do not use it for stepping.
+ __ PushRoot(RootIndex::kTheHoleValue);
+ __ CallRuntime(Runtime::kDebugOnFunctionCall);
+ __ Pop(a1);
+ }
+ __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ __ Branch(&stepping_prepared);
+
+ __ bind(&prepare_step_in_suspended_generator);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kDebugPrepareStepInSuspendedGenerator);
+ __ Pop(a1);
+ }
+ __ Ld_d(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+ __ Branch(&stepping_prepared);
+
+ __ bind(&stack_overflow);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ __ break_(0xCC); // This should be unreachable.
+ }
+}
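+
+// Illustrative JS trigger for this trampoline:
+//   function* g() { yield 1; yield 2; }
+//   const it = g(); it.next(); it.next();
+// Every next() lands here with the generator object in a1; new.target is
+// reused to carry the generator into the resumed frame, as noted above.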
+
+void Builtins::Generate_ConstructedNonConstructable(MacroAssembler* masm) {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kThrowConstructedNonConstructable);
+}
+
+// Clobbers scratch1 and scratch2; preserves all other registers.
+static void Generate_CheckStackOverflow(MacroAssembler* masm, Register argc,
+ Register scratch1, Register scratch2) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+ Label okay;
+ __ LoadStackLimit(scratch1, MacroAssembler::StackLimitKind::kRealStackLimit);
+  // Make scratch1 the space we have left. The stack might already be
+  // overflowed here, which will cause scratch1 to become negative.
+ __ sub_d(scratch1, sp, scratch1);
+ // Check if the arguments will overflow the stack.
+ __ slli_d(scratch2, argc, kPointerSizeLog2);
+ __ Branch(&okay, gt, scratch1, Operand(scratch2)); // Signed comparison.
+
+ // Out of stack space.
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+
+ __ bind(&okay);
+}
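+
+// Worked example for the check above (illustrative, assuming
+// kPointerSize == 8): pushing 1000 arguments needs 1000 * 8 = 8000 bytes;
+// if sp - real_stack_limit is only 4096, the signed comparison fails and
+// kThrowStackOverflow is raised before anything is pushed.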
+
+namespace {
+
+// Called with the native C calling convention. The corresponding function
+// signature is either:
+//
+// using JSEntryFunction = GeneratedCode<Address(
+// Address root_register_value, Address new_target, Address target,
+// Address receiver, intptr_t argc, Address** args)>;
+// or
+// using JSEntryFunction = GeneratedCode<Address(
+// Address root_register_value, MicrotaskQueue* microtask_queue)>;
+void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
+ Builtin entry_trampoline) {
+ Label invoke, handler_entry, exit;
+
+ {
+ NoRootArrayScope no_root_array(masm);
+
+ // Registers:
+ // either
+ // a0: root register value
+ // a1: entry address
+ // a2: function
+ // a3: receiver
+ // a4: argc
+ // a5: argv
+ // or
+ // a0: root register value
+ // a1: microtask_queue
+
+ // Save callee saved registers on the stack.
+ __ MultiPush(kCalleeSaved | ra.bit());
+
+ // Save callee-saved FPU registers.
+ __ MultiPushFPU(kCalleeSavedFPU);
+ // Set up the reserved register for 0.0.
+ __ Move(kDoubleRegZero, 0.0);
+
+ // Initialize the root register.
+ // C calling convention. The first argument is passed in a0.
+ __ mov(kRootRegister, a0);
+ }
+
+ // a1: entry address
+ // a2: function
+ // a3: receiver
+ // a4: argc
+ // a5: argv
+
+ // We build an EntryFrame.
+ __ li(s1, Operand(-1)); // Push a bad frame pointer to fail if it is used.
+ __ li(s2, Operand(StackFrame::TypeToMarker(type)));
+ __ li(s3, Operand(StackFrame::TypeToMarker(type)));
+ ExternalReference c_entry_fp = ExternalReference::Create(
+ IsolateAddressId::kCEntryFPAddress, masm->isolate());
+ __ li(s5, c_entry_fp);
+ __ Ld_d(s4, MemOperand(s5, 0));
+ __ Push(s1, s2, s3, s4);
+
+  // Clear c_entry_fp, now that we've pushed its previous value to the stack.
+ // If the c_entry_fp is not already zero and we don't clear it, the
+ // SafeStackFrameIterator will assume we are executing C++ and miss the JS
+ // frames on top.
+ __ St_d(zero_reg, MemOperand(s5, 0));
+
+ // Set up frame pointer for the frame to be pushed.
+ __ addi_d(fp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+ // Registers:
+ // either
+ // a1: entry address
+ // a2: function
+ // a3: receiver
+ // a4: argc
+ // a5: argv
+ // or
+ // a1: microtask_queue
+ //
+ // Stack:
+ // caller fp |
+ // function slot | entry frame
+ // context slot |
+ // bad fp (0xFF...F) |
+ // callee saved registers + ra
+ // [ O32: 4 args slots]
+ // args
+
+ // If this is the outermost JS call, set js_entry_sp value.
+ Label non_outermost_js;
+ ExternalReference js_entry_sp = ExternalReference::Create(
+ IsolateAddressId::kJSEntrySPAddress, masm->isolate());
+ __ li(s1, js_entry_sp);
+ __ Ld_d(s2, MemOperand(s1, 0));
+ __ Branch(&non_outermost_js, ne, s2, Operand(zero_reg));
+ __ St_d(fp, MemOperand(s1, 0));
+ __ li(s3, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ Label cont;
+ __ b(&cont);
+ __ nop(); // Branch delay slot nop.
+ __ bind(&non_outermost_js);
+ __ li(s3, Operand(StackFrame::INNER_JSENTRY_FRAME));
+ __ bind(&cont);
+ __ Push(s3);
+
+ // Jump to a faked try block that does the invoke, with a faked catch
+ // block that sets the pending exception.
+ __ jmp(&invoke);
+ __ bind(&handler_entry);
+
+ // Store the current pc as the handler offset. It's used later to create the
+ // handler table.
+ masm->isolate()->builtins()->SetJSEntryHandlerOffset(handler_entry.pos());
+
+ // Caught exception: Store result (exception) in the pending exception
+ // field in the JSEnv and return a failure sentinel. Coming in here the
+ // fp will be invalid because the PushStackHandler below sets it to 0 to
+ // signal the existence of the JSEntry frame.
+ __ li(s1, ExternalReference::Create(
+ IsolateAddressId::kPendingExceptionAddress, masm->isolate()));
+ __ St_d(a0,
+ MemOperand(s1, 0)); // We come back from 'invoke'. result is in a0.
+ __ LoadRoot(a0, RootIndex::kException);
+ __ b(&exit); // b exposes branch delay slot.
+ __ nop(); // Branch delay slot nop.
+
+ // Invoke: Link this frame into the handler chain.
+ __ bind(&invoke);
+ __ PushStackHandler();
+ // If an exception not caught by another handler occurs, this handler
+  // returns control to the code after the jump to &invoke above, which
+ // restores all kCalleeSaved registers (including cp and fp) to their
+ // saved values before returning a failure to C.
+ //
+ // Registers:
+ // either
+ // a0: root register value
+ // a1: entry address
+ // a2: function
+ // a3: receiver
+ // a4: argc
+ // a5: argv
+ // or
+ // a0: root register value
+ // a1: microtask_queue
+ //
+ // Stack:
+ // handler frame
+ // entry frame
+ // callee saved registers + ra
+ // [ O32: 4 args slots]
+ // args
+ //
+ // Invoke the function by calling through JS entry trampoline builtin and
+ // pop the faked function when we return.
+
+ Handle<Code> trampoline_code =
+ masm->isolate()->builtins()->code_handle(entry_trampoline);
+ __ Call(trampoline_code, RelocInfo::CODE_TARGET);
+
+ // Unlink this frame from the handler chain.
+ __ PopStackHandler();
+
+ __ bind(&exit); // a0 holds result
+ // Check if the current stack frame is marked as the outermost JS frame.
+ Label non_outermost_js_2;
+ __ Pop(a5);
+ __ Branch(&non_outermost_js_2, ne, a5,
+ Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
+ __ li(a5, js_entry_sp);
+ __ St_d(zero_reg, MemOperand(a5, 0));
+ __ bind(&non_outermost_js_2);
+
+ // Restore the top frame descriptors from the stack.
+ __ Pop(a5);
+ __ li(a4, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ masm->isolate()));
+ __ St_d(a5, MemOperand(a4, 0));
+
+ // Reset the stack to the callee saved registers.
+ __ addi_d(sp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+ // Restore callee-saved fpu registers.
+ __ MultiPopFPU(kCalleeSavedFPU);
+
+ // Restore callee saved registers from the stack.
+ __ MultiPop(kCalleeSaved | ra.bit());
+ // Return.
+ __ Jump(ra);
+}
+
+} // namespace
+
+void Builtins::Generate_JSEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::ENTRY, Builtin::kJSEntryTrampoline);
+}
+
+void Builtins::Generate_JSConstructEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::CONSTRUCT_ENTRY,
+ Builtin::kJSConstructEntryTrampoline);
+}
+
+void Builtins::Generate_JSRunMicrotasksEntry(MacroAssembler* masm) {
+ Generate_JSEntryVariant(masm, StackFrame::ENTRY,
+ Builtin::kRunMicrotasksTrampoline);
+}
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+ bool is_construct) {
+ // ----------- S t a t e -------------
+ // -- a1: new.target
+ // -- a2: function
+ // -- a3: receiver_pointer
+ // -- a4: argc
+ // -- a5: argv
+ // -----------------------------------
+
+ // Enter an internal frame.
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Set up the context (we need to use the caller context from the isolate).
+ ExternalReference context_address = ExternalReference::Create(
+ IsolateAddressId::kContextAddress, masm->isolate());
+ __ li(cp, context_address);
+ __ Ld_d(cp, MemOperand(cp, 0));
+
+ // Push the function and the receiver onto the stack.
+ __ Push(a2);
+
+ // Check if we have enough stack space to push all arguments.
+ __ addi_d(a6, a4, 1);
+ Generate_CheckStackOverflow(masm, a6, a0, s2);
+
+ // Copy arguments to the stack in a loop.
+ // a4: argc
+ // a5: argv, i.e. points to first arg
+ Label loop, entry;
+ __ Alsl_d(s1, a4, a5, kPointerSizeLog2, t7);
+ __ b(&entry);
+ // s1 points past last arg.
+ __ bind(&loop);
+ __ addi_d(s1, s1, -kPointerSize);
+ __ Ld_d(s2, MemOperand(s1, 0)); // Read next parameter.
+ __ Ld_d(s2, MemOperand(s2, 0)); // Dereference handle.
+ __ Push(s2); // Push parameter.
+ __ bind(&entry);
+ __ Branch(&loop, ne, a5, Operand(s1));
+
+    // Push the receiver.
+ __ Push(a3);
+
+ // a0: argc
+ // a1: function
+ // a3: new.target
+ __ mov(a3, a1);
+ __ mov(a1, a2);
+ __ mov(a0, a4);
+
+ // Initialize all JavaScript callee-saved registers, since they will be seen
+ // by the garbage collector as part of handlers.
+ __ LoadRoot(a4, RootIndex::kUndefinedValue);
+ __ mov(a5, a4);
+ __ mov(s1, a4);
+ __ mov(s2, a4);
+ __ mov(s3, a4);
+ __ mov(s4, a4);
+ __ mov(s5, a4);
+ // s6 holds the root address. Do not clobber.
+ // s7 is cp. Do not init.
+
+ // Invoke the code.
+ Handle<Code> builtin = is_construct
+ ? BUILTIN_CODE(masm->isolate(), Construct)
+ : masm->isolate()->builtins()->Call();
+ __ Call(builtin, RelocInfo::CODE_TARGET);
+
+ // Leave internal frame.
+ }
+ __ Jump(ra);
+}
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+ Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
+ // a1: microtask_queue
+ __ mov(RunMicrotasksDescriptor::MicrotaskQueueRegister(), a1);
+ __ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
+}
+
+static void ReplaceClosureCodeWithOptimizedCode(MacroAssembler* masm,
+ Register optimized_code,
+ Register closure) {
+ DCHECK(!AreAliased(optimized_code, closure));
+ // Store code entry in the closure.
+ __ St_d(optimized_code, FieldMemOperand(closure, JSFunction::kCodeOffset));
+ __ RecordWriteField(closure, JSFunction::kCodeOffset, optimized_code,
+ kRAHasNotBeenSaved, SaveFPRegsMode::kIgnore,
+ RememberedSetAction::kOmit, SmiCheck::kOmit);
+}
+
+static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
+ Register scratch2) {
+ Register params_size = scratch1;
+
+ // Get the size of the formal parameters + receiver (in bytes).
+ __ Ld_d(params_size,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ Ld_w(params_size,
+ FieldMemOperand(params_size, BytecodeArray::kParameterSizeOffset));
+
+ Register actual_params_size = scratch2;
+ // Compute the size of the actual parameters + receiver (in bytes).
+ __ Ld_d(actual_params_size,
+ MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ __ slli_d(actual_params_size, actual_params_size, kPointerSizeLog2);
+ __ Add_d(actual_params_size, actual_params_size, Operand(kSystemPointerSize));
+
+ // If actual is bigger than formal, then we should use it to free up the stack
+ // arguments.
+ __ slt(t2, params_size, actual_params_size);
+ __ Movn(params_size, actual_params_size, t2);
+
+ // Leave the frame (also dropping the register file).
+ __ LeaveFrame(StackFrame::INTERPRETED);
+
+ // Drop receiver + arguments.
+ __ Add_d(sp, sp, params_size);
+}
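+
+// The slt/Movn pair above computes max(params_size, actual_params_size)
+// (illustrative): slt sets t2 = 1 iff params_size < actual_params_size, and
+// Movn overwrites params_size only when t2 != 0. E.g. a function declared
+// with 2 parameters but called with 5 arguments frees the larger amount, so
+// no stale argument slots are left behind.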
+
+// Tail-call |function_id| if |actual_marker| == |expected_marker|.
+static void TailCallRuntimeIfMarkerEquals(MacroAssembler* masm,
+ Register actual_marker,
+ OptimizationMarker expected_marker,
+ Runtime::FunctionId function_id) {
+ Label no_match;
+ __ Branch(&no_match, ne, actual_marker, Operand(expected_marker));
+ GenerateTailCallToReturnedCode(masm, function_id);
+ __ bind(&no_match);
+}
+
+static void TailCallOptimizedCodeSlot(MacroAssembler* masm,
+ Register optimized_code_entry) {
+ // ----------- S t a t e -------------
+ // -- a0 : actual argument count
+ // -- a3 : new target (preserved for callee if needed, and caller)
+ // -- a1 : target function (preserved for callee if needed, and caller)
+ // -----------------------------------
+ DCHECK(!AreAliased(optimized_code_entry, a1, a3));
+
+ Register closure = a1;
+ Label heal_optimized_code_slot;
+
+ // If the optimized code is cleared, go to runtime to update the optimization
+ // marker field.
+ __ LoadWeakValue(optimized_code_entry, optimized_code_entry,
+ &heal_optimized_code_slot);
+
+ // Check if the optimized code is marked for deopt. If it is, call the
+ // runtime to clear it.
+ __ Ld_d(a6, FieldMemOperand(optimized_code_entry,
+ Code::kCodeDataContainerOffset));
+ __ Ld_w(a6, FieldMemOperand(a6, CodeDataContainer::kKindSpecificFlagsOffset));
+ __ And(a6, a6, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ Branch(&heal_optimized_code_slot, ne, a6, Operand(zero_reg));
+
+ // Optimized code is good, get it into the closure and link the closure into
+ // the optimized functions list, then tail call the optimized code.
+ // The feedback vector is no longer used, so re-use it as a scratch
+ // register.
+ ReplaceClosureCodeWithOptimizedCode(masm, optimized_code_entry, closure);
+
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ LoadCodeObjectEntry(a2, optimized_code_entry);
+ __ Jump(a2);
+
+ // Optimized code slot contains deoptimized code or code is cleared and
+ // optimized code marker isn't updated. Evict the code, update the marker
+ // and re-enter the closure's code.
+ __ bind(&heal_optimized_code_slot);
+ GenerateTailCallToReturnedCode(masm, Runtime::kHealOptimizedCodeSlot);
+}
+
+static void MaybeOptimizeCode(MacroAssembler* masm, Register feedback_vector,
+ Register optimization_marker) {
+ // ----------- S t a t e -------------
+ // -- a0 : actual argument count
+ // -- a3 : new target (preserved for callee if needed, and caller)
+ // -- a1 : target function (preserved for callee if needed, and caller)
+ // -- feedback vector (preserved for caller if needed)
+ // -- optimization_marker : a Smi containing a non-zero optimization marker.
+ // -----------------------------------
+ DCHECK(!AreAliased(feedback_vector, a1, a3, optimization_marker));
+
+ // TODO(v8:8394): The logging of first execution will break if
+ // feedback vectors are not allocated. We need to find a different way of
+ // logging these events if required.
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kLogFirstExecution,
+ Runtime::kFunctionFirstExecution);
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kCompileOptimized,
+ Runtime::kCompileOptimized_NotConcurrent);
+ TailCallRuntimeIfMarkerEquals(masm, optimization_marker,
+ OptimizationMarker::kCompileOptimizedConcurrent,
+ Runtime::kCompileOptimized_Concurrent);
+
+ // Marker should be one of LogFirstExecution / CompileOptimized /
+ // CompileOptimizedConcurrent. InOptimizationQueue and None shouldn't reach
+ // here.
+ if (FLAG_debug_code) {
+ __ stop();
+ }
+}
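+
+// Marker dispatch above (illustrative): exactly one of the three
+// TailCallRuntimeIfMarkerEquals calls is expected to match; e.g. a marker of
+// kCompileOptimizedConcurrent tail-calls Runtime::kCompileOptimized_Concurrent
+// and never returns here. Falling through all three is a bug, hence the
+// stop() in debug builds.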
+
+// Advance the current bytecode offset. This simulates what all bytecode
+// handlers do upon completion of the underlying operation. Will bail out to a
+// label if the bytecode (without prefix) is a return bytecode. Will not advance
+// the bytecode offset if the current bytecode is a JumpLoop, instead just
+// re-executing the JumpLoop to jump to the correct bytecode.
+static void AdvanceBytecodeOffsetOrReturn(MacroAssembler* masm,
+ Register bytecode_array,
+ Register bytecode_offset,
+ Register bytecode, Register scratch1,
+ Register scratch2, Register scratch3,
+ Label* if_return) {
+ Register bytecode_size_table = scratch1;
+
+ // The bytecode offset value will be increased by one in wide and extra wide
+  // cases. In the case of having a wide or extra wide JumpLoop bytecode, we
+  // will restore the original bytecode offset. In order to simplify the code,
+  // we keep a backup of it.
+ Register original_bytecode_offset = scratch3;
+ DCHECK(!AreAliased(bytecode_array, bytecode_offset, bytecode,
+ bytecode_size_table, original_bytecode_offset));
+ __ Move(original_bytecode_offset, bytecode_offset);
+ __ li(bytecode_size_table, ExternalReference::bytecode_size_table_address());
+
+ // Check if the bytecode is a Wide or ExtraWide prefix bytecode.
+ Label process_bytecode, extra_wide;
+ STATIC_ASSERT(0 == static_cast<int>(interpreter::Bytecode::kWide));
+ STATIC_ASSERT(1 == static_cast<int>(interpreter::Bytecode::kExtraWide));
+ STATIC_ASSERT(2 == static_cast<int>(interpreter::Bytecode::kDebugBreakWide));
+ STATIC_ASSERT(3 ==
+ static_cast<int>(interpreter::Bytecode::kDebugBreakExtraWide));
+ __ Branch(&process_bytecode, hi, bytecode, Operand(3));
+ __ And(scratch2, bytecode, Operand(1));
+ __ Branch(&extra_wide, ne, scratch2, Operand(zero_reg));
+
+ // Load the next bytecode and update table to the wide scaled table.
+ __ Add_d(bytecode_offset, bytecode_offset, Operand(1));
+ __ Add_d(scratch2, bytecode_array, bytecode_offset);
+ __ Ld_bu(bytecode, MemOperand(scratch2, 0));
+ __ Add_d(bytecode_size_table, bytecode_size_table,
+ Operand(kByteSize * interpreter::Bytecodes::kBytecodeCount));
+ __ jmp(&process_bytecode);
+
+ __ bind(&extra_wide);
+ // Load the next bytecode and update table to the extra wide scaled table.
+ __ Add_d(bytecode_offset, bytecode_offset, Operand(1));
+ __ Add_d(scratch2, bytecode_array, bytecode_offset);
+ __ Ld_bu(bytecode, MemOperand(scratch2, 0));
+ __ Add_d(bytecode_size_table, bytecode_size_table,
+ Operand(2 * kByteSize * interpreter::Bytecodes::kBytecodeCount));
+
+ __ bind(&process_bytecode);
+
+// Bailout to the return label if this is a return bytecode.
+#define JUMP_IF_EQUAL(NAME) \
+ __ Branch(if_return, eq, bytecode, \
+ Operand(static_cast<int>(interpreter::Bytecode::k##NAME)));
+ RETURN_BYTECODE_LIST(JUMP_IF_EQUAL)
+#undef JUMP_IF_EQUAL
+
+ // If this is a JumpLoop, re-execute it to perform the jump to the beginning
+ // of the loop.
+ Label end, not_jump_loop;
+ __ Branch(&not_jump_loop, ne, bytecode,
+ Operand(static_cast<int>(interpreter::Bytecode::kJumpLoop)));
+ // We need to restore the original bytecode_offset since we might have
+ // increased it to skip the wide / extra-wide prefix bytecode.
+ __ Move(bytecode_offset, original_bytecode_offset);
+ __ jmp(&end);
+
+ __ bind(&not_jump_loop);
+ // Otherwise, load the size of the current bytecode and advance the offset.
+ __ Add_d(scratch2, bytecode_size_table, bytecode);
+ __ Ld_b(scratch2, MemOperand(scratch2, 0));
+ __ Add_d(bytecode_offset, bytecode_offset, scratch2);
+
+ __ bind(&end);
+}
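+
+// Worked example (illustrative): for the sequence [kWide, kLdaSmi, ...] the
+// offset is first bumped past the prefix and the size table is advanced by
+// one scaled table (kBytecodeCount byte entries), so the wide size of kLdaSmi
+// is loaded; a JumpLoop instead restores original_bytecode_offset so the loop
+// header is re-dispatched rather than skipped.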
+
+// Read off the optimization state in the feedback vector and check if there
+// is optimized code or an optimization marker that needs to be processed.
+static void LoadOptimizationStateAndJumpIfNeedsProcessing(
+ MacroAssembler* masm, Register optimization_state, Register feedback_vector,
+ Label* has_optimized_code_or_marker) {
+ ASM_CODE_COMMENT(masm);
+ Register scratch = t2;
+ // TODO(liuyu): Remove CHECK
+ CHECK_NE(t2, optimization_state);
+ CHECK_NE(t2, feedback_vector);
+ __ Ld_w(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+ __ And(
+ scratch, optimization_state,
+ Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
+ __ Branch(has_optimized_code_or_marker, ne, scratch, Operand(zero_reg));
+}
+
+static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
+ MacroAssembler* masm, Register optimization_state,
+ Register feedback_vector) {
+ ASM_CODE_COMMENT(masm);
+ Label maybe_has_optimized_code;
+  // Check if an optimized code marker is available.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ And(
+ scratch, optimization_state,
+ Operand(FeedbackVector::kHasCompileOptimizedOrLogFirstExecutionMarker));
+ __ Branch(&maybe_has_optimized_code, eq, scratch, Operand(zero_reg));
+ }
+
+ Register optimization_marker = optimization_state;
+ __ DecodeField<FeedbackVector::OptimizationMarkerBits>(optimization_marker);
+ MaybeOptimizeCode(masm, feedback_vector, optimization_marker);
+
+ __ bind(&maybe_has_optimized_code);
+ Register optimized_code_entry = optimization_state;
+  __ Ld_d(optimized_code_entry,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kMaybeOptimizedCodeOffset));
+
+ TailCallOptimizedCodeSlot(masm, optimized_code_entry);
+}
+
+// static
+void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
+ UseScratchRegisterScope temps(masm);
+ temps.Include(s1.bit() | s2.bit());
+ temps.Exclude(t7.bit());
+ auto descriptor =
+ Builtins::CallInterfaceDescriptorFor(Builtin::kBaselineOutOfLinePrologue);
+ Register closure = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kClosure);
+ // Load the feedback vector from the closure.
+ Register feedback_vector = temps.Acquire();
+ __ Ld_d(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ Ld_d(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ if (FLAG_debug_code) {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ GetObjectType(feedback_vector, scratch, scratch);
+ __ Assert(eq, AbortReason::kExpectedFeedbackVector, scratch,
+ Operand(FEEDBACK_VECTOR_TYPE));
+ }
+ // Check for an optimization marker.
+ Label has_optimized_code_or_marker;
+ Register optimization_state = no_reg;
+ {
+ UseScratchRegisterScope temps(masm);
+ optimization_state = temps.Acquire();
+    // optimization_state will be used only in |has_optimized_code_or_marker|;
+    // outside of that label the register can be reused.
+ LoadOptimizationStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector,
+ &has_optimized_code_or_marker);
+ }
+ // Increment invocation count for the function.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register invocation_count = temps.Acquire();
+ __ Ld_w(invocation_count,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+ __ Add_w(invocation_count, invocation_count, Operand(1));
+ __ St_w(invocation_count,
+ FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+ }
+
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Frame Setup");
+ // Normally the first thing we'd do here is Push(ra, fp), but we already
+ // entered the frame in BaselineCompiler::Prologue, as we had to use the
+ // value ra before the call to this BaselineOutOfLinePrologue builtin.
+ Register callee_context = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kCalleeContext);
+ Register callee_js_function = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kClosure);
+ __ Push(callee_context, callee_js_function);
+ DCHECK_EQ(callee_js_function, kJavaScriptCallTargetRegister);
+ DCHECK_EQ(callee_js_function, kJSFunctionRegister);
+
+ Register argc = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kJavaScriptCallArgCount);
+    // We'll use the bytecode array for both code age/OSR resetting and
+    // pushing onto the frame, so load it into a register.
+ Register bytecodeArray = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
+
+ // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
+    // are 8-bit fields next to each other, so we can optimize by writing a
+    // single 16-bit store. These static asserts guard that our assumption is
+    // valid.
+ STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ St_h(zero_reg,
+ FieldMemOperand(bytecodeArray,
+ BytecodeArray::kOsrLoopNestingLevelOffset));
+
+ __ Push(argc, bytecodeArray);
+
+ // Baseline code frames store the feedback vector where interpreter would
+ // store the bytecode offset.
+ if (FLAG_debug_code) {
+ UseScratchRegisterScope temps(masm);
+ Register invocation_count = temps.Acquire();
+ __ GetObjectType(feedback_vector, invocation_count, invocation_count);
+ __ Assert(eq, AbortReason::kExpectedFeedbackVector, invocation_count,
+ Operand(FEEDBACK_VECTOR_TYPE));
+ }
+    // Our stack is currently aligned. We have to push something along with
+ // the feedback vector to keep it that way -- we may as well start
+ // initialising the register frame.
+ // TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
+ // `undefined` in the accumulator register, to skip the load in the baseline
+ // code.
+ __ Push(feedback_vector);
+ }
+
+ Label call_stack_guard;
+ Register frame_size = descriptor.GetRegisterParameter(
+ BaselineOutOfLinePrologueDescriptor::kStackFrameSize);
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt check");
+    // Stack check. This folds the interrupt stack limit check and the real
+    // stack limit check into one by just checking against the
+ // interrupt limit. The interrupt limit is either equal to the real stack
+ // limit or tighter. By ensuring we have space until that limit after
+ // building the frame we can quickly precheck both at once.
+ UseScratchRegisterScope temps(masm);
+ Register sp_minus_frame_size = temps.Acquire();
+ __ Sub_d(sp_minus_frame_size, sp, frame_size);
+ Register interrupt_limit = temps.Acquire();
+ __ LoadStackLimit(interrupt_limit,
+ MacroAssembler::StackLimitKind::kInterruptStackLimit);
+ __ Branch(&call_stack_guard, Uless, sp_minus_frame_size,
+ Operand(interrupt_limit));
+ }
+
+ // Do "fast" return to the caller pc in ra.
+ // TODO(v8:11429): Document this frame setup better.
+ __ Ret();
+
+ __ bind(&has_optimized_code_or_marker);
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
+ UseScratchRegisterScope temps(masm);
+ temps.Exclude(optimization_state);
+ // Ensure the optimization_state is not allocated again.
+ // Drop the frame created by the baseline call.
+ __ Pop(ra, fp);
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+ feedback_vector);
+ __ Trap();
+ }
+
+ __ bind(&call_stack_guard);
+ {
+ ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
+ FrameScope frame_scope(masm, StackFrame::INTERNAL);
+ // Save incoming new target or generator
+ __ Push(kJavaScriptCallNewTargetRegister);
+ __ SmiTag(frame_size);
+ __ Push(frame_size);
+ __ CallRuntime(Runtime::kStackGuardWithGap);
+ __ Pop(kJavaScriptCallNewTargetRegister);
+ }
+ __ Ret();
+ temps.Exclude(s1.bit() | s2.bit());
+}
+
+// Generate code for entering a JS function with the interpreter.
+// On entry to the function the receiver and arguments have been pushed on the
+// stack left to right.
+//
+// The live registers are:
+// o a0 : actual argument count (not including the receiver)
+// o a1: the JS function object being called.
+// o a3: the incoming new target or generator object
+// o cp: our context
+// o fp: the caller's frame pointer
+// o sp: stack pointer
+// o ra: return address
+//
+// The function builds an interpreter frame. See InterpreterFrameConstants in
+// frame-constants.h for its layout.
+void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
+ Register closure = a1;
+ Register feedback_vector = a2;
+
+ // Get the bytecode array from the function object and load it into
+ // kInterpreterBytecodeArrayRegister.
+ __ Ld_d(kScratchReg,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_d(
+ kInterpreterBytecodeArrayRegister,
+ FieldMemOperand(kScratchReg, SharedFunctionInfo::kFunctionDataOffset));
+ Label is_baseline;
+ GetSharedFunctionInfoBytecodeOrBaseline(
+ masm, kInterpreterBytecodeArrayRegister, kScratchReg, &is_baseline);
+
+  // The bytecode array could have been flushed from the shared function info;
+  // if so, call into CompileLazy.
+ Label compile_lazy;
+ __ GetObjectType(kInterpreterBytecodeArrayRegister, kScratchReg, kScratchReg);
+ __ Branch(&compile_lazy, ne, kScratchReg, Operand(BYTECODE_ARRAY_TYPE));
+
+ // Load the feedback vector from the closure.
+ __ Ld_d(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ Ld_d(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label push_stack_frame;
+ // Check if feedback vector is valid. If valid, check for optimized code
+  // and update invocation count. Otherwise, set up the stack frame.
+ __ Ld_d(a4, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ Ld_hu(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
+ __ Branch(&push_stack_frame, ne, a4, Operand(FEEDBACK_VECTOR_TYPE));
+
+ // Read off the optimization state in the feedback vector, and if there
+ // is optimized code or an optimization marker, call that instead.
+ Register optimization_state = a4;
+ __ Ld_w(optimization_state,
+ FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
+
+  // Check if the optimized code slot is not empty or has an optimization
+  // marker.
+ Label has_optimized_code_or_marker;
+
+ __ andi(t0, optimization_state,
+ FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask);
+ __ Branch(&has_optimized_code_or_marker, ne, t0, Operand(zero_reg));
+
+ Label not_optimized;
+ __ bind(&not_optimized);
+
+ // Increment invocation count for the function.
+ __ Ld_w(a4, FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+ __ Add_w(a4, a4, Operand(1));
+ __ St_w(a4, FieldMemOperand(feedback_vector,
+ FeedbackVector::kInvocationCountOffset));
+
+ // Open a frame scope to indicate that there is a frame on the stack. The
+ // MANUAL indicates that the scope shouldn't actually generate code to set up
+ // the frame (that is done below).
+ __ bind(&push_stack_frame);
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ PushStandardFrame(closure);
+
+ // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
+  // 8-bit fields next to each other, so we can optimize by writing a single
+  // 16-bit store. These static asserts guard that our assumption is valid.
+ STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
+ BytecodeArray::kOsrLoopNestingLevelOffset + kCharSize);
+ STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+ __ St_h(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kOsrLoopNestingLevelOffset));
+
+ // Load initial bytecode offset.
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+
+ // Push bytecode array and Smi tagged bytecode array offset.
+ __ SmiTag(a4, kInterpreterBytecodeOffsetRegister);
+ __ Push(kInterpreterBytecodeArrayRegister, a4);
+
+ // Allocate the local and temporary register file on the stack.
+ Label stack_overflow;
+ {
+ // Load frame size (word) from the BytecodeArray object.
+ __ Ld_w(a4, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kFrameSizeOffset));
+
+ // Do a stack check to ensure we don't go over the limit.
+ __ Sub_d(a5, sp, Operand(a4));
+ __ LoadStackLimit(a2, MacroAssembler::StackLimitKind::kRealStackLimit);
+ __ Branch(&stack_overflow, lo, a5, Operand(a2));
+
+ // If ok, push undefined as the initial value for all register file entries.
+ Label loop_header;
+ Label loop_check;
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+ __ Branch(&loop_check);
+ __ bind(&loop_header);
+ // TODO(rmcilroy): Consider doing more than one push per loop iteration.
+ __ Push(kInterpreterAccumulatorRegister);
+ // Continue loop if not done.
+ __ bind(&loop_check);
+ __ Sub_d(a4, a4, Operand(kPointerSize));
+ __ Branch(&loop_header, ge, a4, Operand(zero_reg));
+ }
+
+ // If the bytecode array has a valid incoming new target or generator object
+  // register, initialize it with the incoming value that was passed in a3.
+ Label no_incoming_new_target_or_generator_register;
+ __ Ld_w(a5, FieldMemOperand(
+ kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kIncomingNewTargetOrGeneratorRegisterOffset));
+ __ Branch(&no_incoming_new_target_or_generator_register, eq, a5,
+ Operand(zero_reg));
+ __ Alsl_d(a5, a5, fp, kPointerSizeLog2, t7);
+ __ St_d(a3, MemOperand(a5, 0));
+ __ bind(&no_incoming_new_target_or_generator_register);
+
+ // Perform interrupt stack check.
+ // TODO(solanes): Merge with the real stack limit check above.
+ Label stack_check_interrupt, after_stack_check_interrupt;
+ __ LoadStackLimit(a5, MacroAssembler::StackLimitKind::kInterruptStackLimit);
+ __ Branch(&stack_check_interrupt, lo, sp, Operand(a5));
+ __ bind(&after_stack_check_interrupt);
+
+ // The accumulator is already loaded with undefined.
+
+ // Load the dispatch table into a register and dispatch to the bytecode
+ // handler at the current bytecode offset.
+ Label do_dispatch;
+ __ bind(&do_dispatch);
+ __ li(kInterpreterDispatchTableRegister,
+ ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
+ __ Add_d(t5, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister);
+ __ Ld_bu(a7, MemOperand(t5, 0));
+ __ Alsl_d(kScratchReg, a7, kInterpreterDispatchTableRegister,
+ kPointerSizeLog2, t7);
+ __ Ld_d(kJavaScriptCallCodeStartRegister, MemOperand(kScratchReg, 0));
+ __ Call(kJavaScriptCallCodeStartRegister);
+ masm->isolate()->heap()->SetInterpreterEntryReturnPCOffset(masm->pc_offset());
+
+ // Any returns to the entry trampoline are either due to the return bytecode
+ // or the interpreter tail calling a builtin and then a dispatch.
+
+ // Get bytecode array and bytecode offset from the stack frame.
+ __ Ld_d(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ Ld_d(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+ // Either return, or advance to the next bytecode and dispatch.
+ Label do_return;
+ __ Add_d(a1, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister);
+ __ Ld_bu(a1, MemOperand(a1, 0));
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+ a4, &do_return);
+ __ jmp(&do_dispatch);
+
+ __ bind(&do_return);
+ // The return value is in a0.
+ LeaveInterpreterFrame(masm, t0, t1);
+ __ Jump(ra);
+
+ __ bind(&stack_check_interrupt);
+ // Modify the bytecode offset in the stack to be kFunctionEntryBytecodeOffset
+ // for the call to the StackGuard.
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(Smi::FromInt(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset)));
+ __ St_d(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ CallRuntime(Runtime::kStackGuard);
+
+ // After the call, restore the bytecode array, bytecode offset and accumulator
+ // registers again. Also, restore the bytecode offset in the stack to its
+ // previous value.
+ __ Ld_d(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
+
+ __ SmiTag(a5, kInterpreterBytecodeOffsetRegister);
+ __ St_d(a5, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ __ jmp(&after_stack_check_interrupt);
+
+ __ bind(&has_optimized_code_or_marker);
+ MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
+ feedback_vector);
+
+ __ bind(&is_baseline);
+ {
+ // Load the feedback vector from the closure.
+ __ Ld_d(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ Ld_d(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label install_baseline_code;
+ // Check if feedback vector is valid. If not, call prepare for baseline to
+ // allocate it.
+ __ Ld_d(t0, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ Ld_hu(t0, FieldMemOperand(t0, Map::kInstanceTypeOffset));
+ __ Branch(&install_baseline_code, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
+
+ // Check for an optimization marker.
+ LoadOptimizationStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector,
+ &has_optimized_code_or_marker);
+
+ // Load the baseline code into the closure.
+ __ Move(a2, kInterpreterBytecodeArrayRegister);
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ ReplaceClosureCodeWithOptimizedCode(masm, a2, closure);
+ __ JumpCodeObject(a2);
+
+ __ bind(&install_baseline_code);
+ GenerateTailCallToReturnedCode(masm, Runtime::kInstallBaselineCode);
+ }
+
+ __ bind(&compile_lazy);
+ GenerateTailCallToReturnedCode(masm, Runtime::kCompileLazy);
+ // Unreachable code.
+ __ break_(0xCC);
+
+ __ bind(&stack_overflow);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ break_(0xCC);
+}
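+
+// Dispatch arithmetic used above (illustrative, assuming kPointerSize == 8):
+// the handler address is loaded from
+//   dispatch_table + bytecode * kPointerSize
+// e.g. bytecode 0x0b reads the handler pointer at dispatch_table + 0x58 and
+// calls it directly.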
+
+static void GenerateInterpreterPushArgs(MacroAssembler* masm, Register num_args,
+ Register start_address,
+ Register scratch, Register scratch2) {
+ // Find the address of the last argument.
+ __ Sub_d(scratch, num_args, Operand(1));
+ __ slli_d(scratch, scratch, kPointerSizeLog2);
+ __ Sub_d(start_address, start_address, scratch);
+
+ // Push the arguments.
+ __ PushArray(start_address, num_args, scratch, scratch2,
+ TurboAssembler::PushArrayOrder::kReverse);
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsThenCallImpl(
+ MacroAssembler* masm, ConvertReceiverMode receiver_mode,
+ InterpreterPushArgsMode mode) {
+ DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a2 : the address of the first argument to be pushed. Subsequent
+ // arguments should be consecutive above this, in the same order as
+ // they are to be pushed onto the stack.
+ // -- a1 : the target to call (can be any Object).
+ // -----------------------------------
+ Label stack_overflow;
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // The spread argument should not be pushed.
+ __ Sub_d(a0, a0, Operand(1));
+ }
+
+ __ Add_d(a3, a0, Operand(1)); // Add one for receiver.
+
+ __ StackOverflowCheck(a3, a4, t0, &stack_overflow);
+
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ // Don't copy receiver.
+ __ mov(a3, a0);
+ }
+
+ // This function modifies a2, t0 and a4.
+ GenerateInterpreterPushArgs(masm, a3, a2, a4, t0);
+
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ __ PushRoot(RootIndex::kUndefinedValue);
+ }
+
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Pass the spread in the register a2.
+    // a2 already points to the penultimate argument; the spread
+    // is below that.
+ __ Ld_d(a2, MemOperand(a2, -kSystemPointerSize));
+ }
+
+ // Call the target.
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallWithSpread),
+ RelocInfo::CODE_TARGET);
+ } else {
+ __ Jump(masm->isolate()->builtins()->Call(ConvertReceiverMode::kAny),
+ RelocInfo::CODE_TARGET);
+ }
+
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ break_(0xCC);
+ }
+}
+
+// static
+void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
+ MacroAssembler* masm, InterpreterPushArgsMode mode) {
+ // ----------- S t a t e -------------
+ // -- a0 : argument count (not including receiver)
+ // -- a3 : new target
+ // -- a1 : constructor to call
+ // -- a2 : allocation site feedback if available, undefined otherwise.
+ // -- a4 : address of the first argument
+ // -----------------------------------
+ Label stack_overflow;
+ __ addi_d(a6, a0, 1);
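+  // a6 = argc + 1: include the receiver slot in the stack-overflow check.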
+ __ StackOverflowCheck(a6, a5, t0, &stack_overflow);
+
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // The spread argument should not be pushed.
+ __ Sub_d(a0, a0, Operand(1));
+ }
+
+  // Push the arguments. This function modifies t0, a4 and a5.
+ GenerateInterpreterPushArgs(masm, a0, a4, a5, t0);
+
+ // Push a slot for the receiver.
+ __ Push(zero_reg);
+
+ if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Pass the spread in the register a2.
+ // a4 already points to the penultimate argument, the spread
+ // lies in the next interpreter register.
+ __ Ld_d(a2, MemOperand(a4, -kSystemPointerSize));
+ } else {
+ __ AssertUndefinedOrAllocationSite(a2, t0);
+ }
+
+ if (mode == InterpreterPushArgsMode::kArrayFunction) {
+ __ AssertFunction(a1);
+
+ // Tail call to the function-specific construct stub (still in the caller
+ // context at this point).
+ __ Jump(BUILTIN_CODE(masm->isolate(), ArrayConstructorImpl),
+ RelocInfo::CODE_TARGET);
+ } else if (mode == InterpreterPushArgsMode::kWithFinalSpread) {
+ // Call the constructor with a0, a1, and a3 unmodified.
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithSpread),
+ RelocInfo::CODE_TARGET);
+ } else {
+ DCHECK_EQ(InterpreterPushArgsMode::kOther, mode);
+ // Call the constructor with a0, a1, and a3 unmodified.
+ __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
+ }
+
+ __ bind(&stack_overflow);
+ {
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ // Unreachable code.
+ __ break_(0xCC);
+ }
+}
+
+static void Generate_InterpreterEnterBytecode(MacroAssembler* masm) {
+ // Set the return address to the correct point in the interpreter entry
+ // trampoline.
+ Label builtin_trampoline, trampoline_loaded;
+ Smi interpreter_entry_return_pc_offset(
+ masm->isolate()->heap()->interpreter_entry_return_pc_offset());
+ DCHECK_NE(interpreter_entry_return_pc_offset, Smi::zero());
+
+ // If the SFI function_data is an InterpreterData, the function will have a
+ // custom copy of the interpreter entry trampoline for profiling. If so,
+ // get the custom trampoline, otherwise grab the entry address of the global
+ // trampoline.
+ __ Ld_d(t0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ Ld_d(t0, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_d(t0, FieldMemOperand(t0, SharedFunctionInfo::kFunctionDataOffset));
+ __ GetObjectType(t0, kInterpreterDispatchTableRegister,
+ kInterpreterDispatchTableRegister);
+ __ Branch(&builtin_trampoline, ne, kInterpreterDispatchTableRegister,
+ Operand(INTERPRETER_DATA_TYPE));
+
+ __ Ld_d(t0,
+ FieldMemOperand(t0, InterpreterData::kInterpreterTrampolineOffset));
+ __ Add_d(t0, t0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ __ Branch(&trampoline_loaded);
+
+ __ bind(&builtin_trampoline);
+ __ li(t0, ExternalReference::
+ address_of_interpreter_entry_trampoline_instruction_start(
+ masm->isolate()));
+ __ Ld_d(t0, MemOperand(t0, 0));
+
+ __ bind(&trampoline_loaded);
+ __ Add_d(ra, t0, Operand(interpreter_entry_return_pc_offset.value()));
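+  // Returning from a bytecode handler will now land at the dispatch point
+  // inside the (global or per-function) interpreter entry trampoline.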
+
+ // Initialize the dispatch table register.
+ __ li(kInterpreterDispatchTableRegister,
+ ExternalReference::interpreter_dispatch_table_address(masm->isolate()));
+
+ // Get the bytecode array pointer from the frame.
+ __ Ld_d(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+
+ if (FLAG_debug_code) {
+    // Check that the function data field is actually a BytecodeArray object.
+ __ SmiTst(kInterpreterBytecodeArrayRegister, kScratchReg);
+ __ Assert(ne,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ kScratchReg, Operand(zero_reg));
+ __ GetObjectType(kInterpreterBytecodeArrayRegister, a1, a1);
+ __ Assert(eq,
+ AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry,
+ a1, Operand(BYTECODE_ARRAY_TYPE));
+ }
+
+ // Get the target bytecode offset from the frame.
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ if (FLAG_debug_code) {
+ Label okay;
+ __ Branch(&okay, ge, kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ // Unreachable code.
+ __ break_(0xCC);
+ __ bind(&okay);
+ }
+
+ // Dispatch to the target bytecode.
+ __ Add_d(a1, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister);
+ __ Ld_bu(a7, MemOperand(a1, 0));
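+  // a7 holds the bytecode value; compute the handler address as
+  // dispatch_table + bytecode * kPointerSize via the shift-and-add Alsl_d.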
+ __ Alsl_d(a1, a7, kInterpreterDispatchTableRegister, kPointerSizeLog2, t7);
+ __ Ld_d(kJavaScriptCallCodeStartRegister, MemOperand(a1, 0));
+ __ Jump(kJavaScriptCallCodeStartRegister);
+}
+
+void Builtins::Generate_InterpreterEnterAtNextBytecode(MacroAssembler* masm) {
+ // Advance the current bytecode offset stored within the given interpreter
+ // stack frame. This simulates what all bytecode handlers do upon completion
+ // of the underlying operation.
+ __ Ld_d(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ Ld_d(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister);
+
+ Label enter_bytecode, function_entry_bytecode;
+ __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+
+ // Load the current bytecode.
+ __ Add_d(a1, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister);
+ __ Ld_bu(a1, MemOperand(a1, 0));
+
+ // Advance to the next bytecode.
+ Label if_return;
+ AdvanceBytecodeOffsetOrReturn(masm, kInterpreterBytecodeArrayRegister,
+ kInterpreterBytecodeOffsetRegister, a1, a2, a3,
+ a4, &if_return);
+
+ __ bind(&enter_bytecode);
+ // Convert new bytecode offset to a Smi and save in the stackframe.
+ __ SmiTag(a2, kInterpreterBytecodeOffsetRegister);
+ __ St_d(a2, MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+
+ Generate_InterpreterEnterBytecode(masm);
+
+ __ bind(&function_entry_bytecode);
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset. Detect this case and advance to the first
+ // actual bytecode.
+ __ li(kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag));
+ __ Branch(&enter_bytecode);
+
+ // We should never take the if_return path.
+ __ bind(&if_return);
+ __ Abort(AbortReason::kInvalidBytecodeAdvance);
+}
+
+void Builtins::Generate_InterpreterEnterAtBytecode(MacroAssembler* masm) {
+ Generate_InterpreterEnterBytecode(masm);
+}
+
+namespace {
+void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
+ bool java_script_builtin,
+ bool with_result) {
+ const RegisterConfiguration* config(RegisterConfiguration::Default());
+ int allocatable_register_count = config->num_allocatable_general_registers();
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ if (with_result) {
+ if (java_script_builtin) {
+ __ mov(scratch, a0);
+ } else {
+ // Overwrite the hole inserted by the deoptimizer with the return value
+ // from the LAZY deopt point.
+ __ St_d(
+ a0,
+ MemOperand(
+ sp, config->num_allocatable_general_registers() * kPointerSize +
+ BuiltinContinuationFrameConstants::kFixedFrameSize));
+ }
+ }
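+  // Pop the allocatable registers saved by the deoptimizer, in reverse
+  // allocation order; for JavaScript builtins the argument count register
+  // holds a Smi and is untagged as it is restored.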
+ for (int i = allocatable_register_count - 1; i >= 0; --i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ __ Pop(Register::from_code(code));
+ if (java_script_builtin && code == kJavaScriptCallArgCountRegister.code()) {
+ __ SmiUntag(Register::from_code(code));
+ }
+ }
+
+ if (with_result && java_script_builtin) {
+    // Overwrite the hole inserted by the deoptimizer with the return value
+    // from the LAZY deopt point. a0 contains the arguments count; the return
+    // value from LAZY is always the last argument.
+ __ Add_d(a0, a0,
+ Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
+ __ Alsl_d(t0, a0, sp, kSystemPointerSizeLog2, t7);
+ __ St_d(scratch, MemOperand(t0, 0));
+ // Recover arguments count.
+ __ Sub_d(a0, a0,
+ Operand(BuiltinContinuationFrameConstants::kFixedSlotCount));
+ }
+
+ __ Ld_d(
+ fp,
+ MemOperand(sp, BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ // Load builtin index (stored as a Smi) and use it to get the builtin start
+ // address from the builtins table.
+ __ Pop(t0);
+ __ Add_d(sp, sp,
+ Operand(BuiltinContinuationFrameConstants::kFixedFrameSizeFromFp));
+ __ Pop(ra);
+ __ LoadEntryFromBuiltinIndex(t0);
+ __ Jump(t0);
+}
+} // namespace
+
+void Builtins::Generate_ContinueToCodeStubBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, false);
+}
+
+void Builtins::Generate_ContinueToCodeStubBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, false, true);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltin(MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, false);
+}
+
+void Builtins::Generate_ContinueToJavaScriptBuiltinWithResult(
+ MacroAssembler* masm) {
+ Generate_ContinueToBuiltinHelper(masm, true, true);
+}
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kNotifyDeoptimized);
+ }
+
+ DCHECK_EQ(kInterpreterAccumulatorRegister.code(), a0.code());
+ __ Ld_d(a0, MemOperand(sp, 0 * kPointerSize));
+ __ Add_d(sp, sp, Operand(1 * kPointerSize)); // Remove state.
+ __ Ret();
+}
+
+namespace {
+
+void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
+ Operand offset = Operand(zero_reg)) {
+ __ Add_d(ra, entry_address, offset);
+ // And "return" to the OSR entry point of the function.
+ __ Ret();
+}
+
+void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kCompileForOnStackReplacement);
+ }
+
+ // If the code object is null, just return to the caller.
+ __ Ret(eq, a0, Operand(Smi::zero()));
+
+ if (is_interpreter) {
+    // Drop the handler frame that is sitting on top of the actual
+    // JavaScript frame. This is the case when OSR is triggered from bytecode.
+ __ LeaveFrame(StackFrame::STUB);
+ }
+
+ // Load deoptimization data from the code object.
+ // <deopt_data> = <code>[#deoptimization_data_offset]
+ __ Ld_d(a1, MemOperand(a0, Code::kDeoptimizationDataOrInterpreterDataOffset -
+ kHeapObjectTag));
+
+ // Load the OSR entrypoint offset from the deoptimization data.
+ // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+ __ SmiUntag(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
+ DeoptimizationData::kOsrPcOffsetIndex) -
+ kHeapObjectTag));
+
+ // Compute the target address = code_obj + header_size + osr_offset
+ // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+ __ Add_d(a0, a0, a1);
+ Generate_OSREntry(masm, a0, Operand(Code::kHeaderSize - kHeapObjectTag));
+}
+} // namespace
+
+void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
+ return OnStackReplacement(masm, true);
+}
+
+void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
+ __ Ld_d(kContextRegister,
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
+ return OnStackReplacement(masm, false);
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : receiver
+  // -- sp[8]  : thisArg
+  // -- sp[16] : argArray
+ // -----------------------------------
+
+ Register argc = a0;
+ Register arg_array = a2;
+ Register receiver = a1;
+ Register this_arg = a5;
+ Register undefined_value = a3;
+ Register scratch = a4;
+
+ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
+
+ // 1. Load receiver into a1, argArray into a2 (if present), remove all
+ // arguments from the stack (including the receiver), and push thisArg (if
+ // present) instead.
+ {
+    // Claim (2 - argc) dummy arguments from the stack, to put the stack in a
+    // consistent state for a simple pop operation.
+
+ __ mov(scratch, argc);
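+    // Movz(dst, src, cond) moves src into dst when cond is zero, so each
+    // Movz below installs the undefined default exactly when argc leaves
+    // that slot unpopulated.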
+ __ Ld_d(this_arg, MemOperand(sp, kPointerSize));
+ __ Ld_d(arg_array, MemOperand(sp, 2 * kPointerSize));
+ __ Movz(arg_array, undefined_value, scratch); // if argc == 0
+ __ Movz(this_arg, undefined_value, scratch); // if argc == 0
+ __ Sub_d(scratch, scratch, Operand(1));
+ __ Movz(arg_array, undefined_value, scratch); // if argc == 1
+ __ Ld_d(receiver, MemOperand(sp, 0));
+ __ Alsl_d(sp, argc, sp, kSystemPointerSizeLog2, t7);
+ __ St_d(this_arg, MemOperand(sp, 0));
+ }
+
+ // ----------- S t a t e -------------
+ // -- a2 : argArray
+ // -- a1 : receiver
+ // -- a3 : undefined root value
+ // -- sp[0] : thisArg
+ // -----------------------------------
+
+ // 2. We don't need to check explicitly for callable receiver here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
+
+ // 3. Tail call with no arguments if argArray is null or undefined.
+ Label no_arguments;
+ __ JumpIfRoot(arg_array, RootIndex::kNullValue, &no_arguments);
+ __ Branch(&no_arguments, eq, arg_array, Operand(undefined_value));
+
+ // 4a. Apply the receiver to the given argArray.
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
+ RelocInfo::CODE_TARGET);
+
+ // 4b. The argArray is either null or undefined, so we tail call without any
+ // arguments to the receiver.
+ __ bind(&no_arguments);
+ {
+ __ mov(a0, zero_reg);
+ DCHECK(receiver == a1);
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+ }
+}
+
+// static
+void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
+ // 1. Get the callable to call (passed as receiver) from the stack.
+ { __ Pop(a1); }
+
+ // 2. Make sure we have at least one argument.
+ // a0: actual number of arguments
+ {
+ Label done;
+ __ Branch(&done, ne, a0, Operand(zero_reg));
+ __ PushRoot(RootIndex::kUndefinedValue);
+ __ Add_d(a0, a0, Operand(1));
+ __ bind(&done);
+ }
+
+ // 3. Adjust the actual number of arguments.
+ __ addi_d(a0, a0, -1);
+
+ // 4. Call the callable.
+ __ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : receiver
+ // -- sp[8] : target (if argc >= 1)
+ // -- sp[16] : thisArgument (if argc >= 2)
+ // -- sp[24] : argumentsList (if argc == 3)
+ // -----------------------------------
+
+ Register argc = a0;
+ Register arguments_list = a2;
+ Register target = a1;
+ Register this_argument = a5;
+ Register undefined_value = a3;
+ Register scratch = a4;
+
+ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
+
+ // 1. Load target into a1 (if present), argumentsList into a2 (if present),
+ // remove all arguments from the stack (including the receiver), and push
+ // thisArgument (if present) instead.
+ {
+    // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
+    // consistent state for a simple pop operation.
+
+ __ mov(scratch, argc);
+ __ Ld_d(target, MemOperand(sp, kPointerSize));
+ __ Ld_d(this_argument, MemOperand(sp, 2 * kPointerSize));
+ __ Ld_d(arguments_list, MemOperand(sp, 3 * kPointerSize));
+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 0
+ __ Movz(this_argument, undefined_value, scratch); // if argc == 0
+ __ Movz(target, undefined_value, scratch); // if argc == 0
+ __ Sub_d(scratch, scratch, Operand(1));
+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 1
+ __ Movz(this_argument, undefined_value, scratch); // if argc == 1
+ __ Sub_d(scratch, scratch, Operand(1));
+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 2
+
+ __ Alsl_d(sp, argc, sp, kSystemPointerSizeLog2, t7);
+ __ St_d(this_argument, MemOperand(sp, 0)); // Overwrite receiver
+ }
+
+ // ----------- S t a t e -------------
+ // -- a2 : argumentsList
+ // -- a1 : target
+ // -- a3 : undefined root value
+ // -- sp[0] : thisArgument
+ // -----------------------------------
+
+ // 2. We don't need to check explicitly for callable target here,
+ // since that's the first thing the Call/CallWithArrayLike builtins
+ // will do.
+
+ // 3. Apply the target to the given argumentsList.
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallWithArrayLike),
+ RelocInfo::CODE_TARGET);
+}
+
+void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : argc
+ // -- sp[0] : receiver
+ // -- sp[8] : target
+ // -- sp[16] : argumentsList
+ // -- sp[24] : new.target (optional)
+ // -----------------------------------
+
+ Register argc = a0;
+ Register arguments_list = a2;
+ Register target = a1;
+ Register new_target = a3;
+ Register undefined_value = a4;
+ Register scratch = a5;
+
+ __ LoadRoot(undefined_value, RootIndex::kUndefinedValue);
+
+ // 1. Load target into a1 (if present), argumentsList into a2 (if present),
+ // new.target into a3 (if present, otherwise use target), remove all
+  // arguments from the stack (including the receiver), and push undefined
+  // as the receiver instead.
+ {
+    // Claim (3 - argc) dummy arguments from the stack, to put the stack in a
+    // consistent state for a simple pop operation.
+
+ __ mov(scratch, argc);
+ __ Ld_d(target, MemOperand(sp, kPointerSize));
+ __ Ld_d(arguments_list, MemOperand(sp, 2 * kPointerSize));
+ __ Ld_d(new_target, MemOperand(sp, 3 * kPointerSize));
+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 0
+ __ Movz(new_target, undefined_value, scratch); // if argc == 0
+ __ Movz(target, undefined_value, scratch); // if argc == 0
+ __ Sub_d(scratch, scratch, Operand(1));
+ __ Movz(arguments_list, undefined_value, scratch); // if argc == 1
+ __ Movz(new_target, target, scratch); // if argc == 1
+ __ Sub_d(scratch, scratch, Operand(1));
+ __ Movz(new_target, target, scratch); // if argc == 2
+
+ __ Alsl_d(sp, argc, sp, kSystemPointerSizeLog2, t7);
+ __ St_d(undefined_value, MemOperand(sp, 0)); // Overwrite receiver
+ }
+
+ // ----------- S t a t e -------------
+ // -- a2 : argumentsList
+ // -- a1 : target
+ // -- a3 : new.target
+ // -- sp[0] : receiver (undefined)
+ // -----------------------------------
+
+ // 2. We don't need to check explicitly for constructor target here,
+ // since that's the first thing the Construct/ConstructWithArrayLike
+ // builtins will do.
+
+ // 3. We don't need to check explicitly for constructor new.target here,
+ // since that's the second thing the Construct/ConstructWithArrayLike
+ // builtins will do.
+
+ // 4. Construct the target with the given new.target and argumentsList.
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructWithArrayLike),
+ RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
+ Handle<Code> code) {
+ // ----------- S t a t e -------------
+ // -- a1 : target
+ // -- a0 : number of parameters on the stack (not including the receiver)
+ // -- a2 : arguments list (a FixedArray)
+ // -- a4 : len (number of elements to push from args)
+ // -- a3 : new.target (for [[Construct]])
+ // -----------------------------------
+ if (FLAG_debug_code) {
+ // Allow a2 to be a FixedArray, or a FixedDoubleArray if a4 == 0.
+ Label ok, fail;
+ __ AssertNotSmi(a2);
+ __ GetObjectType(a2, t8, t8);
+ __ Branch(&ok, eq, t8, Operand(FIXED_ARRAY_TYPE));
+ __ Branch(&fail, ne, t8, Operand(FIXED_DOUBLE_ARRAY_TYPE));
+ __ Branch(&ok, eq, a4, Operand(zero_reg));
+ // Fall through.
+ __ bind(&fail);
+ __ Abort(AbortReason::kOperandIsNotAFixedArray);
+
+ __ bind(&ok);
+ }
+
+ Register args = a2;
+ Register len = a4;
+
+ // Check for stack overflow.
+ Label stack_overflow;
+ __ StackOverflowCheck(len, kScratchReg, a5, &stack_overflow);
+
+  // Move the arguments already on the stack, including the receiver, down to
+  // the new stack top.
+ {
+ Label copy;
+ Register src = a6, dest = a7;
+ __ mov(src, sp);
+ __ slli_d(t0, a4, kSystemPointerSizeLog2);
+ __ Sub_d(sp, sp, Operand(t0));
+ // Update stack pointer.
+ __ mov(dest, sp);
+ __ Add_d(t0, a0, Operand(zero_reg));
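+    // t0 = argc; the copy loop below moves argc + 1 slots (the arguments
+    // plus the receiver) down to the new stack top.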
+
+ __ bind(&copy);
+ __ Ld_d(t1, MemOperand(src, 0));
+ __ St_d(t1, MemOperand(dest, 0));
+ __ Sub_d(t0, t0, Operand(1));
+ __ Add_d(src, src, Operand(kSystemPointerSize));
+ __ Add_d(dest, dest, Operand(kSystemPointerSize));
+ __ Branch(&copy, ge, t0, Operand(zero_reg));
+ }
+
+ // Push arguments onto the stack (thisArgument is already on the stack).
+ {
+ Label done, push, loop;
+ Register src = a6;
+ Register scratch = len;
+
+ __ addi_d(src, args, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ Add_d(a0, a0, len); // The 'len' argument for Call() or Construct().
+ __ Branch(&done, eq, len, Operand(zero_reg));
+ __ slli_d(scratch, len, kPointerSizeLog2);
+ __ Sub_d(scratch, sp, Operand(scratch));
+ __ LoadRoot(t1, RootIndex::kTheHoleValue);
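+    // While pushing, replace any hole sentinel from the FixedArray with
+    // undefined so that callees never observe the hole.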
+ __ bind(&loop);
+ __ Ld_d(a5, MemOperand(src, 0));
+ __ addi_d(src, src, kPointerSize);
+ __ Branch(&push, ne, a5, Operand(t1));
+ __ LoadRoot(a5, RootIndex::kUndefinedValue);
+ __ bind(&push);
+ __ St_d(a5, MemOperand(a7, 0));
+ __ Add_d(a7, a7, Operand(kSystemPointerSize));
+ __ Add_d(scratch, scratch, Operand(kSystemPointerSize));
+ __ Branch(&loop, ne, scratch, Operand(sp));
+ __ bind(&done);
+ }
+
+ // Tail-call to the actual Call or Construct builtin.
+ __ Jump(code, RelocInfo::CODE_TARGET);
+
+ __ bind(&stack_overflow);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+}
+
+// static
+void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
+ CallOrConstructMode mode,
+ Handle<Code> code) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a3 : the new.target (for [[Construct]] calls)
+ // -- a1 : the target to call (can be any Object)
+ // -- a2 : start index (to support rest parameters)
+ // -----------------------------------
+
+ // Check if new.target has a [[Construct]] internal method.
+ if (mode == CallOrConstructMode::kConstruct) {
+ Label new_target_constructor, new_target_not_constructor;
+ __ JumpIfSmi(a3, &new_target_not_constructor);
+ __ Ld_d(t1, FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ Ld_bu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ And(t1, t1, Operand(Map::Bits1::IsConstructorBit::kMask));
+ __ Branch(&new_target_constructor, ne, t1, Operand(zero_reg));
+ __ bind(&new_target_not_constructor);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ Push(a3);
+ __ CallRuntime(Runtime::kThrowNotConstructor);
+ }
+ __ bind(&new_target_constructor);
+ }
+
+ Label stack_done, stack_overflow;
+ __ Ld_d(a7, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ __ Sub_w(a7, a7, a2);
+ __ Branch(&stack_done, le, a7, Operand(zero_reg));
+ {
+ // Check for stack overflow.
+ __ StackOverflowCheck(a7, a4, a5, &stack_overflow);
+
+ // Forward the arguments from the caller frame.
+
+ // Point to the first argument to copy (skipping the receiver).
+ __ Add_d(a6, fp,
+ Operand(CommonFrameConstants::kFixedFrameSizeAboveFp +
+ kSystemPointerSize));
+ __ Alsl_d(a6, a2, a6, kSystemPointerSizeLog2, t7);
+
+    // Move the arguments already on the stack, including the receiver, down
+    // to the new stack top.
+ {
+ Label copy;
+ Register src = t0, dest = a2;
+ __ mov(src, sp);
+ // Update stack pointer.
+ __ slli_d(t1, a7, kSystemPointerSizeLog2);
+ __ Sub_d(sp, sp, Operand(t1));
+ __ mov(dest, sp);
+ __ Add_d(t2, a0, Operand(zero_reg));
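+      // t2 = argc; the loop copies argc + 1 slots (arguments plus receiver).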
+
+ __ bind(&copy);
+ __ Ld_d(t1, MemOperand(src, 0));
+ __ St_d(t1, MemOperand(dest, 0));
+ __ Sub_d(t2, t2, Operand(1));
+ __ Add_d(src, src, Operand(kSystemPointerSize));
+ __ Add_d(dest, dest, Operand(kSystemPointerSize));
+ __ Branch(&copy, ge, t2, Operand(zero_reg));
+ }
+
+ // Copy arguments from the caller frame.
+ // TODO(victorgomes): Consider using forward order as potentially more cache
+ // friendly.
+ {
+ Label loop;
+ __ Add_d(a0, a0, a7);
+ __ bind(&loop);
+ {
+ __ Sub_w(a7, a7, Operand(1));
+ __ Alsl_d(t0, a7, a6, kPointerSizeLog2, t7);
+ __ Ld_d(kScratchReg, MemOperand(t0, 0));
+ __ Alsl_d(t0, a7, a2, kPointerSizeLog2, t7);
+ __ St_d(kScratchReg, MemOperand(t0, 0));
+ __ Branch(&loop, ne, a7, Operand(zero_reg));
+ }
+ }
+ }
+ __ Branch(&stack_done);
+ __ bind(&stack_overflow);
+ __ TailCallRuntime(Runtime::kThrowStackOverflow);
+ __ bind(&stack_done);
+
+ // Tail-call to the {code} handler.
+ __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_CallFunction(MacroAssembler* masm,
+ ConvertReceiverMode mode) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSFunction)
+ // -----------------------------------
+ __ AssertFunction(a1);
+
+ // See ES6 section 9.2.1 [[Call]] ( thisArgument, argumentsList)
+ // Check that function is not a "classConstructor".
+ Label class_constructor;
+ __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_wu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
+ __ And(kScratchReg, a3,
+ Operand(SharedFunctionInfo::IsClassConstructorBit::kMask));
+ __ Branch(&class_constructor, ne, kScratchReg, Operand(zero_reg));
+
+ // Enter the context of the function; ToObject has to run in the function
+ // context, and we also need to take the global proxy from the function
+ // context in case of conversion.
+ __ Ld_d(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ // We need to convert the receiver for non-native sloppy mode functions.
+ Label done_convert;
+ __ Ld_wu(a3, FieldMemOperand(a2, SharedFunctionInfo::kFlagsOffset));
+ __ And(kScratchReg, a3,
+ Operand(SharedFunctionInfo::IsNativeBit::kMask |
+ SharedFunctionInfo::IsStrictBit::kMask));
+ __ Branch(&done_convert, ne, kScratchReg, Operand(zero_reg));
+ {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSFunction)
+ // -- a2 : the shared function info.
+ // -- cp : the function context.
+ // -----------------------------------
+
+ if (mode == ConvertReceiverMode::kNullOrUndefined) {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(a3);
+ } else {
+ Label convert_to_object, convert_receiver;
+ __ LoadReceiver(a3, a0);
+ __ JumpIfSmi(a3, &convert_to_object);
+ STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
+ __ GetObjectType(a3, a4, a4);
+ __ Branch(&done_convert, hs, a4, Operand(FIRST_JS_RECEIVER_TYPE));
+ if (mode != ConvertReceiverMode::kNotNullOrUndefined) {
+ Label convert_global_proxy;
+ __ JumpIfRoot(a3, RootIndex::kUndefinedValue, &convert_global_proxy);
+ __ JumpIfNotRoot(a3, RootIndex::kNullValue, &convert_to_object);
+ __ bind(&convert_global_proxy);
+ {
+ // Patch receiver to global proxy.
+ __ LoadGlobalProxy(a3);
+ }
+ __ Branch(&convert_receiver);
+ }
+ __ bind(&convert_to_object);
+ {
+ // Convert receiver using ToObject.
+ // TODO(bmeurer): Inline the allocation here to avoid building the frame
+ // in the fast case? (fall back to AllocateInNewSpace?)
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ SmiTag(a0);
+ __ Push(a0, a1);
+ __ mov(a0, a3);
+ __ Push(cp);
+ __ Call(BUILTIN_CODE(masm->isolate(), ToObject),
+ RelocInfo::CODE_TARGET);
+ __ Pop(cp);
+ __ mov(a3, a0);
+ __ Pop(a0, a1);
+ __ SmiUntag(a0);
+ }
+ __ Ld_d(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ bind(&convert_receiver);
+ }
+ __ StoreReceiver(a3, a0, kScratchReg);
+ }
+ __ bind(&done_convert);
+
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSFunction)
+ // -- a2 : the shared function info.
+ // -- cp : the function context.
+ // -----------------------------------
+
+ __ Ld_hu(
+ a2, FieldMemOperand(a2, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ InvokeFunctionCode(a1, no_reg, a2, a0, InvokeType::kJump);
+
+  // The function is a "classConstructor"; we need to raise an exception.
+ __ bind(&class_constructor);
+ {
+ FrameScope frame(masm, StackFrame::INTERNAL);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kThrowConstructorNonCallableError);
+ }
+}
+
+// static
+void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -----------------------------------
+ __ AssertBoundFunction(a1);
+
+ // Patch the receiver to [[BoundThis]].
+ {
+ __ Ld_d(t0, FieldMemOperand(a1, JSBoundFunction::kBoundThisOffset));
+ __ StoreReceiver(t0, a0, kScratchReg);
+ }
+
+ // Load [[BoundArguments]] into a2 and length of that into a4.
+ __ Ld_d(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
+ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
+ // -- a4 : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ slli_d(a5, a4, kPointerSizeLog2);
+ __ Sub_d(t0, sp, Operand(a5));
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ __ LoadStackLimit(kScratchReg,
+ MacroAssembler::StackLimitKind::kRealStackLimit);
+ __ Branch(&done, hs, t0, Operand(kScratchReg));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
+
+ // Pop receiver.
+ __ Pop(t0);
+
+ // Push [[BoundArguments]].
+ {
+ Label loop, done_loop;
+ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ Add_d(a0, a0, Operand(a4));
+ __ Add_d(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
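+    // Push the bound arguments from last to first; a4 counts down from the
+    // [[BoundArguments]] length.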
+ __ bind(&loop);
+ __ Sub_d(a4, a4, Operand(1));
+ __ Branch(&done_loop, lt, a4, Operand(zero_reg));
+ __ Alsl_d(a5, a4, a2, kPointerSizeLog2, t7);
+ __ Ld_d(kScratchReg, MemOperand(a5, 0));
+ __ Push(kScratchReg);
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Push receiver.
+ __ Push(t0);
+
+ // Call the [[BoundTargetFunction]] via the Call builtin.
+ __ Ld_d(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ Jump(BUILTIN_CODE(masm->isolate(), Call_ReceiverIsAny),
+ RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the target to call (can be any Object).
+ // -----------------------------------
+
+ Label non_callable, non_smi;
+ __ JumpIfSmi(a1, &non_callable);
+ __ bind(&non_smi);
+ __ LoadMap(t1, a1);
+ __ GetInstanceTypeRange(t1, t2, FIRST_JS_FUNCTION_TYPE, t8);
+ __ Jump(masm->isolate()->builtins()->CallFunction(mode),
+ RelocInfo::CODE_TARGET, ls, t8,
+ Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallBoundFunction),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+
+ // Check if target has a [[Call]] internal method.
+ __ Ld_bu(t1, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ And(t1, t1, Operand(Map::Bits1::IsCallableBit::kMask));
+ __ Branch(&non_callable, eq, t1, Operand(zero_reg));
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), CallProxy), RelocInfo::CODE_TARGET, eq,
+ t2, Operand(JS_PROXY_TYPE));
+
+ // 2. Call to something else, which might have a [[Call]] internal method (if
+ // not we raise an exception).
+ // Overwrite the original receiver with the (original) target.
+ __ StoreReceiver(a1, a0, kScratchReg);
+ // Let the "call_as_function_delegate" take care of the rest.
+ __ LoadNativeContextSlot(a1, Context::CALL_AS_FUNCTION_DELEGATE_INDEX);
+ __ Jump(masm->isolate()->builtins()->CallFunction(
+ ConvertReceiverMode::kNotNullOrUndefined),
+ RelocInfo::CODE_TARGET);
+
+ // 3. Call to something that is not callable.
+ __ bind(&non_callable);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(a1);
+ __ CallRuntime(Runtime::kThrowCalledNonCallable);
+ }
+}
+
+void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the constructor to call (checked to be a JSFunction)
+ // -- a3 : the new target (checked to be a constructor)
+ // -----------------------------------
+ __ AssertConstructor(a1);
+ __ AssertFunction(a1);
+
+  // The calling convention for function-specific ConstructStubs requires
+  // a2 to contain either an AllocationSite or undefined.
+ __ LoadRoot(a2, RootIndex::kUndefinedValue);
+
+ Label call_generic_stub;
+
+ // Jump to JSBuiltinsConstructStub or JSConstructStubGeneric.
+ __ Ld_d(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_wu(a4, FieldMemOperand(a4, SharedFunctionInfo::kFlagsOffset));
+ __ And(a4, a4, Operand(SharedFunctionInfo::ConstructAsBuiltinBit::kMask));
+ __ Branch(&call_generic_stub, eq, a4, Operand(zero_reg));
+
+ __ Jump(BUILTIN_CODE(masm->isolate(), JSBuiltinsConstructStub),
+ RelocInfo::CODE_TARGET);
+
+ __ bind(&call_generic_stub);
+ __ Jump(BUILTIN_CODE(masm->isolate(), JSConstructStubGeneric),
+ RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -- a3 : the new target (checked to be a constructor)
+ // -----------------------------------
+ __ AssertConstructor(a1);
+ __ AssertBoundFunction(a1);
+
+ // Load [[BoundArguments]] into a2 and length of that into a4.
+ __ Ld_d(a2, FieldMemOperand(a1, JSBoundFunction::kBoundArgumentsOffset));
+ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the function to call (checked to be a JSBoundFunction)
+ // -- a2 : the [[BoundArguments]] (implemented as FixedArray)
+ // -- a3 : the new target (checked to be a constructor)
+ // -- a4 : the number of [[BoundArguments]]
+ // -----------------------------------
+
+ // Reserve stack space for the [[BoundArguments]].
+ {
+ Label done;
+ __ slli_d(a5, a4, kPointerSizeLog2);
+ __ Sub_d(t0, sp, Operand(a5));
+ // Check the stack for overflow. We are not trying to catch interruptions
+ // (i.e. debug break and preemption) here, so check the "real stack limit".
+ __ LoadStackLimit(kScratchReg,
+ MacroAssembler::StackLimitKind::kRealStackLimit);
+ __ Branch(&done, hs, t0, Operand(kScratchReg));
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+ __ CallRuntime(Runtime::kThrowStackOverflow);
+ }
+ __ bind(&done);
+ }
+
+ // Pop receiver.
+ __ Pop(t0);
+
+ // Push [[BoundArguments]].
+ {
+ Label loop, done_loop;
+ __ SmiUntag(a4, FieldMemOperand(a2, FixedArray::kLengthOffset));
+ __ Add_d(a0, a0, Operand(a4));
+ __ Add_d(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ bind(&loop);
+ __ Sub_d(a4, a4, Operand(1));
+ __ Branch(&done_loop, lt, a4, Operand(zero_reg));
+ __ Alsl_d(a5, a4, a2, kPointerSizeLog2, t7);
+ __ Ld_d(kScratchReg, MemOperand(a5, 0));
+ __ Push(kScratchReg);
+ __ Branch(&loop);
+ __ bind(&done_loop);
+ }
+
+ // Push receiver.
+ __ Push(t0);
+
+ // Patch new.target to [[BoundTargetFunction]] if new.target equals target.
+ {
+ Label skip_load;
+ __ Branch(&skip_load, ne, a1, Operand(a3));
+ __ Ld_d(a3,
+ FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ bind(&skip_load);
+ }
+
+ // Construct the [[BoundTargetFunction]] via the Construct builtin.
+ __ Ld_d(a1, FieldMemOperand(a1, JSBoundFunction::kBoundTargetFunctionOffset));
+ __ Jump(BUILTIN_CODE(masm->isolate(), Construct), RelocInfo::CODE_TARGET);
+}
+
+// static
+void Builtins::Generate_Construct(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- a0 : the number of arguments (not including the receiver)
+ // -- a1 : the constructor to call (can be any Object)
+ // -- a3 : the new target (either the same as the constructor or
+ // the JSFunction on which new was invoked initially)
+ // -----------------------------------
+
+ // Check if target is a Smi.
+ Label non_constructor, non_proxy;
+ __ JumpIfSmi(a1, &non_constructor);
+
+ // Check if target has a [[Construct]] internal method.
+ __ Ld_d(t1, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ Ld_bu(t3, FieldMemOperand(t1, Map::kBitFieldOffset));
+ __ And(t3, t3, Operand(Map::Bits1::IsConstructorBit::kMask));
+ __ Branch(&non_constructor, eq, t3, Operand(zero_reg));
+
+ // Dispatch based on instance type.
+ __ GetInstanceTypeRange(t1, t2, FIRST_JS_FUNCTION_TYPE, t8);
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructFunction),
+ RelocInfo::CODE_TARGET, ls, t8,
+ Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
+
+ // Only dispatch to bound functions after checking whether they are
+ // constructors.
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructBoundFunction),
+ RelocInfo::CODE_TARGET, eq, t2, Operand(JS_BOUND_FUNCTION_TYPE));
+
+ // Only dispatch to proxies after checking whether they are constructors.
+ __ Branch(&non_proxy, ne, t2, Operand(JS_PROXY_TYPE));
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructProxy),
+ RelocInfo::CODE_TARGET);
+
+ // Called Construct on an exotic Object with a [[Construct]] internal method.
+ __ bind(&non_proxy);
+ {
+ // Overwrite the original receiver with the (original) target.
+ __ StoreReceiver(a1, a0, kScratchReg);
+ // Let the "call_as_constructor_delegate" take care of the rest.
+ __ LoadNativeContextSlot(a1, Context::CALL_AS_CONSTRUCTOR_DELEGATE_INDEX);
+ __ Jump(masm->isolate()->builtins()->CallFunction(),
+ RelocInfo::CODE_TARGET);
+ }
+
+ // Called Construct on an Object that doesn't have a [[Construct]] internal
+ // method.
+ __ bind(&non_constructor);
+ __ Jump(BUILTIN_CODE(masm->isolate(), ConstructedNonConstructable),
+ RelocInfo::CODE_TARGET);
+}
+
+#if V8_ENABLE_WEBASSEMBLY
+void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
+ // The function index was put in t0 by the jump table trampoline.
+  // Convert it to a Smi for the runtime call.
+ __ SmiTag(kWasmCompileLazyFuncIndexRegister);
+ {
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
+ FrameScope scope(masm, StackFrame::WASM_COMPILE_LAZY);
+
+ // Save all parameter registers (see wasm-linkage.h). They might be
+ // overwritten in the runtime call below. We don't have any callee-saved
+ // registers in wasm, so no need to store anything else.
+ RegList gp_regs = 0;
+ for (Register gp_param_reg : wasm::kGpParamRegisters) {
+ gp_regs |= gp_param_reg.bit();
+ }
+
+ RegList fp_regs = 0;
+ for (DoubleRegister fp_param_reg : wasm::kFpParamRegisters) {
+ fp_regs |= fp_param_reg.bit();
+ }
+
+ CHECK_EQ(NumRegs(gp_regs), arraysize(wasm::kGpParamRegisters));
+ CHECK_EQ(NumRegs(fp_regs), arraysize(wasm::kFpParamRegisters));
+ CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedGpParamRegs,
+ NumRegs(gp_regs));
+ CHECK_EQ(WasmCompileLazyFrameConstants::kNumberOfSavedFpParamRegs,
+ NumRegs(fp_regs));
+
+ __ MultiPush(gp_regs);
+ __ MultiPushFPU(fp_regs);
+
+    // kFixedFrameSizeFromFp is hard-coded to include space for SIMD
+    // registers, so we still need to allocate extra (unused) space on the
+    // stack as if they were saved.
+ __ Sub_d(sp, sp, base::bits::CountPopulation(fp_regs) * kDoubleSize);
+
+    // Pass the instance and the function index as explicit arguments to the
+    // runtime function.
+ __ Push(kWasmInstanceRegister, kWasmCompileLazyFuncIndexRegister);
+ // Initialize the JavaScript context with 0. CEntry will use it to
+ // set the current context on the isolate.
+ __ Move(kContextRegister, Smi::zero());
+ __ CallRuntime(Runtime::kWasmCompileLazy, 2);
+ __ mov(t8, a0);
+
+ __ Add_d(sp, sp, base::bits::CountPopulation(fp_regs) * kDoubleSize);
+ // Restore registers.
+ __ MultiPopFPU(fp_regs);
+ __ MultiPop(gp_regs);
+ }
+ // Finally, jump to the entrypoint.
+ __ Jump(t8);
+}
+
+void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
+ HardAbortScope hard_abort(masm); // Avoid calls to Abort.
+ {
+ FrameScope scope(masm, StackFrame::WASM_DEBUG_BREAK);
+
+ // Save all parameter registers. They might hold live values, we restore
+ // them after the runtime call.
+ __ MultiPush(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ __ MultiPushFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
+
+ // Initialize the JavaScript context with 0. CEntry will use it to
+ // set the current context on the isolate.
+ __ Move(cp, Smi::zero());
+ __ CallRuntime(Runtime::kWasmDebugBreak, 0);
+
+ // Restore registers.
+ __ MultiPopFPU(WasmDebugBreakFrameConstants::kPushedFpRegs);
+ __ MultiPop(WasmDebugBreakFrameConstants::kPushedGpRegs);
+ }
+ __ Ret();
+}
+
+void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
+ __ Trap();
+}
+
+void Builtins::Generate_WasmOnStackReplace(MacroAssembler* masm) {
+ // Only needed on x64.
+ __ Trap();
+}
+
+#endif // V8_ENABLE_WEBASSEMBLY
+
+void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
+ SaveFPRegsMode save_doubles, ArgvMode argv_mode,
+ bool builtin_exit_frame) {
+ // Called from JavaScript; parameters are on stack as if calling JS function
+ // a0: number of arguments including receiver
+ // a1: pointer to builtin function
+ // fp: frame pointer (restored after C call)
+ // sp: stack pointer (restored as callee's sp after C call)
+ // cp: current context (C callee-saved)
+ //
+ // If argv_mode == ArgvMode::kRegister:
+ // a2: pointer to the first argument
+
+ if (argv_mode == ArgvMode::kRegister) {
+ // Move argv into the correct register.
+ __ mov(s1, a2);
+ } else {
+ // Compute the argv pointer in a callee-saved register.
+ __ Alsl_d(s1, a0, sp, kPointerSizeLog2, t7);
+ __ Sub_d(s1, s1, kPointerSize);
+ }
+
+ // Enter the exit frame that transitions from JavaScript to C++.
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(
+ save_doubles == SaveFPRegsMode::kSave, 0,
+ builtin_exit_frame ? StackFrame::BUILTIN_EXIT : StackFrame::EXIT);
+
+ // s0: number of arguments including receiver (C callee-saved)
+ // s1: pointer to first argument (C callee-saved)
+ // s2: pointer to builtin function (C callee-saved)
+
+ // Prepare arguments for C routine.
+ // a0 = argc
+ __ mov(s0, a0);
+ __ mov(s2, a1);
+
+  // We are calling compiled C/C++ code. The arguments are passed in a0
+  // (argc), a1 (argv) and a2 (isolate), set up below.
+
+ __ AssertStackIsAligned();
+
+ // a0 = argc, a1 = argv, a2 = isolate
+ __ li(a2, ExternalReference::isolate_address(masm->isolate()));
+ __ mov(a1, s1);
+
+ __ StoreReturnAddressAndCall(s2);
+
+ // Result returned in a0 or a1:a0 - do not destroy these registers!
+
+ // Check result for exception sentinel.
+ Label exception_returned;
+ __ LoadRoot(a4, RootIndex::kException);
+ __ Branch(&exception_returned, eq, a4, Operand(a0));
+
+ // Check that there is no pending exception, otherwise we
+ // should have returned the exception sentinel.
+ if (FLAG_debug_code) {
+ Label okay;
+ ExternalReference pending_exception_address = ExternalReference::Create(
+ IsolateAddressId::kPendingExceptionAddress, masm->isolate());
+ __ li(a2, pending_exception_address);
+ __ Ld_d(a2, MemOperand(a2, 0));
+ __ LoadRoot(a4, RootIndex::kTheHoleValue);
+    // Cannot use Check here, as it attempts to generate a call into the
+    // runtime.
+ __ Branch(&okay, eq, a4, Operand(a2));
+ __ stop();
+ __ bind(&okay);
+ }
+
+ // Exit C frame and return.
+ // a0:a1: result
+ // sp: stack pointer
+ // fp: frame pointer
+ Register argc = argv_mode == ArgvMode::kRegister
+ // We don't want to pop arguments so set argc to no_reg.
+ ? no_reg
+ // s0: still holds argc (callee-saved).
+ : s0;
+ __ LeaveExitFrame(save_doubles == SaveFPRegsMode::kSave, argc, EMIT_RETURN);
+
+ // Handling of exception.
+ __ bind(&exception_returned);
+
+ ExternalReference pending_handler_context_address = ExternalReference::Create(
+ IsolateAddressId::kPendingHandlerContextAddress, masm->isolate());
+ ExternalReference pending_handler_entrypoint_address =
+ ExternalReference::Create(
+ IsolateAddressId::kPendingHandlerEntrypointAddress, masm->isolate());
+ ExternalReference pending_handler_fp_address = ExternalReference::Create(
+ IsolateAddressId::kPendingHandlerFPAddress, masm->isolate());
+ ExternalReference pending_handler_sp_address = ExternalReference::Create(
+ IsolateAddressId::kPendingHandlerSPAddress, masm->isolate());
+
+  // Ask the runtime for help to determine the handler. This will set a0 to
+  // contain the current pending exception; don't clobber it.
+ ExternalReference find_handler =
+ ExternalReference::Create(Runtime::kUnwindAndFindExceptionHandler);
+ {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ PrepareCallCFunction(3, 0, a0);
+ __ mov(a0, zero_reg);
+ __ mov(a1, zero_reg);
+ __ li(a2, ExternalReference::isolate_address(masm->isolate()));
+ __ CallCFunction(find_handler, 3);
+ }
+
+ // Retrieve the handler context, SP and FP.
+ __ li(cp, pending_handler_context_address);
+ __ Ld_d(cp, MemOperand(cp, 0));
+ __ li(sp, pending_handler_sp_address);
+ __ Ld_d(sp, MemOperand(sp, 0));
+ __ li(fp, pending_handler_fp_address);
+ __ Ld_d(fp, MemOperand(fp, 0));
+
+ // If the handler is a JS frame, restore the context to the frame. Note that
+ // the context will be set to (cp == 0) for non-JS frames.
+ Label zero;
+ __ Branch(&zero, eq, cp, Operand(zero_reg));
+ __ St_d(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ bind(&zero);
+
+ // Clear c_entry_fp, like we do in `LeaveExitFrame`.
+ {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ __ li(scratch, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ masm->isolate()));
+ __ St_d(zero_reg, MemOperand(scratch, 0));
+ }
+
+ // Compute the handler entry address and jump to it.
+ __ li(t7, pending_handler_entrypoint_address);
+ __ Ld_d(t7, MemOperand(t7, 0));
+ __ Jump(t7);
+}
+
+void Builtins::Generate_DoubleToI(MacroAssembler* masm) {
+ Label done;
+ Register result_reg = t0;
+
+ Register scratch = GetRegisterThatIsNotOneOf(result_reg);
+ Register scratch2 = GetRegisterThatIsNotOneOf(result_reg, scratch);
+ Register scratch3 = GetRegisterThatIsNotOneOf(result_reg, scratch, scratch2);
+ DoubleRegister double_scratch = kScratchDoubleReg;
+
+ // Account for saved regs.
+ const int kArgumentOffset = 4 * kPointerSize;
+
+ __ Push(result_reg);
+ __ Push(scratch, scratch2, scratch3);
+
+ // Load double input.
+ __ Fld_d(double_scratch, MemOperand(sp, kArgumentOffset));
+
+ // Try a conversion to a signed integer.
+ __ ftintrz_w_d(double_scratch, double_scratch);
+ // Move the converted value into the result register.
+ __ movfr2gr_s(scratch3, double_scratch);
+
+ // Retrieve and restore the FCSR.
+ __ movfcsr2gr(scratch);
+
+ // Check for overflow and NaNs.
+ __ And(scratch, scratch,
+ kFCSRExceptionCauseMask ^ kFCSRDivideByZeroCauseMask);
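+  // This keeps every FCSR cause bit except divide-by-zero; any remaining set
+  // bit sends us to the manual truncation path below.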
+ // If we had no exceptions then set result_reg and we are done.
+ Label error;
+ __ Branch(&error, ne, scratch, Operand(zero_reg));
+ __ Move(result_reg, scratch3);
+ __ Branch(&done);
+ __ bind(&error);
+
+ // Load the double value and perform a manual truncation.
+ Register input_high = scratch2;
+ Register input_low = scratch3;
+
+ __ Ld_w(input_low,
+ MemOperand(sp, kArgumentOffset + Register::kMantissaOffset));
+ __ Ld_w(input_high,
+ MemOperand(sp, kArgumentOffset + Register::kExponentOffset));
+
+ Label normal_exponent;
+ // Extract the biased exponent in result.
+ __ bstrpick_w(result_reg, input_high,
+ HeapNumber::kExponentShift + HeapNumber::kExponentBits - 1,
+ HeapNumber::kExponentShift);
+
+ // Check for Infinity and NaNs, which should return 0.
+ __ Sub_w(scratch, result_reg, HeapNumber::kExponentMask);
+ __ Movz(result_reg, zero_reg, scratch);
+ __ Branch(&done, eq, scratch, Operand(zero_reg));
+
+ // Express exponent as delta to (number of mantissa bits + 31).
+ __ Sub_w(result_reg, result_reg,
+ Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
+
+ // If the delta is strictly positive, all bits would be shifted away,
+ // which means that we can return 0.
+ __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
+ __ mov(result_reg, zero_reg);
+ __ Branch(&done);
+
+ __ bind(&normal_exponent);
+ const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
+ // Calculate shift.
+ __ Add_w(scratch, result_reg,
+ Operand(kShiftBase + HeapNumber::kMantissaBits));
+
+ // Save the sign.
+ Register sign = result_reg;
+ result_reg = no_reg;
+ __ And(sign, input_high, Operand(HeapNumber::kSignMask));
+
+ // On ARM shifts > 31 bits are valid and will result in zero. On LOONG64 we
+ // need to check for this specific case.
+ Label high_shift_needed, high_shift_done;
+ __ Branch(&high_shift_needed, lt, scratch, Operand(32));
+ __ mov(input_high, zero_reg);
+ __ Branch(&high_shift_done);
+ __ bind(&high_shift_needed);
+
+ // Set the implicit 1 before the mantissa part in input_high.
+ __ Or(input_high, input_high,
+ Operand(1 << HeapNumber::kMantissaBitsInTopWord));
+ // Shift the mantissa bits to the correct position.
+ // We don't need to clear non-mantissa bits as they will be shifted away.
+  // If they weren't, it would mean that the answer is in the 32-bit range.
+ __ sll_w(input_high, input_high, scratch);
+
+ __ bind(&high_shift_done);
+
+ // Replace the shifted bits with bits from the lower mantissa word.
+ Label pos_shift, shift_done;
+ __ li(kScratchReg, 32);
+ __ sub_w(scratch, kScratchReg, scratch);
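+  // scratch is now 32 - shift: if negative, the low mantissa word must be
+  // shifted left by (shift - 32); otherwise it is shifted right by
+  // (32 - shift).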
+ __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
+
+ // Negate scratch.
+ __ Sub_w(scratch, zero_reg, scratch);
+ __ sll_w(input_low, input_low, scratch);
+ __ Branch(&shift_done);
+
+ __ bind(&pos_shift);
+ __ srl_w(input_low, input_low, scratch);
+
+ __ bind(&shift_done);
+ __ Or(input_high, input_high, Operand(input_low));
+ // Restore sign if necessary.
+ __ mov(scratch, sign);
+ result_reg = sign;
+ sign = no_reg;
+ __ Sub_w(result_reg, zero_reg, input_high);
+ __ Movz(result_reg, input_high, scratch);
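+  // result_reg = (sign != 0) ? -input_high : input_high; Movz replaces the
+  // negated value with input_high only when the sign word is zero.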
+
+ __ bind(&done);
+
+ __ St_d(result_reg, MemOperand(sp, kArgumentOffset));
+ __ Pop(scratch, scratch2, scratch3);
+ __ Pop(result_reg);
+ __ Ret();
+}
+
+namespace {
+
+int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+ int64_t offset = (ref0.address() - ref1.address());
+ DCHECK(static_cast<int>(offset) == offset);
+ return static_cast<int>(offset);
+}
+
+// Calls an API function. Allocates a HandleScope, extracts the returned
+// value from the handle, and propagates exceptions. Restores the context.
+// stack_space is the space to be unwound on exit (it includes the call JS
+// arguments space and the additional space allocated for the fast call).
+void CallApiFunctionAndReturn(MacroAssembler* masm, Register function_address,
+ ExternalReference thunk_ref, int stack_space,
+ MemOperand* stack_space_operand,
+ MemOperand return_value_operand) {
+ Isolate* isolate = masm->isolate();
+ ExternalReference next_address =
+ ExternalReference::handle_scope_next_address(isolate);
+ const int kNextOffset = 0;
+ const int kLimitOffset = AddressOffset(
+ ExternalReference::handle_scope_limit_address(isolate), next_address);
+ const int kLevelOffset = AddressOffset(
+ ExternalReference::handle_scope_level_address(isolate), next_address);
+
+ DCHECK(function_address == a1 || function_address == a2);
+
+ Label profiler_enabled, end_profiler_check;
+ __ li(t7, ExternalReference::is_profiling_address(isolate));
+ __ Ld_b(t7, MemOperand(t7, 0));
+ __ Branch(&profiler_enabled, ne, t7, Operand(zero_reg));
+ __ li(t7, ExternalReference::address_of_runtime_stats_flag());
+ __ Ld_w(t7, MemOperand(t7, 0));
+ __ Branch(&profiler_enabled, ne, t7, Operand(zero_reg));
+ {
+ // Call the api function directly.
+ __ mov(t7, function_address);
+ __ Branch(&end_profiler_check);
+ }
+
+ __ bind(&profiler_enabled);
+ {
+ // Additional parameter is the address of the actual callback.
+ __ li(t7, thunk_ref);
+ }
+ __ bind(&end_profiler_check);
+
+ // Allocate HandleScope in callee-save registers.
+ __ li(s5, next_address);
+ __ Ld_d(s0, MemOperand(s5, kNextOffset));
+ __ Ld_d(s1, MemOperand(s5, kLimitOffset));
+ __ Ld_w(s2, MemOperand(s5, kLevelOffset));
+ __ Add_w(s2, s2, Operand(1));
+ __ St_w(s2, MemOperand(s5, kLevelOffset));
+
+ __ StoreReturnAddressAndCall(t7);
+
+ Label promote_scheduled_exception;
+ Label delete_allocated_handles;
+ Label leave_exit_frame;
+ Label return_value_loaded;
+
+ // Load value from ReturnValue.
+ __ Ld_d(a0, return_value_operand);
+ __ bind(&return_value_loaded);
+
+ // No more valid handles (the result handle was the last one). Restore
+ // previous handle scope.
+ __ St_d(s0, MemOperand(s5, kNextOffset));
+ if (FLAG_debug_code) {
+ __ Ld_w(a1, MemOperand(s5, kLevelOffset));
+ __ Check(eq, AbortReason::kUnexpectedLevelAfterReturnFromApiCall, a1,
+ Operand(s2));
+ }
+ __ Sub_w(s2, s2, Operand(1));
+ __ St_w(s2, MemOperand(s5, kLevelOffset));
+ __ Ld_d(kScratchReg, MemOperand(s5, kLimitOffset));
+ __ Branch(&delete_allocated_handles, ne, s1, Operand(kScratchReg));
+
+ // Leave the API exit frame.
+ __ bind(&leave_exit_frame);
+
+ if (stack_space_operand == nullptr) {
+ DCHECK_NE(stack_space, 0);
+ __ li(s0, Operand(stack_space));
+ } else {
+ DCHECK_EQ(stack_space, 0);
+ __ Ld_d(s0, *stack_space_operand);
+ }
+
+ static constexpr bool kDontSaveDoubles = false;
+ static constexpr bool kRegisterContainsSlotCount = false;
+ __ LeaveExitFrame(kDontSaveDoubles, s0, NO_EMIT_RETURN,
+ kRegisterContainsSlotCount);
+
+ // Check if the function scheduled an exception.
+ __ LoadRoot(a4, RootIndex::kTheHoleValue);
+ __ li(kScratchReg, ExternalReference::scheduled_exception_address(isolate));
+ __ Ld_d(a5, MemOperand(kScratchReg, 0));
+ __ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
+
+ __ Ret();
+
+ // Re-throw by promoting a scheduled exception.
+ __ bind(&promote_scheduled_exception);
+ __ TailCallRuntime(Runtime::kPromoteScheduledException);
+
+ // HandleScope limit has changed. Delete allocated extensions.
+ __ bind(&delete_allocated_handles);
+ __ St_d(s1, MemOperand(s5, kLimitOffset));
+ __ mov(s0, a0);
+ __ PrepareCallCFunction(1, s1);
+ __ li(a0, ExternalReference::isolate_address(isolate));
+ __ CallCFunction(ExternalReference::delete_handle_scope_extensions(), 1);
+ __ mov(a0, s0);
+ __ jmp(&leave_exit_frame);
+}
+
+} // namespace
+
+void Builtins::Generate_CallApiCallback(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- cp : context
+ // -- a1 : api function address
+ // -- a2 : arguments count (not including the receiver)
+ // -- a3 : call data
+ // -- a0 : holder
+ // -- sp[0] : receiver
+ // -- sp[8] : first argument
+ // -- ...
+ // -- sp[(argc) * 8] : last argument
+ // -----------------------------------
+
+ Register api_function_address = a1;
+ Register argc = a2;
+ Register call_data = a3;
+ Register holder = a0;
+ Register scratch = t0;
+ Register base = t1; // For addressing MemOperands on the stack.
+
+ DCHECK(!AreAliased(api_function_address, argc, call_data, holder, scratch,
+ base));
+
+ using FCA = FunctionCallbackArguments;
+
+ STATIC_ASSERT(FCA::kArgsLength == 6);
+ STATIC_ASSERT(FCA::kNewTargetIndex == 5);
+ STATIC_ASSERT(FCA::kDataIndex == 4);
+ STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+ STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+ STATIC_ASSERT(FCA::kIsolateIndex == 1);
+ STATIC_ASSERT(FCA::kHolderIndex == 0);
+
+ // Set up FunctionCallbackInfo's implicit_args on the stack as follows:
+ //
+ // Target state:
+ // sp[0 * kPointerSize]: kHolder
+ // sp[1 * kPointerSize]: kIsolate
+ // sp[2 * kPointerSize]: undefined (kReturnValueDefaultValue)
+ // sp[3 * kPointerSize]: undefined (kReturnValue)
+ // sp[4 * kPointerSize]: kData
+ // sp[5 * kPointerSize]: undefined (kNewTarget)
+
+ // Set up the base register for addressing through MemOperands. It will point
+ // at the receiver (located at sp + argc * kPointerSize).
+ __ Alsl_d(base, argc, sp, kPointerSizeLog2, t7);
+
+ // Reserve space on the stack.
+ __ Sub_d(sp, sp, Operand(FCA::kArgsLength * kPointerSize));
+
+ // kHolder.
+ __ St_d(holder, MemOperand(sp, 0 * kPointerSize));
+
+ // kIsolate.
+ __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
+ __ St_d(scratch, MemOperand(sp, 1 * kPointerSize));
+
+ // kReturnValueDefaultValue and kReturnValue.
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ __ St_d(scratch, MemOperand(sp, 2 * kPointerSize));
+ __ St_d(scratch, MemOperand(sp, 3 * kPointerSize));
+
+ // kData.
+ __ St_d(call_data, MemOperand(sp, 4 * kPointerSize));
+
+ // kNewTarget.
+ __ St_d(scratch, MemOperand(sp, 5 * kPointerSize));
+
+ // Keep a pointer to kHolder (= implicit_args) in a scratch register.
+ // We use it below to set up the FunctionCallbackInfo object.
+ __ mov(scratch, sp);
+
+ // Allocate the FunctionCallbackInfo structure in the arguments' space,
+ // since it's not controlled by GC.
+ static constexpr int kApiStackSpace = 4;
+ static constexpr bool kDontSaveDoubles = false;
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(kDontSaveDoubles, kApiStackSpace);
+
+ // EnterExitFrame may align the sp.
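+ // The four slots reserved above hold FunctionCallbackInfo's implicit_args_,
+ // values_ and length_ fields plus the number of stack slots to drop on
+ // return, all stored below.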
+
+ // FunctionCallbackInfo::implicit_args_ (points at kHolder as set up above).
+ // Arguments are after the return address (pushed by EnterExitFrame()).
+ __ St_d(scratch, MemOperand(sp, 1 * kPointerSize));
+
+ // FunctionCallbackInfo::values_ (points at the first varargs argument passed
+ // on the stack).
+ __ Add_d(scratch, scratch,
+ Operand((FCA::kArgsLength + 1) * kSystemPointerSize));
+
+ __ St_d(scratch, MemOperand(sp, 2 * kPointerSize));
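+ // values_ skips the kArgsLength implicit args and the receiver, landing on
+ // the first JS argument.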
+
+ // FunctionCallbackInfo::length_.
+ // Stored as an int field; 32-bit integers within a struct on the stack are
+ // always left-justified by the n64 ABI.
+ __ St_w(argc, MemOperand(sp, 3 * kPointerSize));
+
+ // We also store the number of stack slots to drop after returning from the
+ // API function here.
+ // Note: Unlike on other architectures, this stores the number of slots to
+ // drop, not the number of bytes.
+ __ Add_d(scratch, argc, Operand(FCA::kArgsLength + 1 /* receiver */));
+ __ St_d(scratch, MemOperand(sp, 4 * kPointerSize));
+
+ // v8::InvocationCallback's argument.
+ DCHECK(!AreAliased(api_function_address, scratch, a0));
+ __ Add_d(a0, sp, Operand(1 * kPointerSize));
+
+ ExternalReference thunk_ref = ExternalReference::invoke_function_callback();
+
+ // There are two stack slots above the arguments we constructed on the stack.
+ // TODO(jgruber): Document what these arguments are.
+ static constexpr int kStackSlotsAboveFCA = 2;
+ MemOperand return_value_operand(
+ fp, (kStackSlotsAboveFCA + FCA::kReturnValueOffset) * kPointerSize);
+
+ static constexpr int kUseStackSpaceOperand = 0;
+ MemOperand stack_space_operand(sp, 4 * kPointerSize);
+
+ AllowExternalCallThatCantCauseGC scope(masm);
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kUseStackSpaceOperand, &stack_space_operand,
+ return_value_operand);
+}
+
+void Builtins::Generate_CallApiGetter(MacroAssembler* masm) {
+ // Build v8::PropertyCallbackInfo::args_ array on the stack and push the
+ // property name below the exit frame to make the GC aware of them.
+ STATIC_ASSERT(PropertyCallbackArguments::kShouldThrowOnErrorIndex == 0);
+ STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 1);
+ STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 2);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 3);
+ STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 4);
+ STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 5);
+ STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 6);
+ STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 7);
+
+ Register receiver = ApiGetterDescriptor::ReceiverRegister();
+ Register holder = ApiGetterDescriptor::HolderRegister();
+ Register callback = ApiGetterDescriptor::CallbackRegister();
+ Register scratch = a4;
+ DCHECK(!AreAliased(receiver, holder, callback, scratch));
+
+ Register api_function_address = a2;
+
+ // Here and below, +1 is for name() pushed after the args_ array.
+ using PCA = PropertyCallbackArguments;
+ __ Sub_d(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
+ __ St_d(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
+ __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ St_d(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
+ __ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ __ St_d(scratch,
+ MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
+ __ St_d(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
+ kPointerSize));
+ __ li(scratch, ExternalReference::isolate_address(masm->isolate()));
+ __ St_d(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
+ __ St_d(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
+ // should_throw_on_error -> false
+ DCHECK_EQ(0, Smi::zero().ptr());
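+ // Smi::zero() is all zero bits, so storing zero_reg writes Smi(0), which
+ // reads as 'false' for should_throw_on_error.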
+ __ St_d(zero_reg,
+ MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
+ __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ St_d(scratch, MemOperand(sp, 0 * kPointerSize));
+
+ // v8::PropertyCallbackInfo::args_ array and name handle.
+ const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+ // Load the address of the v8::PropertyCallbackInfo::args_ array and the
+ // name handle.
+ __ mov(a0, sp); // a0 = Handle<Name>
+ __ Add_d(a1, a0, Operand(1 * kPointerSize)); // a1 = v8::PCI::args_
+
+ const int kApiStackSpace = 1;
+ FrameScope frame_scope(masm, StackFrame::MANUAL);
+ __ EnterExitFrame(false, kApiStackSpace);
+
+ // Create the v8::PropertyCallbackInfo object on the stack and initialize
+ // its args_ field.
+ __ St_d(a1, MemOperand(sp, 1 * kPointerSize));
+ __ Add_d(a1, sp, Operand(1 * kPointerSize));
+ // a1 = v8::PropertyCallbackInfo&
+
+ ExternalReference thunk_ref =
+ ExternalReference::invoke_accessor_getter_callback();
+
+ __ Ld_d(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ Ld_d(api_function_address,
+ FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
+
+ // +3 is to skip the prologue, the return address, and the name handle.
+ MemOperand return_value_operand(
+ fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize);
+ MemOperand* const kUseStackSpaceConstant = nullptr;
+ CallApiFunctionAndReturn(masm, api_function_address, thunk_ref,
+ kStackUnwindSpace, kUseStackSpaceConstant,
+ return_value_operand);
+}
+
+void Builtins::Generate_DirectCEntry(MacroAssembler* masm) {
+ // The sole purpose of DirectCEntry is for movable callers (e.g. any general
+ // purpose Code object) to be able to call into C functions that may trigger
+ // GC and thus move the caller.
+ //
+ // DirectCEntry places the return address on the stack (updated by the GC),
+ // making the call GC safe. The irregexp backend relies on this.
+
+ __ St_d(ra, MemOperand(sp, 0)); // Store the return address.
+ __ Call(t7); // Call the C++ function.
+ __ Ld_d(ra, MemOperand(sp, 0)); // Return to calling code.
+
+ // TODO(LOONG_dev): LOONG64 Check this assert.
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ // In case of an error the return address may point to a memory area
+ // filled with kZapValue by the GC. Dereference the address and check for
+ // this.
+ __ Ld_d(a4, MemOperand(ra, 0));
+ __ Assert(ne, AbortReason::kReceivedInvalidReturnAddress, a4,
+ Operand(reinterpret_cast<uint64_t>(kZapValue)));
+ }
+
+ __ Jump(ra);
+}
+
+namespace {
+
+// This code tries to be close to ia32 code so that any changes can be
+// easily ported.
+void Generate_DeoptimizationEntry(MacroAssembler* masm,
+ DeoptimizeKind deopt_kind) {
+ Isolate* isolate = masm->isolate();
+
+ // Unlike on ARM we don't save all the registers, just the useful ones.
+ // For the rest, there are gaps on the stack, so the offsets remain the same.
+ const int kNumberOfRegisters = Register::kNumRegisters;
+
+ RegList restored_regs = kJSCallerSaved | kCalleeSaved;
+ RegList saved_regs = restored_regs | sp.bit() | ra.bit();
+
+ const int kDoubleRegsSize = kDoubleSize * DoubleRegister::kNumRegisters;
+
+ // Save all double FPU registers before messing with them.
+ __ Sub_d(sp, sp, Operand(kDoubleRegsSize));
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+ int offset = code * kDoubleSize;
+ __ Fst_d(fpu_reg, MemOperand(sp, offset));
+ }
+
+ // Push saved_regs (needed to populate FrameDescription::registers_).
+ // Leave gaps for other registers.
+ __ Sub_d(sp, sp, kNumberOfRegisters * kPointerSize);
+ for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
+ if ((saved_regs & (1 << i)) != 0) {
+ __ St_d(ToRegister(i), MemOperand(sp, kPointerSize * i));
+ }
+ }
+
+ __ li(a2,
+ ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate));
+ __ St_d(fp, MemOperand(a2, 0));
+
+ const int kSavedRegistersAreaSize =
+ (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+
+ __ li(a2, Operand(Deoptimizer::kFixedExitSizeMarker));
+ // Get the address of the location in the code object (a3) (return
+ // address for lazy deoptimization) and compute the fp-to-sp delta in
+ // register a4.
+ __ mov(a3, ra);
+ __ Add_d(a4, sp, Operand(kSavedRegistersAreaSize));
+
+ __ sub_d(a4, fp, a4);
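+ // a4 = fp - (sp + kSavedRegistersAreaSize), i.e. the fp-to-sp delta of the
+ // frame being deoptimized, before the register save area was pushed.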
+
+ // Allocate a new deoptimizer object.
+ __ PrepareCallCFunction(6, a5);
+ // Pass six arguments, according to n64 ABI.
+ __ mov(a0, zero_reg);
+ Label context_check;
+ __ Ld_d(a1, MemOperand(fp, CommonFrameConstants::kContextOrFrameTypeOffset));
+ __ JumpIfSmi(a1, &context_check);
+ __ Ld_d(a0, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+ __ bind(&context_check);
+ __ li(a1, Operand(static_cast<int>(deopt_kind)));
+ // a2: bailout id already loaded.
+ // a3: code address or 0 already loaded.
+ // a4: already has fp-to-sp delta.
+ __ li(a5, ExternalReference::isolate_address(isolate));
+
+ // Call Deoptimizer::New().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::new_deoptimizer_function(), 6);
+ }
+
+ // Preserve the "deoptimizer" object in register a0 and load the input
+ // frame descriptor pointer (deoptimizer->input_) into a1. a0 is passed to
+ // Deoptimizer::ComputeOutputFrames() below.
+ __ Ld_d(a1, MemOperand(a0, Deoptimizer::input_offset()));
+
+ // Copy core registers into FrameDescription::registers_[kNumRegisters].
+ DCHECK_EQ(Register::kNumRegisters, kNumberOfRegisters);
+ for (int i = 0; i < kNumberOfRegisters; i++) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((saved_regs & (1 << i)) != 0) {
+ __ Ld_d(a2, MemOperand(sp, i * kPointerSize));
+ __ St_d(a2, MemOperand(a1, offset));
+ } else if (FLAG_debug_code) {
+ __ li(a2, Operand(kDebugZapValue));
+ __ St_d(a2, MemOperand(a1, offset));
+ }
+ }
+
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ // Copy FPU registers to
+ // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ int dst_offset = code * kDoubleSize + double_regs_offset;
+ int src_offset = code * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ __ Fld_d(f0, MemOperand(sp, src_offset));
+ __ Fst_d(f0, MemOperand(a1, dst_offset));
+ }
+
+ // Remove the saved registers from the stack.
+ __ Add_d(sp, sp, Operand(kSavedRegistersAreaSize));
+
+ // Compute a pointer to the unwinding limit in register a2; that is
+ // the first stack slot not part of the input frame.
+ __ Ld_d(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
+ __ add_d(a2, a2, sp);
+
+ // Unwind the stack down to - but not including - the unwinding
+ // limit and copy the contents of the activation frame to the input
+ // frame description.
+ __ Add_d(a3, a1, Operand(FrameDescription::frame_content_offset()));
+ Label pop_loop;
+ Label pop_loop_header;
+ __ Branch(&pop_loop_header);
+ __ bind(&pop_loop);
+ __ Pop(a4);
+ __ St_d(a4, MemOperand(a3, 0));
+ __ addi_d(a3, a3, sizeof(uint64_t));
+ __ bind(&pop_loop_header);
+ __ BranchShort(&pop_loop, ne, a2, Operand(sp));
+ // Compute the output frame in the deoptimizer.
+ __ Push(a0); // Preserve deoptimizer object across call.
+ // a0: deoptimizer object; a1: scratch.
+ __ PrepareCallCFunction(1, a1);
+ // Call Deoptimizer::ComputeOutputFrames().
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
+ }
+ __ Pop(a0); // Restore deoptimizer object (class Deoptimizer).
+
+ __ Ld_d(sp, MemOperand(a0, Deoptimizer::caller_frame_top_offset()));
+
+ // Replace the current (input) frame with the output frames.
+ Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
+ // Outer loop state: a4 = current "FrameDescription** output_",
+ // a1 = one past the last FrameDescription**.
+ __ Ld_w(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
+ __ Ld_d(a4, MemOperand(a0, Deoptimizer::output_offset())); // a4 is output_.
+ __ Alsl_d(a1, a1, a4, kPointerSizeLog2);
+ __ Branch(&outer_loop_header);
+ __ bind(&outer_push_loop);
+ // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
+ __ Ld_d(a2, MemOperand(a4, 0)); // output_[ix]
+ __ Ld_d(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
+ __ Branch(&inner_loop_header);
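+ // The inner loop pushes the frame contents from the highest offset down to
+ // zero, so the new frame lands on the stack in the correct order.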
+ __ bind(&inner_push_loop);
+ __ Sub_d(a3, a3, Operand(sizeof(uint64_t)));
+ __ Add_d(a6, a2, Operand(a3));
+ __ Ld_d(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
+ __ Push(a7);
+ __ bind(&inner_loop_header);
+ __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
+
+ __ Add_d(a4, a4, Operand(kPointerSize));
+ __ bind(&outer_loop_header);
+ __ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
+
+ __ Ld_d(a1, MemOperand(a0, Deoptimizer::input_offset()));
+ for (int i = 0; i < config->num_allocatable_double_registers(); ++i) {
+ int code = config->GetAllocatableDoubleCode(i);
+ const DoubleRegister fpu_reg = DoubleRegister::from_code(code);
+ int src_offset = code * kDoubleSize + double_regs_offset;
+ __ Fld_d(fpu_reg, MemOperand(a1, src_offset));
+ }
+
+ // Push pc and continuation from the last output frame.
+ __ Ld_d(a6, MemOperand(a2, FrameDescription::pc_offset()));
+ __ Push(a6);
+ __ Ld_d(a6, MemOperand(a2, FrameDescription::continuation_offset()));
+ __ Push(a6);
+
+ // Technically restoring 't7' should work unless zero_reg is also restored,
+ // but it's safer to check for this.
+ DCHECK(!(t7.bit() & restored_regs));
+ // Restore the registers from the last output frame.
+ __ mov(t7, a2);
+ for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+ int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+ if ((restored_regs & (1 << i)) != 0) {
+ __ Ld_d(ToRegister(i), MemOperand(t7, offset));
+ }
+ }
+
+ __ Pop(t7); // Get continuation, leave pc on stack.
+ __ Pop(ra);
+ __ Jump(t7);
+ __ stop();
+}
+
+} // namespace
+
+void Builtins::Generate_DeoptimizationEntry_Eager(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kEager);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Soft(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kSoft);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Bailout(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kBailout);
+}
+
+void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
+ Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
+}
+
+namespace {
+
+// Restarts execution either at the current or next (in execution order)
+// bytecode. If there is baseline code on the shared function info, converts an
+// interpreter frame into a baseline frame and continues execution in baseline
+// code. Otherwise execution continues with bytecode.
+void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
+ bool next_bytecode,
+ bool is_osr = false) {
+ Label start;
+ __ bind(&start);
+
+ // Get function from the frame.
+ Register closure = a1;
+ __ Ld_d(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
+
+ // Get the Code object from the shared function info.
+ Register code_obj = s1;
+ __ Ld_d(code_obj,
+ FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld_d(code_obj,
+ FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
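+ // code_obj now holds the SFI's function data, which is a CodeT object
+ // whenever baseline code exists (checked below).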
+
+ // Check if we have baseline code. For OSR entry it is safe to assume we
+ // always have baseline code.
+ if (!is_osr) {
+ Label start_with_baseline;
+ __ GetObjectType(code_obj, t2, t2);
+ __ Branch(&start_with_baseline, eq, t2, Operand(CODET_TYPE));
+
+ // Start with bytecode as there is no baseline code.
+ Builtin builtin_id = next_bytecode
+ ? Builtin::kInterpreterEnterAtNextBytecode
+ : Builtin::kInterpreterEnterAtBytecode;
+ __ Jump(masm->isolate()->builtins()->code_handle(builtin_id),
+ RelocInfo::CODE_TARGET);
+
+ // Start with baseline code.
+ __ bind(&start_with_baseline);
+ } else if (FLAG_debug_code) {
+ __ GetObjectType(code_obj, t2, t2);
+ __ Assert(eq, AbortReason::kExpectedBaselineData, t2, Operand(CODET_TYPE));
+ }
+
+ if (FLAG_debug_code) {
+ AssertCodeIsBaseline(masm, code_obj, t2);
+ }
+
+ // Load the feedback vector from the closure.
+ Register feedback_vector = a2;
+ __ Ld_d(feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ Ld_d(feedback_vector,
+ FieldMemOperand(feedback_vector, Cell::kValueOffset));
+
+ Label install_baseline_code;
+ // Check if feedback vector is valid. If not, call prepare for baseline to
+ // allocate it.
+ __ GetObjectType(feedback_vector, t2, t2);
+ __ Branch(&install_baseline_code, ne, t2, Operand(FEEDBACK_VECTOR_TYPE));
+
+ // Save BytecodeOffset from the stack frame.
+ __ SmiUntag(kInterpreterBytecodeOffsetRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ // Replace BytecodeOffset with the feedback vector.
+ __ St_d(feedback_vector,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+ feedback_vector = no_reg;
+
+ // Compute baseline pc for bytecode offset.
+ ExternalReference get_baseline_pc_extref;
+ if (next_bytecode || is_osr) {
+ get_baseline_pc_extref =
+ ExternalReference::baseline_pc_for_next_executed_bytecode();
+ } else {
+ get_baseline_pc_extref =
+ ExternalReference::baseline_pc_for_bytecode_offset();
+ }
+
+ Register get_baseline_pc = a3;
+ __ li(get_baseline_pc, get_baseline_pc_extref);
+
+ // If the code deoptimizes during the implicit function entry stack interrupt
+ // check, it will have a bailout ID of kFunctionEntryBytecodeOffset, which is
+ // not a valid bytecode offset.
+ // TODO(pthier): Investigate if it is feasible to handle this special case
+ // in TurboFan instead of here.
+ Label valid_bytecode_offset, function_entry_bytecode;
+ if (!is_osr) {
+ __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
+ Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+ kFunctionEntryBytecodeOffset));
+ }
+
+ __ Sub_d(kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeOffsetRegister,
+ (BytecodeArray::kHeaderSize - kHeapObjectTag));
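+ // The frame stores the offset into the BytecodeArray object; stripping the
+ // header yields the plain bytecode offset the C++ helper expects.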
+
+ __ bind(&valid_bytecode_offset);
+ // Get bytecode array from the stack frame.
+ __ Ld_d(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ // Save the accumulator register, since it's clobbered by the below call.
+ __ Push(kInterpreterAccumulatorRegister);
+ {
+ Register arg_reg_1 = a0;
+ Register arg_reg_2 = a1;
+ Register arg_reg_3 = a2;
+ __ Move(arg_reg_1, code_obj);
+ __ Move(arg_reg_2, kInterpreterBytecodeOffsetRegister);
+ __ Move(arg_reg_3, kInterpreterBytecodeArrayRegister);
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ CallCFunction(get_baseline_pc, 3, 0);
+ }
+ __ Add_d(code_obj, code_obj, kReturnRegister0);
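+ // kReturnRegister0 holds the PC offset computed by the helper; the Code
+ // header size is added separately before jumping below.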
+ __ Pop(kInterpreterAccumulatorRegister);
+
+ if (is_osr) {
+ // Reset the OSR loop nesting depth to disarm back edges.
+ // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
+ // Sparkplug here.
+ // TODO(liuyu): Remove this Ld (as on arm64) after register reallocation.
+ __ Ld_d(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ St_h(zero_reg,
+ FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ BytecodeArray::kOsrLoopNestingLevelOffset));
+ Generate_OSREntry(masm, code_obj,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ } else {
+ __ Add_d(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
+ __ Jump(code_obj);
+ }
+ __ Trap(); // Unreachable.
+
+ if (!is_osr) {
+ __ bind(&function_entry_bytecode);
+ // If the bytecode offset is kFunctionEntryBytecodeOffset, get the start
+ // address of the first bytecode.
+ __ mov(kInterpreterBytecodeOffsetRegister, zero_reg);
+ if (next_bytecode) {
+ __ li(get_baseline_pc,
+ ExternalReference::baseline_pc_for_bytecode_offset());
+ }
+ __ Branch(&valid_bytecode_offset);
+ }
+
+ __ bind(&install_baseline_code);
+ {
+ FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister);
+ __ Push(closure);
+ __ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ __ Pop(kInterpreterAccumulatorRegister);
+ }
+ // Retry from the start after installing baseline code.
+ __ Branch(&start);
+}
+
+} // namespace
+
+void Builtins::Generate_BaselineOrInterpreterEnterAtBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, false);
+}
+
+void Builtins::Generate_BaselineOrInterpreterEnterAtNextBytecode(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, true);
+}
+
+void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
+ MacroAssembler* masm) {
+ Generate_BaselineOrInterpreterEntry(masm, false, true);
+}
+
+void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
+ Generate_DynamicCheckMapsTrampoline<DynamicCheckMapsDescriptor>(
+ masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMaps));
+}
+
+void Builtins::Generate_DynamicCheckMapsWithFeedbackVectorTrampoline(
+ MacroAssembler* masm) {
+ Generate_DynamicCheckMapsTrampoline<
+ DynamicCheckMapsWithFeedbackVectorDescriptor>(
+ masm, BUILTIN_CODE(masm->isolate(), DynamicCheckMapsWithFeedbackVector));
+}
+
+template <class Descriptor>
+void Builtins::Generate_DynamicCheckMapsTrampoline(
+ MacroAssembler* masm, Handle<Code> builtin_target) {
+ FrameScope scope(masm, StackFrame::MANUAL);
+ __ EnterFrame(StackFrame::INTERNAL);
+
+ // Only save the registers that the DynamicCheckMaps builtin can clobber.
+ Descriptor descriptor;
+ RegList registers = descriptor.allocatable_registers();
+ // If FLAG_debug_code is enabled, CSA checks will call a C function, so we
+ // need to save all CallerSaved registers too.
+ if (FLAG_debug_code) registers |= kJSCallerSaved;
+ __ MaybeSaveRegisters(registers);
+
+ // Load the immediate arguments from the deopt exit to pass to the builtin.
+ Register slot_arg = descriptor.GetRegisterParameter(Descriptor::kSlot);
+ Register handler_arg = descriptor.GetRegisterParameter(Descriptor::kHandler);
+ __ Ld_d(handler_arg, MemOperand(fp, CommonFrameConstants::kCallerPCOffset));
+ __ Ld_d(
+ slot_arg,
+ MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs1PcOffset));
+ __ Ld_d(
+ handler_arg,
+ MemOperand(handler_arg, Deoptimizer::kEagerWithResumeImmedArgs2PcOffset));
+ __ Call(builtin_target, RelocInfo::CODE_TARGET);
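+ // The builtin returns its DynamicCheckMapsStatus in a0; anything other
+ // than kSuccess deopts or bails out below.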
+
+ Label deopt, bailout;
+ __ Branch(&deopt, ne, a0,
+ Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kSuccess)));
+
+ __ MaybeRestoreRegisters(registers);
+ __ LeaveFrame(StackFrame::INTERNAL);
+ __ Ret();
+
+ __ bind(&deopt);
+ __ Branch(&bailout, eq, a0,
+ Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kBailout)));
+
+ if (FLAG_debug_code) {
+ __ Assert(eq, AbortReason::kUnexpectedDynamicCheckMapsStatus, a0,
+ Operand(static_cast<int64_t>(DynamicCheckMapsStatus::kDeopt)));
+ }
+ __ MaybeRestoreRegisters(registers);
+ __ LeaveFrame(StackFrame::INTERNAL);
+ Handle<Code> deopt_eager = masm->isolate()->builtins()->code_handle(
+ Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kEager));
+ __ Jump(deopt_eager, RelocInfo::CODE_TARGET);
+
+ __ bind(&bailout);
+ __ MaybeRestoreRegisters(registers);
+ __ LeaveFrame(StackFrame::INTERNAL);
+ Handle<Code> deopt_bailout = masm->isolate()->builtins()->code_handle(
+ Deoptimizer::GetDeoptimizationEntry(DeoptimizeKind::kBailout));
+ __ Jump(deopt_bailout, RelocInfo::CODE_TARGET);
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_LOONG64
diff --git a/deps/v8/src/builtins/mips/builtins-mips.cc b/deps/v8/src/builtins/mips/builtins-mips.cc
index 8f4bf4d06b..9a97f0fa4e 100644
--- a/deps/v8/src/builtins/mips/builtins-mips.cc
+++ b/deps/v8/src/builtins/mips/builtins-mips.cc
@@ -612,6 +612,16 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
__ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
}
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ lw(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
+ Operand(static_cast<int>(CodeKind::BASELINE)));
+}
+
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Register sfi_data,
Register scratch1,
@@ -620,7 +630,15 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label done;
__ GetObjectType(sfi_data, scratch1, scratch1);
- __ Branch(is_baseline, eq, scratch1, Operand(BASELINE_DATA_TYPE));
+ if (FLAG_debug_code) {
+ Label not_baseline;
+ __ Branch(&not_baseline, ne, scratch1, Operand(CODET_TYPE));
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
+ __ Branch(is_baseline);
+ __ bind(&not_baseline);
+ } else {
+ __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
+ }
__ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
__ lw(sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
@@ -1389,8 +1407,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
&has_optimized_code_or_marker);
// Load the baseline code into the closure.
- __ Lw(a2, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BaselineData::kBaselineCodeOffset));
+ __ Move(a2, kInterpreterBytecodeArrayRegister);
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
ReplaceClosureCodeWithOptimizedCode(masm, a2, closure, t4, t5);
__ JumpCodeObject(a2);
@@ -1779,7 +1796,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
}
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ lw(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ __ lw(a1, MemOperand(v0, Code::kDeoptimizationDataOrInterpreterDataOffset -
+ kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@@ -2723,12 +2741,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&zero);
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
{
UseScratchRegisterScope temps(masm);
@@ -3964,7 +3976,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
if (!is_osr) {
Label start_with_baseline;
__ GetObjectType(code_obj, t6, t6);
- __ Branch(&start_with_baseline, eq, t6, Operand(BASELINE_DATA_TYPE));
+ __ Branch(&start_with_baseline, eq, t6, Operand(CODET_TYPE));
// Start with bytecode as there is no baseline code.
Builtin builtin_id = next_bytecode
@@ -3977,12 +3989,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ bind(&start_with_baseline);
} else if (FLAG_debug_code) {
__ GetObjectType(code_obj, t6, t6);
- __ Assert(eq, AbortReason::kExpectedBaselineData, t6,
- Operand(BASELINE_DATA_TYPE));
+ __ Assert(eq, AbortReason::kExpectedBaselineData, t6, Operand(CODET_TYPE));
}
- // Load baseline code from baseline data.
- __ Lw(code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+ if (FLAG_debug_code) {
+ AssertCodeIsBaseline(masm, code_obj, t2);
+ }
// Replace BytecodeOffset with the feedback vector.
Register feedback_vector = a2;
diff --git a/deps/v8/src/builtins/mips64/builtins-mips64.cc b/deps/v8/src/builtins/mips64/builtins-mips64.cc
index 45e1c32f82..3f8824d97d 100644
--- a/deps/v8/src/builtins/mips64/builtins-mips64.cc
+++ b/deps/v8/src/builtins/mips64/builtins-mips64.cc
@@ -300,6 +300,16 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ Ld(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
+ Operand(static_cast<int>(CodeKind::BASELINE)));
+}
+
// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
// the more general dispatch.
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
@@ -309,11 +319,18 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label done;
__ GetObjectType(sfi_data, scratch1, scratch1);
- __ Branch(is_baseline, eq, scratch1, Operand(BASELINE_DATA_TYPE));
+ if (FLAG_debug_code) {
+ Label not_baseline;
+ __ Branch(&not_baseline, ne, scratch1, Operand(CODET_TYPE));
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
+ __ Branch(is_baseline);
+ __ bind(&not_baseline);
+ } else {
+ __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
+ }
__ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE));
__ Ld(sfi_data,
FieldMemOperand(sfi_data, InterpreterData::kBytecodeArrayOffset));
-
__ bind(&done);
}
@@ -1402,8 +1419,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
&has_optimized_code_or_marker);
// Load the baseline code into the closure.
- __ Ld(a2, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BaselineData::kBaselineCodeOffset));
+ __ Move(a2, kInterpreterBytecodeArrayRegister);
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
ReplaceClosureCodeWithOptimizedCode(masm, a2, closure, t0, t1);
__ JumpCodeObject(a2);
@@ -1788,7 +1804,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
}
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
- __ Ld(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ __ Ld(a1, MemOperand(v0, Code::kDeoptimizationDataOrInterpreterDataOffset -
+ kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@@ -2814,12 +2831,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ Sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&zero);
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
{
UseScratchRegisterScope temps(masm);
@@ -3549,7 +3560,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
if (!is_osr) {
Label start_with_baseline;
__ GetObjectType(code_obj, t2, t2);
- __ Branch(&start_with_baseline, eq, t2, Operand(BASELINE_DATA_TYPE));
+ __ Branch(&start_with_baseline, eq, t2, Operand(CODET_TYPE));
// Start with bytecode as there is no baseline code.
Builtin builtin_id = next_bytecode
@@ -3562,12 +3573,12 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ bind(&start_with_baseline);
} else if (FLAG_debug_code) {
__ GetObjectType(code_obj, t2, t2);
- __ Assert(eq, AbortReason::kExpectedBaselineData, t2,
- Operand(BASELINE_DATA_TYPE));
+ __ Assert(eq, AbortReason::kExpectedBaselineData, t2, Operand(CODET_TYPE));
}
- // Load baseline code from baseline data.
- __ Ld(code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
+ if (FLAG_debug_code) {
+ AssertCodeIsBaseline(masm, code_obj, t2);
+ }
// Replace BytecodeOffset with the feedback vector.
Register feedback_vector = a2;
diff --git a/deps/v8/src/builtins/ppc/builtins-ppc.cc b/deps/v8/src/builtins/ppc/builtins-ppc.cc
index 02b76175ec..4c2533e68d 100644
--- a/deps/v8/src/builtins/ppc/builtins-ppc.cc
+++ b/deps/v8/src/builtins/ppc/builtins-ppc.cc
@@ -1641,7 +1641,8 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadTaggedPointerField(
- r4, FieldMemOperand(r3, Code::kDeoptimizationDataOffset), r0);
+ r4, FieldMemOperand(r3, Code::kDeoptimizationDataOrInterpreterDataOffset),
+ r0);
{
ConstantPoolUnavailableScope constant_pool_unavailable(masm);
@@ -2646,12 +2647,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ StoreU64(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&skip);
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
{
UseScratchRegisterScope temps(masm);
diff --git a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
index f79e392f48..c90352bea1 100644
--- a/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
+++ b/deps/v8/src/builtins/riscv64/builtins-riscv64.cc
@@ -320,6 +320,15 @@ void Builtins::Generate_JSBuiltinsConstructStub(MacroAssembler* masm) {
Generate_JSBuiltinsConstructStubHelper(masm);
}
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ Ld(scratch, FieldMemOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
+ Operand(static_cast<int>(CodeKind::BASELINE)));
+}
// TODO(v8:11429): Add a path for "not_compiled" and unify the two uses under
// the more general dispatch.
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
@@ -330,7 +339,8 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label done;
__ GetObjectType(sfi_data, scratch1, scratch1);
- __ Branch(is_baseline, eq, scratch1, Operand(BASELINE_DATA_TYPE));
+ __ Branch(is_baseline, eq, scratch1, Operand(CODET_TYPE));
+
__ Branch(&done, ne, scratch1, Operand(INTERPRETER_DATA_TYPE),
Label::Distance::kNear);
__ LoadTaggedPointerField(
@@ -401,17 +411,15 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
__ Lhu(a3,
FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
- UseScratchRegisterScope temps(masm);
- Register scratch = temps.Acquire();
__ LoadTaggedPointerField(
- scratch,
+ t1,
FieldMemOperand(a1, JSGeneratorObject::kParametersAndRegistersOffset));
{
Label done_loop, loop;
__ bind(&loop);
__ Sub64(a3, a3, Operand(1));
__ Branch(&done_loop, lt, a3, Operand(zero_reg), Label::Distance::kNear);
- __ CalcScaledAddress(kScratchReg, scratch, a3, kTaggedSizeLog2);
+ __ CalcScaledAddress(kScratchReg, t1, a3, kTaggedSizeLog2);
__ LoadAnyTaggedField(
kScratchReg, FieldMemOperand(kScratchReg, FixedArray::kHeaderSize));
__ Push(kScratchReg);
@@ -575,9 +583,14 @@ void Generate_JSEntryVariant(MacroAssembler* masm, StackFrame::Type type,
__ li(s3, Operand(StackFrame::TypeToMarker(type)));
ExternalReference c_entry_fp = ExternalReference::Create(
IsolateAddressId::kCEntryFPAddress, masm->isolate());
- __ li(s4, c_entry_fp);
- __ Ld(s4, MemOperand(s4));
+ __ li(s5, c_entry_fp);
+ __ Ld(s4, MemOperand(s5));
__ Push(s1, s2, s3, s4);
+ // Clear c_entry_fp, now we've pushed its previous value to the stack.
+ // If the c_entry_fp is not already zero and we don't clear it, the
+ // SafeStackFrameIterator will assume we are executing C++ and miss the JS
+ // frames on top.
+ __ Sd(zero_reg, MemOperand(s5));
// Set up frame pointer for the frame to be pushed.
__ Add64(fp, sp, -EntryFrameConstants::kCallerFPOffset);
// Registers:
@@ -1160,9 +1173,9 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// store the bytecode offset.
if (FLAG_debug_code) {
UseScratchRegisterScope temps(masm);
- Register type = temps.Acquire();
- __ GetObjectType(feedback_vector, type, type);
- __ Assert(eq, AbortReason::kExpectedFeedbackVector, type,
+ Register invocation_count = temps.Acquire();
+ __ GetObjectType(feedback_vector, invocation_count, invocation_count);
+ __ Assert(eq, AbortReason::kExpectedFeedbackVector, invocation_count,
Operand(FEEDBACK_VECTOR_TYPE));
}
// Our stack is currently aligned. We have to push something along with
@@ -1171,8 +1184,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// TODO(v8:11429,leszeks): Consider guaranteeing that this call leaves
// `undefined` in the accumulator register, to skip the load in the baseline
// code.
- __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
- __ Push(feedback_vector, kInterpreterAccumulatorRegister);
+ __ Push(feedback_vector);
}
Label call_stack_guard;
@@ -1203,7 +1215,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
{
ASM_CODE_COMMENT_STRING(masm, "Optimized marker check");
// Drop the frame created by the baseline call.
- __ Pop(fp, ra);
+ __ Pop(ra, fp);
MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(masm, optimization_state,
feedback_vector);
__ Trap();
@@ -1212,14 +1224,13 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
__ bind(&call_stack_guard);
{
ASM_CODE_COMMENT_STRING(masm, "Stack/interrupt call");
- Register new_target = descriptor.GetRegisterParameter(
- BaselineOutOfLinePrologueDescriptor::kJavaScriptCallNewTarget);
-
FrameScope frame_scope(masm, StackFrame::INTERNAL);
// Save incoming new target or generator
- __ Push(zero_reg, new_target);
- __ CallRuntime(Runtime::kStackGuard);
- __ Pop(new_target, zero_reg);
+ __ Push(kJavaScriptCallNewTargetRegister);
+ __ SmiTag(frame_size);
+ __ Push(frame_size);
+ __ CallRuntime(Runtime::kStackGuardWithGap);
+ __ Pop(kJavaScriptCallNewTargetRegister);
}
__ Ret();
temps.Exclude(kScratchReg.bit() | kScratchReg2.bit());
@@ -1239,7 +1250,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// o ra: return address
//
// The function builds an interpreter frame. See InterpreterFrameConstants in
-// frames.h for its layout.
+// frame-constants.h for its layout.
void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
Register closure = a1;
Register feedback_vector = a2;
@@ -1466,36 +1477,28 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ bind(&is_baseline);
{
// Load the feedback vector from the closure.
- __ Ld(feedback_vector,
- FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
- __ Ld(feedback_vector,
- FieldMemOperand(feedback_vector, Cell::kValueOffset));
+ __ LoadTaggedPointerField(
+ feedback_vector,
+ FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
+ __ LoadTaggedPointerField(
+ feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
Label install_baseline_code;
// Check if feedback vector is valid. If not, call prepare for baseline to
// allocate it.
- __ Ld(scratch, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
- __ Lh(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ LoadTaggedPointerField(
+ scratch, FieldMemOperand(feedback_vector, HeapObject::kMapOffset));
+ __ Lhu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ Branch(&install_baseline_code, ne, scratch,
Operand(FEEDBACK_VECTOR_TYPE));
- // Read off the optimization state in the feedback vector.
- // TODO(v8:11429): Is this worth doing here? Baseline code will check it
- // anyway...
- __ Ld(optimization_state,
- FieldMemOperand(feedback_vector, FeedbackVector::kFlagsOffset));
-
- // Check if there is optimized code or a optimization marker that needes to
- // be processed.
- __ And(
- scratch, optimization_state,
- Operand(FeedbackVector::kHasOptimizedCodeOrCompileOptimizedMarkerMask));
- __ Branch(&has_optimized_code_or_marker, ne, scratch, Operand(zero_reg));
+ // Check for an optimization marker.
+ LoadOptimizationStateAndJumpIfNeedsProcessing(
+ masm, optimization_state, feedback_vector,
+ &has_optimized_code_or_marker);
// Load the baseline code into the closure.
- __ LoadTaggedPointerField(
- a2, FieldMemOperand(kInterpreterBytecodeArrayRegister,
- BaselineData::kBaselineCodeOffset));
+ __ Move(a2, kInterpreterBytecodeArrayRegister);
static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
ReplaceClosureCodeWithOptimizedCode(masm, a2, closure, scratch, scratch2);
__ JumpCodeObject(a2);
@@ -1888,7 +1891,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadTaggedPointerField(
- a1, MemOperand(a0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+ a1, MemOperand(a0, Code::kDeoptimizationDataOrInterpreterDataOffset -
+ kHeapObjectTag));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@@ -2713,6 +2717,7 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+#if V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
// The function index was put in t0 by the jump table trampoline.
// Convert to Smi for the runtime call
@@ -2728,7 +2733,7 @@ void Builtins::Generate_WasmCompileLazy(MacroAssembler* masm) {
for (Register gp_param_reg : wasm::kGpParamRegisters) {
gp_regs |= gp_param_reg.bit();
}
- // Also push x1, because we must push multiples of 16 bytes (see
+ // Also push a1, because we must push multiples of 16 bytes (see
// {TurboAssembler::PushCPURegList}).
CHECK_EQ(0, NumRegs(gp_regs) % 2);
@@ -2786,6 +2791,7 @@ void Builtins::Generate_WasmDebugBreak(MacroAssembler* masm) {
}
__ Ret();
}
+#endif // V8_ENABLE_WEBASSEMBLY
void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
SaveFPRegsMode save_doubles, ArgvMode argv_mode,
@@ -2909,12 +2915,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ Sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&zero);
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-
// Compute the handler entry address and jump to it.
UseScratchRegisterScope temp(masm);
Register scratch = temp.Acquire();
@@ -3640,7 +3640,6 @@ namespace {
void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
bool next_bytecode,
bool is_osr = false) {
- __ Push(zero_reg, kInterpreterAccumulatorRegister);
Label start;
__ bind(&start);
@@ -3649,7 +3648,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ Ld(closure, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
// Get the Code object from the shared function info.
- Register code_obj = a4;
+ Register code_obj = s1;
__ LoadTaggedPointerField(
code_obj,
FieldMemOperand(closure, JSFunction::kSharedFunctionInfoOffset));
@@ -3664,10 +3663,9 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ GetObjectType(code_obj, scratch, scratch);
- __ Branch(&start_with_baseline, eq, scratch, Operand(BASELINE_DATA_TYPE));
+ __ Branch(&start_with_baseline, eq, scratch, Operand(CODET_TYPE));
// Start with bytecode as there is no baseline code.
- __ Pop(zero_reg, kInterpreterAccumulatorRegister);
Builtin builtin_id = next_bytecode
? Builtin::kInterpreterEnterAtNextBytecode
: Builtin::kInterpreterEnterAtBytecode;
@@ -3681,13 +3679,13 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
Register scratch = temps.Acquire();
__ GetObjectType(code_obj, scratch, scratch);
__ Assert(eq, AbortReason::kExpectedBaselineData, scratch,
- Operand(BASELINE_DATA_TYPE));
+ Operand(CODET_TYPE));
+ }
+ if (FLAG_debug_code) {
+ UseScratchRegisterScope temps(masm);
+ Register scratch = temps.Acquire();
+ AssertCodeIsBaseline(masm, code_obj, scratch);
}
-
- // Load baseline code from baseline data.
- __ LoadTaggedPointerField(
- code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
-
// Replace BytecodeOffset with the feedback vector.
Register feedback_vector = a2;
__ LoadTaggedPointerField(
@@ -3701,7 +3699,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
UseScratchRegisterScope temps(masm);
Register type = temps.Acquire();
__ GetObjectType(feedback_vector, type, type);
- __ Branch(&install_baseline_code, eq, type, Operand(FEEDBACK_VECTOR_TYPE));
+ __ Branch(&install_baseline_code, ne, type, Operand(FEEDBACK_VECTOR_TYPE));
// Save BytecodeOffset from the stack frame.
__ SmiUntag(kInterpreterBytecodeOffsetRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
@@ -3711,7 +3709,6 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
feedback_vector = no_reg;
// Compute baseline pc for bytecode offset.
- __ Push(zero_reg, kInterpreterAccumulatorRegister);
ExternalReference get_baseline_pc_extref;
if (next_bytecode || is_osr) {
get_baseline_pc_extref =
@@ -3744,6 +3741,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Get bytecode array from the stack frame.
__ Ld(kInterpreterBytecodeArrayRegister,
MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ Push(kInterpreterAccumulatorRegister);
{
Register arg_reg_1 = a0;
Register arg_reg_2 = a1;
@@ -3755,13 +3753,15 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ CallCFunction(get_baseline_pc, 3, 0);
}
__ Add64(code_obj, code_obj, kReturnRegister0);
- __ Pop(kInterpreterAccumulatorRegister, zero_reg);
+ __ Pop(kInterpreterAccumulatorRegister);
if (is_osr) {
// Reset the OSR loop nesting depth to disarm back edges.
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
// Sparkplug here.
- __ Sd(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+ __ Ld(kInterpreterBytecodeArrayRegister,
+ MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
+ __ Sh(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kOsrLoopNestingLevelOffset));
Generate_OSREntry(masm, code_obj,
Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -3786,8 +3786,10 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ bind(&install_baseline_code);
{
FrameScope scope(masm, StackFrame::INTERNAL);
+ __ Push(kInterpreterAccumulatorRegister);
__ Push(closure);
__ CallRuntime(Runtime::kInstallBaselineCode, 1);
+ __ Pop(kInterpreterAccumulatorRegister);
}
// Retry from the start after installing baseline code.
__ Branch(&start);
diff --git a/deps/v8/src/builtins/s390/builtins-s390.cc b/deps/v8/src/builtins/s390/builtins-s390.cc
index 5129cc6ee3..5ee2cf7c6a 100644
--- a/deps/v8/src/builtins/s390/builtins-s390.cc
+++ b/deps/v8/src/builtins/s390/builtins-s390.cc
@@ -1681,7 +1681,8 @@ void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
// Load deoptimization data from the code object.
// <deopt_data> = <code>[#deoptimization_data_offset]
__ LoadTaggedPointerField(
- r3, FieldMemOperand(r2, Code::kDeoptimizationDataOffset));
+ r3,
+ FieldMemOperand(r2, Code::kDeoptimizationDataOrInterpreterDataOffset));
// Load the OSR entrypoint offset from the deoptimization data.
// <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
@@ -2679,12 +2680,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ StoreU64(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&skip);
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
{
UseScratchRegisterScope temps(masm);
diff --git a/deps/v8/src/builtins/setup-builtins-internal.cc b/deps/v8/src/builtins/setup-builtins-internal.cc
index 2724f9a200..5ad0319f63 100644
--- a/deps/v8/src/builtins/setup-builtins-internal.cc
+++ b/deps/v8/src/builtins/setup-builtins-internal.cc
@@ -156,11 +156,11 @@ Code BuildWithCodeStubAssemblerJS(Isolate* isolate, Builtin builtin,
CanonicalHandleScope canonical(isolate);
Zone zone(isolate->allocator(), ZONE_NAME, kCompressGraphZone);
- const int argc_with_recv =
- (argc == kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
- compiler::CodeAssemblerState state(
- isolate, &zone, argc_with_recv, CodeKind::BUILTIN, name,
- PoisoningMitigationLevel::kDontPoison, builtin);
+ const int argc_with_recv = (argc == kDontAdaptArgumentsSentinel)
+ ? 0
+ : argc + (kJSArgcIncludesReceiver ? 0 : 1);
+ compiler::CodeAssemblerState state(isolate, &zone, argc_with_recv,
+ CodeKind::BUILTIN, name, builtin);
generator(&state);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(
&state, BuiltinAssemblerOptions(isolate, builtin),
@@ -183,9 +183,8 @@ Code BuildWithCodeStubAssemblerCS(Isolate* isolate, Builtin builtin,
CallInterfaceDescriptor descriptor(interface_descriptor);
// Ensure descriptor is already initialized.
DCHECK_LE(0, descriptor.GetRegisterParameterCount());
- compiler::CodeAssemblerState state(
- isolate, &zone, descriptor, CodeKind::BUILTIN, name,
- PoisoningMitigationLevel::kDontPoison, builtin);
+ compiler::CodeAssemblerState state(isolate, &zone, descriptor,
+ CodeKind::BUILTIN, name, builtin);
generator(&state);
Handle<Code> code = compiler::CodeAssembler::GenerateCode(
&state, BuiltinAssemblerOptions(isolate, builtin),
diff --git a/deps/v8/src/builtins/typed-array-createtypedarray.tq b/deps/v8/src/builtins/typed-array-createtypedarray.tq
index 2f94f6205f..cb3443284d 100644
--- a/deps/v8/src/builtins/typed-array-createtypedarray.tq
+++ b/deps/v8/src/builtins/typed-array-createtypedarray.tq
@@ -62,7 +62,6 @@ transitioning macro AllocateTypedArray(implicit context: Context)(
typedArray.bit_field.is_length_tracking = isLengthTracking;
typedArray.bit_field.is_backed_by_rab =
IsResizableArrayBuffer(buffer) && !IsSharedArrayBuffer(buffer);
- typed_array::AllocateJSTypedArrayExternalPointerEntry(typedArray);
if constexpr (isOnHeap) {
typed_array::SetJSTypedArrayOnHeapDataPtr(typedArray, elements, byteOffset);
} else {
diff --git a/deps/v8/src/builtins/typed-array-every.tq b/deps/v8/src/builtins/typed-array-every.tq
index fdd4961dee..8c662bffb7 100644
--- a/deps/v8/src/builtins/typed-array-every.tq
+++ b/deps/v8/src/builtins/typed-array-every.tq
@@ -7,24 +7,43 @@
namespace typed_array {
const kBuiltinNameEvery: constexpr string = '%TypedArray%.prototype.every';
+// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.every
transitioning macro EveryAllElements(implicit context: Context)(
array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
thisArg: JSAny): Boolean {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
const length: uintptr = witness.Get().length;
+
+ // 6. Repeat, while k < len
for (let k: uintptr = 0; k < length; k++) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
+ // 6a. Let Pk be ! ToString(𝔽(k)).
+ // There is no need to convert k to a string to load the element.
+
+ // 6b. Let kValue be ! Get(O, Pk).
+ // kValue must be undefined when the buffer is detached.
+ let value: JSAny;
+ try {
+ witness.Recheck() otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
+
+ // 6c. Let testResult be ! ToBoolean(? Call(callbackfn, thisArg, « kValue,
+ // 𝔽(k), O »)).
// TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
// indices to optimize Convert<Number>(k) for the most common case.
const result = Call(
context, callbackfn, thisArg, value, Convert<Number>(k),
witness.GetStable());
+ // 6d. If testResult is false, return false.
if (!ToBoolean(result)) {
return False;
}
+ // 6e. Set k to k + 1. (done by the loop).
}
+
+ // 7. Return true.
return True;
}
diff --git a/deps/v8/src/builtins/typed-array-filter.tq b/deps/v8/src/builtins/typed-array-filter.tq
index 15d40f92eb..18fbce9f09 100644
--- a/deps/v8/src/builtins/typed-array-filter.tq
+++ b/deps/v8/src/builtins/typed-array-filter.tq
@@ -38,11 +38,15 @@ transitioning javascript builtin TypedArrayPrototypeFilter(
// 8. Let captured be 0.
// 9. Repeat, while k < len
for (let k: uintptr = 0; k < len; k++) {
- witness.Recheck() otherwise IsDetached;
-
+ let value: JSAny;
// a. Let Pk be ! ToString(k).
// b. Let kValue be ? Get(O, Pk).
- const value: JSAny = witness.Load(k);
+ try {
+ witness.Recheck() otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
// c. Let selected be ToBoolean(? Call(callbackfn, T, « kValue, k, O
// »)).
@@ -57,7 +61,7 @@ transitioning javascript builtin TypedArrayPrototypeFilter(
// ii. Increase captured by 1.
if (ToBoolean(selected)) kept.Push(value);
- // e.Increase k by 1.
+ // e. Increase k by 1. (done by the loop)
}
// 10. Let A be ? TypedArraySpeciesCreate(O, captured).
diff --git a/deps/v8/src/builtins/typed-array-find.tq b/deps/v8/src/builtins/typed-array-find.tq
index 24a13dbc23..b37b4ef8a9 100644
--- a/deps/v8/src/builtins/typed-array-find.tq
+++ b/deps/v8/src/builtins/typed-array-find.tq
@@ -7,24 +7,45 @@
namespace typed_array {
const kBuiltinNameFind: constexpr string = '%TypedArray%.prototype.find';
+// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.find
transitioning macro FindAllElements(implicit context: Context)(
- array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
+ array: typed_array::AttachedJSTypedArray, predicate: Callable,
thisArg: JSAny): JSAny {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
const length: uintptr = witness.Get().length;
+
+ // 6. Repeat, while k < len
for (let k: uintptr = 0; k < length; k++) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
+ // 6a. Let Pk be ! ToString(𝔽(k)).
+ // There is no need to cast ToString to load elements.
+
+ // 6b. Let kValue be ! Get(O, Pk).
+ // kValue must be undefined when the buffer is detached.
+ let value: JSAny;
+ try {
+ witness.Recheck() otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
+
+ // 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
+ // 𝔽(k), O »)).
// TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
// indices to optimize Convert<Number>(k) for the most common case.
const result = Call(
- context, callbackfn, thisArg, value, Convert<Number>(k),
+ context, predicate, thisArg, value, Convert<Number>(k),
witness.GetStable());
+
+ // 6d. If testResult is true, return kValue.
if (ToBoolean(result)) {
return value;
}
+
+ // 6e. Set k to k + 1. (done by the loop).
}
+
+ // 7. Return undefined.
return Undefined;
}
@@ -39,9 +60,9 @@ TypedArrayPrototypeFind(
otherwise NotTypedArray;
const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
- const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
+ const predicate = Cast<Callable>(arguments[0]) otherwise NotCallable;
const thisArg = arguments[1];
- return FindAllElements(uarray, callbackfn, thisArg);
+ return FindAllElements(uarray, predicate, thisArg);
} label NotCallable deferred {
ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
} label NotTypedArray deferred {
diff --git a/deps/v8/src/builtins/typed-array-findindex.tq b/deps/v8/src/builtins/typed-array-findindex.tq
index 7bb01151f3..aede90dc7f 100644
--- a/deps/v8/src/builtins/typed-array-findindex.tq
+++ b/deps/v8/src/builtins/typed-array-findindex.tq
@@ -9,19 +9,33 @@ const kBuiltinNameFindIndex: constexpr string =
'%TypedArray%.prototype.findIndex';
transitioning macro FindIndexAllElements(implicit context: Context)(
- array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
+ array: typed_array::AttachedJSTypedArray, predicate: Callable,
thisArg: JSAny): Number {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
const length: uintptr = witness.Get().length;
+
+ // 6. Repeat, while k < len
for (let k: uintptr = 0; k < length; k++) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
+ // 6a. Let Pk be ! ToString(𝔽(k)).
+ // There is no need to cast ToString to load elements.
+
+ // 6b. Let kValue be ! Get(O, Pk).
+ // kValue must be undefined when the buffer is detached.
+ let value: JSAny;
+ try {
+ witness.Recheck() otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
+
+ // 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
+ // 𝔽(k), O »)).
// TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
// indices to optimize Convert<Number>(k) for the most common case.
const indexNumber: Number = Convert<Number>(k);
const result = Call(
- context, callbackfn, thisArg, value, indexNumber, witness.GetStable());
+ context, predicate, thisArg, value, indexNumber, witness.GetStable());
if (ToBoolean(result)) {
return indexNumber;
}
@@ -40,9 +54,9 @@ TypedArrayPrototypeFindIndex(
otherwise NotTypedArray;
const uarray = typed_array::EnsureAttached(array) otherwise IsDetached;
- const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
+ const predicate = Cast<Callable>(arguments[0]) otherwise NotCallable;
const thisArg = arguments[1];
- return FindIndexAllElements(uarray, callbackfn, thisArg);
+ return FindIndexAllElements(uarray, predicate, thisArg);
} label NotCallable deferred {
ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
} label NotTypedArray deferred {
diff --git a/deps/v8/src/builtins/typed-array-findlast.tq b/deps/v8/src/builtins/typed-array-findlast.tq
index 634e17b936..15f67760c0 100644
--- a/deps/v8/src/builtins/typed-array-findlast.tq
+++ b/deps/v8/src/builtins/typed-array-findlast.tq
@@ -8,56 +8,28 @@ namespace typed_array {
const kBuiltinNameFindLast: constexpr string =
'%TypedArray%.prototype.findLast';
-// Continuation part of
-// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlast
-// when array buffer was detached.
-transitioning builtin FindLastAllElementsDetachedContinuation(
- implicit context: Context)(
- array: JSTypedArray, predicate: Callable, thisArg: JSAny,
- initialK: Number): JSAny {
- // 6. Repeat, while k ≥ 0
- for (let k: Number = initialK; k >= 0; k--) {
- // 6a. Let Pk be ! ToString(𝔽(k)).
- // there is no need to cast ToString to load elements.
-
- // 6b. Let kValue be ! Get(O, Pk).
- // kValue must be undefined when the buffer was detached.
-
- // 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
- // 𝔽(k), O »)).
- // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
- // indices to optimize Convert<Number>(k) for the most common case.
- const result =
- Call(context, predicate, thisArg, Undefined, Convert<Number>(k), array);
- // 6d. If testResult is true, return kValue.
- if (ToBoolean(result)) {
- return Undefined;
- }
-
- // 6e. Set k to k - 1. (done by the loop).
- }
-
- // 7. Return undefined.
- return Undefined;
-}
-
// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlast
transitioning macro FindLastAllElements(implicit context: Context)(
array: typed_array::AttachedJSTypedArray, predicate: Callable,
- thisArg: JSAny): JSAny labels
-Bailout(Number) {
+ thisArg: JSAny): JSAny {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
// 3. Let len be O.[[ArrayLength]].
const length: uintptr = witness.Get().length;
// 5. Let k be len - 1.
// 6. Repeat, while k ≥ 0
for (let k: uintptr = length; k-- > 0;) {
- witness.Recheck() otherwise goto Bailout(Convert<Number>(k));
// 6a. Let Pk be ! ToString(𝔽(k)).
- // there is no need to cast ToString to load elements.
+ // There is no need to cast ToString to load elements.
// 6b. Let kValue be ! Get(O, Pk).
- const value: JSAny = witness.Load(k);
+ // kValue must be undefined when the buffer was detached.
+ let value: JSAny;
+ try {
+ witness.Recheck() otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
// 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
// 𝔽(k), O »)).
@@ -94,13 +66,7 @@ TypedArrayPrototypeFindLast(
// 4. If IsCallable(predicate) is false, throw a TypeError exception.
const predicate = Cast<Callable>(arguments[0]) otherwise NotCallable;
const thisArg = arguments[1];
- try {
- return FindLastAllElements(uarray, predicate, thisArg)
- otherwise Bailout;
- } label Bailout(k: Number) deferred {
- return FindLastAllElementsDetachedContinuation(
- uarray, predicate, thisArg, k);
- }
+ return FindLastAllElements(uarray, predicate, thisArg);
} label NotCallable deferred {
ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
} label NotTypedArray deferred {
diff --git a/deps/v8/src/builtins/typed-array-findlastindex.tq b/deps/v8/src/builtins/typed-array-findlastindex.tq
index 4b20114c91..56d139d8b1 100644
--- a/deps/v8/src/builtins/typed-array-findlastindex.tq
+++ b/deps/v8/src/builtins/typed-array-findlastindex.tq
@@ -8,57 +8,28 @@ namespace typed_array {
const kBuiltinNameFindLastIndex: constexpr string =
    '%TypedArray%.prototype.findLastIndex';
-// Continuation part of
-// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlastindex
-// when array buffer was detached.
-transitioning builtin FindLastIndexAllElementsDetachedContinuation(
- implicit context: Context)(
- array: JSTypedArray, predicate: Callable, thisArg: JSAny,
- initialK: Number): Number {
- // 6. Repeat, while k ≥ 0
- for (let k: Number = initialK; k >= 0; k--) {
- // 6a. Let Pk be ! ToString(𝔽(k)).
- // there is no need to cast ToString to load elements.
-
- // 6b. Let kValue be ! Get(O, Pk).
- // kValue must be undefined when the buffer was detached.
-
- // 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
- // 𝔽(k), O »)).
- // TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
- // indices to optimize Convert<Number>(k) for the most common case.
- const indexNumber: Number = Convert<Number>(k);
- const result =
- Call(context, predicate, thisArg, Undefined, indexNumber, array);
- // 6d. If testResult is true, return 𝔽(k).
- if (ToBoolean(result)) {
- return indexNumber;
- }
-
- // 6e. Set k to k - 1. (done by the loop).
- }
-
- // 7. Return -1𝔽.
- return -1;
-}
-
// https://tc39.es/proposal-array-find-from-last/index.html#sec-%typedarray%.prototype.findlastindex
transitioning macro FindLastIndexAllElements(implicit context: Context)(
array: typed_array::AttachedJSTypedArray, predicate: Callable,
- thisArg: JSAny): Number labels
-Bailout(Number) {
+ thisArg: JSAny): Number {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
// 3. Let len be O.[[ArrayLength]].
const length: uintptr = witness.Get().length;
// 5. Let k be len - 1.
// 6. Repeat, while k ≥ 0
for (let k: uintptr = length; k-- > 0;) {
- witness.Recheck() otherwise goto Bailout(Convert<Number>(k));
// 6a. Let Pk be ! ToString(𝔽(k)).
- // there is no need to cast ToString to load elements.
+ // There is no need to cast ToString to load elements.
// 6b. Let kValue be ! Get(O, Pk).
- const value: JSAny = witness.Load(k);
+ // kValue must be undefined when the buffer was detached.
+ let value: JSAny;
+ try {
+ witness.Recheck() otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
// 6c. Let testResult be ! ToBoolean(? Call(predicate, thisArg, « kValue,
// 𝔽(k), O »)).
@@ -96,13 +67,7 @@ TypedArrayPrototypeFindLastIndex(
const predicate = Cast<Callable>(arguments[0]) otherwise NotCallable;
const thisArg = arguments[1];
- try {
- return FindLastIndexAllElements(uarray, predicate, thisArg)
- otherwise Bailout;
- } label Bailout(k: Number) deferred {
- return FindLastIndexAllElementsDetachedContinuation(
- uarray, predicate, thisArg, k);
- }
+ return FindLastIndexAllElements(uarray, predicate, thisArg);
} label NotCallable deferred {
ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
} label NotTypedArray deferred {
diff --git a/deps/v8/src/builtins/typed-array-foreach.tq b/deps/v8/src/builtins/typed-array-foreach.tq
index d696d9c8dd..fa227bc75b 100644
--- a/deps/v8/src/builtins/typed-array-foreach.tq
+++ b/deps/v8/src/builtins/typed-array-foreach.tq
@@ -12,16 +12,33 @@ transitioning macro ForEachAllElements(implicit context: Context)(
thisArg: JSAny): Undefined {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
const length: uintptr = witness.Get().length;
+
+ // 6. Repeat, while k < len
for (let k: uintptr = 0; k < length; k++) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
+ // 6a. Let Pk be ! ToString(𝔽(k)).
+ // There is no need to cast ToString to load elements.
+
+ // 6b. Let kValue be ! Get(O, Pk).
+ // kValue must be undefined when the buffer is detached.
+ let value: JSAny;
+ try {
+ witness.Recheck() otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
+
+ // 6c. Perform ? Call(callbackfn, thisArg, « kValue, 𝔽(k), O »).
// TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
// indices to optimize Convert<Number>(k) for the most common case.
Call(
context, callbackfn, thisArg, value, Convert<Number>(k),
witness.GetStable());
+
+ // 6d. Set k to k + 1. (done by the loop).
}
+
+ // 7. Return undefined.
return Undefined;
}
diff --git a/deps/v8/src/builtins/typed-array-reduce.tq b/deps/v8/src/builtins/typed-array-reduce.tq
index a54ed1040e..0261599106 100644
--- a/deps/v8/src/builtins/typed-array-reduce.tq
+++ b/deps/v8/src/builtins/typed-array-reduce.tq
@@ -12,11 +12,17 @@ transitioning macro ReduceAllElements(implicit context: Context)(
initialValue: JSAny|TheHole): JSAny {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
const length: uintptr = witness.Get().length;
+
let accumulator = initialValue;
for (let k: uintptr = 0; k < length; k++) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
+ let value: JSAny;
+ try {
+ witness.Recheck()
+ otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
typeswitch (accumulator) {
case (TheHole): {
accumulator = value;
diff --git a/deps/v8/src/builtins/typed-array-reduceright.tq b/deps/v8/src/builtins/typed-array-reduceright.tq
index 9ba2f70de4..5449c4f1fc 100644
--- a/deps/v8/src/builtins/typed-array-reduceright.tq
+++ b/deps/v8/src/builtins/typed-array-reduceright.tq
@@ -8,6 +8,7 @@ namespace typed_array {
const kBuiltinNameReduceRight: constexpr string =
'%TypedArray%.prototype.reduceRight';
+// https://tc39.github.io/ecma262/#sec-%typedarray%.prototype.reduceright
transitioning macro ReduceRightAllElements(implicit context: Context)(
array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
initialValue: JSAny|TheHole): JSAny {
@@ -15,9 +16,14 @@ transitioning macro ReduceRightAllElements(implicit context: Context)(
const length: uintptr = witness.Get().length;
let accumulator = initialValue;
for (let k: uintptr = length; k-- > 0;) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
+ let value: JSAny;
+ try {
+ witness.Recheck()
+ otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
typeswitch (accumulator) {
case (TheHole): {
accumulator = value;
diff --git a/deps/v8/src/builtins/typed-array-set.tq b/deps/v8/src/builtins/typed-array-set.tq
index f4d2a40f41..eeb521e3f6 100644
--- a/deps/v8/src/builtins/typed-array-set.tq
+++ b/deps/v8/src/builtins/typed-array-set.tq
@@ -115,18 +115,6 @@ TypedArrayPrototypeSetArray(implicit context: Context, receiver: JSAny)(
IfDetached {
// Steps 9-13 are not observable, do them later.
- // TODO(v8:8906): This ported behaviour is an observable spec violation and
- // the comment below seems to be outdated. Consider removing this code.
- try {
- const _arrayArgNum = Cast<Number>(arrayArg) otherwise NotNumber;
- // For number as a first argument, throw TypeError instead of silently
- // ignoring the call, so that users know they did something wrong.
- // (Consistent with Firefox and Blink/WebKit)
- ThrowTypeError(MessageTemplate::kInvalidArgument);
- } label NotNumber {
- // Proceed to step 14.
- }
-
// 14. Let src be ? ToObject(array).
const src: JSReceiver = ToObject_Inline(context, arrayArg);
diff --git a/deps/v8/src/builtins/typed-array-some.tq b/deps/v8/src/builtins/typed-array-some.tq
index ecdfae1e8a..9946907680 100644
--- a/deps/v8/src/builtins/typed-array-some.tq
+++ b/deps/v8/src/builtins/typed-array-some.tq
@@ -7,24 +7,45 @@
namespace typed_array {
const kBuiltinNameSome: constexpr string = '%TypedArray%.prototype.some';
+// https://tc39.es/ecma262/#sec-%typedarray%.prototype.some
transitioning macro SomeAllElements(implicit context: Context)(
array: typed_array::AttachedJSTypedArray, callbackfn: Callable,
thisArg: JSAny): Boolean {
let witness = typed_array::NewAttachedJSTypedArrayWitness(array);
const length: uintptr = witness.Get().length;
+
+ // 6. Repeat, while k < len
for (let k: uintptr = 0; k < length; k++) {
- // BUG(4895): We should throw on detached buffers rather than simply exit.
- witness.Recheck() otherwise break;
- const value: JSAny = witness.Load(k);
+ // 6a. Let Pk be ! ToString(𝔽(k)).
+ // There is no need to cast ToString to load elements.
+
+ // 6b. Let kValue be ! Get(O, Pk).
+ // kValue must be undefined when the buffer is detached.
+ let value: JSAny;
+ try {
+ witness.Recheck() otherwise goto IsDetached;
+ value = witness.Load(k);
+ } label IsDetached deferred {
+ value = Undefined;
+ }
+
+ // 6c. Let testResult be ! ToBoolean(? Call(callbackfn, thisArg, « kValue,
+ // 𝔽(k), O »)).
// TODO(v8:4153): Consider versioning this loop for Smi and non-Smi
// indices to optimize Convert<Number>(k) for the most common case.
const result = Call(
context, callbackfn, thisArg, value, Convert<Number>(k),
witness.GetStable());
+
+ // 6d. If testResult is true, return true.
if (ToBoolean(result)) {
return True;
}
+
+ // 6e. Set k to k + 1. (done by the loop).
}
+
+ // 7. Return false.
return False;
}
@@ -41,6 +62,7 @@ TypedArrayPrototypeSome(
const callbackfn = Cast<Callable>(arguments[0]) otherwise NotCallable;
const thisArg = arguments[1];
+
return SomeAllElements(uarray, callbackfn, thisArg);
} label NotCallable deferred {
ThrowTypeError(MessageTemplate::kCalledNonCallable, arguments[0]);
diff --git a/deps/v8/src/builtins/typed-array.tq b/deps/v8/src/builtins/typed-array.tq
index 87bcb2fb59..582388b75d 100644
--- a/deps/v8/src/builtins/typed-array.tq
+++ b/deps/v8/src/builtins/typed-array.tq
@@ -161,10 +161,6 @@ macro GetTypedArrayAccessor(elementsKind: ElementsKind): TypedArrayAccessor {
unreachable;
}
-extern macro
-TypedArrayBuiltinsAssembler::AllocateJSTypedArrayExternalPointerEntry(
- JSTypedArray): void;
-
extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOnHeapDataPtr(
JSTypedArray, ByteArray, uintptr): void;
extern macro TypedArrayBuiltinsAssembler::SetJSTypedArrayOffHeapDataPtr(
diff --git a/deps/v8/src/builtins/wasm.tq b/deps/v8/src/builtins/wasm.tq
index 7fc4a03e35..ec786311be 100644
--- a/deps/v8/src/builtins/wasm.tq
+++ b/deps/v8/src/builtins/wasm.tq
@@ -369,16 +369,6 @@ builtin WasmArrayCopyWithChecks(
SmiFromUint32(srcIndex), SmiFromUint32(length));
}
-// We put all uint32 parameters at the beginning so that they are assigned to
-// registers.
-builtin WasmArrayCopy(
- dstIndex: uint32, srcIndex: uint32, length: uint32, dstArray: WasmArray,
- srcArray: WasmArray): JSAny {
- tail runtime::WasmArrayCopy(
- LoadContextFromFrame(), dstArray, SmiFromUint32(dstIndex), srcArray,
- SmiFromUint32(srcIndex), SmiFromUint32(length));
-}
-
// Redeclaration with different typing (value is an Object, not JSAny).
extern transitioning runtime
CreateDataProperty(implicit context: Context)(JSReceiver, JSAny, Object);
diff --git a/deps/v8/src/builtins/x64/builtins-x64.cc b/deps/v8/src/builtins/x64/builtins-x64.cc
index 14186e3be6..f5ef0877bc 100644
--- a/deps/v8/src/builtins/x64/builtins-x64.cc
+++ b/deps/v8/src/builtins/x64/builtins-x64.cc
@@ -83,6 +83,35 @@ static void GenerateTailCallToReturnedCode(
namespace {
+enum class ArgumentsElementType {
+ kRaw, // Push arguments as they are.
+ kHandle // Dereference arguments before pushing.
+};
+
+void Generate_PushArguments(MacroAssembler* masm, Register array, Register argc,
+ Register scratch,
+ ArgumentsElementType element_type) {
+ DCHECK(!AreAliased(array, argc, scratch, kScratchRegister));
+ Register counter = scratch;
+ Label loop, entry;
+ if (kJSArgcIncludesReceiver) {
+ __ leaq(counter, Operand(argc, -kJSArgcReceiverSlots));
+ } else {
+ __ movq(counter, argc);
+ }
+ __ jmp(&entry);
+ __ bind(&loop);
+ Operand value(array, counter, times_system_pointer_size, 0);
+ if (element_type == ArgumentsElementType::kHandle) {
+ __ movq(kScratchRegister, value);
+ value = Operand(kScratchRegister, 0);
+ }
+ __ Push(value);
+ __ bind(&entry);
+ __ decq(counter);
+ __ j(greater_equal, &loop, Label::kNear);
+}
+
void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rax: number of arguments
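
Generate_PushArguments factors out the argument-push loop that the construct stubs and the JSEntry trampoline previously open-coded (see the @@ -607 hunk below). A plain C++ sketch of what the emitted loop does, assuming kJSArgcIncludesReceiver is enabled; the array base points at the first non-receiver argument:

#include <cassert>
#include <cstdint>
#include <vector>

enum class ArgumentsElementType { kRaw, kHandle };

constexpr bool kJSArgcIncludesReceiver = true;  // assumption for this sketch
constexpr int kJSArgcReceiverSlots = 1;

// array[i] holds either an immediate value (kRaw) or the address of a
// value (kHandle) that is dereferenced before pushing.
void PushArguments(const int64_t* array, int argc, ArgumentsElementType type,
                   std::vector<int64_t>* stack) {
  int counter = kJSArgcIncludesReceiver ? argc - kJSArgcReceiverSlots : argc;
  while (--counter >= 0) {  // mirrors the decq + j(greater_equal) loop
    int64_t value = array[counter];
    if (type == ArgumentsElementType::kHandle) {
      value = *reinterpret_cast<const int64_t*>(value);  // dereference handle
    }
    stack->push_back(value);  // arguments land in reverse slot order
  }
}

int main() {
  int64_t args[] = {10, 20};  // the two non-receiver arguments
  std::vector<int64_t> stack;
  PushArguments(args, /*argc=*/3, ArgumentsElementType::kRaw, &stack);
  assert(stack.size() == 2 && stack[0] == 20 && stack[1] == 10);
}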
@@ -112,7 +141,9 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
__ leaq(rbx, Operand(rbp, StandardFrameConstants::kCallerSPOffset +
kSystemPointerSize));
// Copy arguments to the expression stack.
- __ PushArray(rbx, rax, rcx);
+ // rbx: Pointer to start of arguments.
+ // rax: Number of arguments.
+ Generate_PushArguments(masm, rbx, rax, rcx, ArgumentsElementType::kRaw);
// The receiver for the builtin/api call.
__ PushRoot(RootIndex::kTheHoleValue);
@@ -129,8 +160,10 @@ void Generate_JSBuiltinsConstructStubHelper(MacroAssembler* masm) {
}
// Remove caller arguments from the stack and return.
- __ DropArguments(rbx, rcx, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(rbx, rcx, MacroAssembler::kCountIsSmi,
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ ret(0);
@@ -236,7 +269,9 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// InvokeFunction.
// Copy arguments to the expression stack.
- __ PushArray(rbx, rax, rcx);
+ // rbx: Pointer to start of arguments.
+ // rax: Number of arguments.
+ Generate_PushArguments(masm, rbx, rax, rcx, ArgumentsElementType::kRaw);
// Push implicit receiver.
__ Push(r8);
@@ -279,8 +314,10 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ movq(rbx, Operand(rbp, ConstructFrameConstants::kLengthOffset));
__ LeaveFrame(StackFrame::CONSTRUCT);
// Remove caller arguments from the stack and return.
- __ DropArguments(rbx, rcx, TurboAssembler::kCountIsSmi,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArguments(rbx, rcx, MacroAssembler::kCountIsSmi,
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
__ ret(0);
// If the result is a smi, it is *not* an object in the ECMA sense.
@@ -607,18 +644,12 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ bind(&enough_stack_space);
- // Copy arguments to the stack in a loop.
+ // Copy arguments to the stack.
// Register rbx points to array of pointers to handle locations.
// Push the values of these handles.
- Label loop, entry;
- __ movq(rcx, rax);
- __ jmp(&entry, Label::kNear);
- __ bind(&loop);
- __ movq(kScratchRegister, Operand(rbx, rcx, times_system_pointer_size, 0));
- __ Push(Operand(kScratchRegister, 0)); // dereference handle
- __ bind(&entry);
- __ decq(rcx);
- __ j(greater_equal, &loop, Label::kNear);
+ // rbx: Pointer to start of arguments.
+ // rax: Number of arguments.
+ Generate_PushArguments(masm, rbx, rax, rcx, ArgumentsElementType::kHandle);
// Push the receiver.
__ Push(r9);
@@ -651,6 +682,21 @@ void Builtins::Generate_RunMicrotasksTrampoline(MacroAssembler* masm) {
__ Jump(BUILTIN_CODE(masm->isolate(), RunMicrotasks), RelocInfo::CODE_TARGET);
}
+static void AssertCodeIsBaselineAllowClobber(MacroAssembler* masm,
+ Register code, Register scratch) {
+ // Verify that the code kind is baseline code via the CodeKind.
+ __ movl(scratch, FieldOperand(code, Code::kFlagsOffset));
+ __ DecodeField<Code::KindField>(scratch);
+ __ cmpl(scratch, Immediate(static_cast<int>(CodeKind::BASELINE)));
+ __ Assert(equal, AbortReason::kExpectedBaselineData);
+}
+
+static void AssertCodeIsBaseline(MacroAssembler* masm, Register code,
+ Register scratch) {
+ DCHECK(!AreAliased(code, scratch));
+ return AssertCodeIsBaselineAllowClobber(masm, code, scratch);
+}
+
static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Register sfi_data,
Register scratch1,
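
AssertCodeIsBaseline replaces the old BASELINE_DATA_TYPE instance-type check: with the SharedFunctionInfo slot now holding a CodeT directly, baseline-ness must be read from the CodeKind bitfield in the code object's flags. A hedged sketch of that check in plain C++; the field position, width, and enum values here are assumptions for illustration, not V8's actual layout:

#include <cassert>
#include <cstdint>

enum class CodeKind : uint32_t { BYTECODE_HANDLER, BASELINE, BUILTIN };

struct KindField {                        // assumed: kind in the low 4 bits
  static constexpr uint32_t kShift = 0;
  static constexpr uint32_t kMask = 0xF;
  static uint32_t decode(uint32_t flags) { return (flags >> kShift) & kMask; }
};

void AssertCodeIsBaseline(uint32_t code_flags) {
  assert(KindField::decode(code_flags) ==
         static_cast<uint32_t>(CodeKind::BASELINE));
}

int main() {
  AssertCodeIsBaseline(static_cast<uint32_t>(CodeKind::BASELINE));
}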
@@ -659,8 +705,21 @@ static void GetSharedFunctionInfoBytecodeOrBaseline(MacroAssembler* masm,
Label done;
__ LoadMap(scratch1, sfi_data);
- __ CmpInstanceType(scratch1, BASELINE_DATA_TYPE);
- __ j(equal, is_baseline);
+ __ CmpInstanceType(scratch1, CODET_TYPE);
+ if (FLAG_debug_code) {
+ Label not_baseline;
+ __ j(not_equal, &not_baseline);
+ if (V8_EXTERNAL_CODE_SPACE_BOOL) {
+ __ LoadCodeDataContainerCodeNonBuiltin(scratch1, sfi_data);
+ AssertCodeIsBaselineAllowClobber(masm, scratch1, scratch1);
+ } else {
+ AssertCodeIsBaseline(masm, sfi_data, scratch1);
+ }
+ __ j(equal, is_baseline);
+ __ bind(&not_baseline);
+ } else {
+ __ j(equal, is_baseline);
+ }
__ CmpInstanceType(scratch1, INTERPRETER_DATA_TYPE);
__ j(not_equal, &done, Label::kNear);
@@ -736,7 +795,9 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
rcx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
__ movzxwq(
rcx, FieldOperand(rcx, SharedFunctionInfo::kFormalParameterCountOffset));
-
+ if (kJSArgcIncludesReceiver) {
+ __ decq(rcx);
+ }
__ LoadTaggedPointerField(
rbx, FieldOperand(rdx, JSGeneratorObject::kParametersAndRegistersOffset));
@@ -771,7 +832,7 @@ void Builtins::Generate_ResumeGeneratorTrampoline(MacroAssembler* masm) {
__ jmp(&ok);
__ bind(&is_baseline);
- __ CmpObjectType(rcx, BASELINE_DATA_TYPE, rcx);
+ __ CmpObjectType(rcx, CODET_TYPE, rcx);
__ Assert(equal, AbortReason::kMissingBytecodeArray);
__ bind(&ok);
@@ -862,7 +923,7 @@ static void LeaveInterpreterFrame(MacroAssembler* masm, Register scratch1,
Operand(rbp, StandardFrameConstants::kArgCOffset));
__ leaq(actual_params_size,
Operand(actual_params_size, times_system_pointer_size,
- kSystemPointerSize));
+ kJSArgcIncludesReceiver ? 0 : kSystemPointerSize));
// If actual is bigger than formal, then we should use it to free up the stack
// arguments.
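
This hunk adapts the frame teardown to the new argc convention: a receiver slot is added to the byte size only when argc does not already count it. The computation, sketched with kJSArgcIncludesReceiver assumed true:

constexpr bool kJSArgcIncludesReceiver = true;  // demo assumption
constexpr int kSystemPointerSize = 8;

constexpr int ActualParamsSizeInBytes(int argc) {
  return argc * kSystemPointerSize +
         (kJSArgcIncludesReceiver ? 0 : kSystemPointerSize);
}

static_assert(ActualParamsSizeInBytes(3) == 24,
              "receiver already counted in argc");

int main() {}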
@@ -1107,7 +1168,7 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
// stack left to right.
//
// The live registers are:
-// o rax: actual argument count (not including the receiver)
+// o rax: actual argument count
// o rdi: the JS function object being called
// o rdx: the incoming new target or generator object
// o rsi: our context
@@ -1335,9 +1396,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
&has_optimized_code_or_marker);
// Load the baseline code into the closure.
- __ LoadTaggedPointerField(rcx,
- FieldOperand(kInterpreterBytecodeArrayRegister,
- BaselineData::kBaselineCodeOffset));
+ __ Move(rcx, kInterpreterBytecodeArrayRegister);
static_assert(kJavaScriptCallCodeStartRegister == rcx, "ABI mismatch");
ReplaceClosureCodeWithOptimizedCode(
masm, rcx, closure, kInterpreterBytecodeArrayRegister,
@@ -1374,7 +1433,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
InterpreterPushArgsMode mode) {
DCHECK(mode != InterpreterPushArgsMode::kArrayFunction);
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rbx : the address of the first argument to be pushed. Subsequent
// arguments should be consecutive above this, in the same order as
// they are to be pushed onto the stack.
@@ -1387,7 +1446,15 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
__ decl(rax);
}
- __ leal(rcx, Operand(rax, 1)); // Add one for receiver.
+ int argc_modification = kJSArgcIncludesReceiver ? 0 : 1;
+ if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
+ argc_modification -= 1;
+ }
+ if (argc_modification != 0) {
+ __ leal(rcx, Operand(rax, argc_modification));
+ } else {
+ __ movl(rcx, rax);
+ }
// Add a stack check before pushing arguments.
__ StackOverflowCheck(rcx, &stack_overflow);
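
The folded argc_modification computes in one step the slot count rcx used for the stack check and the push loop: +1 for the receiver only under the old convention, and -1 when the receiver is implied null/undefined and therefore never copied (the separately removed decq below). A small C++ sketch, assuming kJSArgcIncludesReceiver is enabled:

#include <cassert>

constexpr bool kJSArgcIncludesReceiver = true;  // demo assumption
enum class ConvertReceiverMode { kNullOrUndefined, kAny };

int SlotsToPush(int argc, ConvertReceiverMode receiver_mode) {
  int argc_modification = kJSArgcIncludesReceiver ? 0 : 1;
  if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
    argc_modification -= 1;  // receiver is implied; it is not on the stack
  }
  return argc + argc_modification;
}

int main() {
  assert(SlotsToPush(3, ConvertReceiverMode::kNullOrUndefined) == 2);
  assert(SlotsToPush(3, ConvertReceiverMode::kAny) == 3);
}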
@@ -1395,11 +1462,6 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
// Pop return address to allow tail-call after pushing arguments.
__ PopReturnAddressTo(kScratchRegister);
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- // Don't copy receiver.
- __ decq(rcx);
- }
-
// rbx and rdx will be modified.
GenerateInterpreterPushArgs(masm, rcx, rbx, rdx);
@@ -1439,7 +1501,7 @@ void Builtins::Generate_InterpreterPushArgsThenCallImpl(
void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
MacroAssembler* masm, InterpreterPushArgsMode mode) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -- rdi : the constructor to call (can be any Object)
@@ -1462,7 +1524,12 @@ void Builtins::Generate_InterpreterPushArgsThenConstructImpl(
}
// rcx and r8 will be modified.
- GenerateInterpreterPushArgs(masm, rax, rcx, r8);
+ Register argc_without_receiver = rax;
+ if (kJSArgcIncludesReceiver) {
+ argc_without_receiver = r11;
+ __ leaq(argc_without_receiver, Operand(rax, -kJSArgcReceiverSlots));
+ }
+ GenerateInterpreterPushArgs(masm, argc_without_receiver, rcx, r8);
// Push slot for the receiver to be constructed.
__ Push(Immediate(0));
@@ -1809,7 +1876,8 @@ void Generate_ContinueToBuiltinHelper(MacroAssembler* masm,
// the LAZY deopt point. rax contains the arguments count, the return value
// from LAZY is always the last argument.
__ movq(Operand(rsp, rax, times_system_pointer_size,
- BuiltinContinuationFrameConstants::kFixedFrameSize),
+ BuiltinContinuationFrameConstants::kFixedFrameSize -
+ (kJSArgcIncludesReceiver ? kSystemPointerSize : 0)),
kScratchRegister);
}
__ movq(
@@ -1883,19 +1951,20 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
__ LoadRoot(rdx, RootIndex::kUndefinedValue);
__ movq(rbx, rdx);
__ movq(rdi, args[0]);
- __ testq(rax, rax);
- __ j(zero, &no_this_arg, Label::kNear);
+ __ cmpq(rax, Immediate(JSParameterCount(0)));
+ __ j(equal, &no_this_arg, Label::kNear);
{
__ movq(rdx, args[1]);
- __ cmpq(rax, Immediate(1));
+ __ cmpq(rax, Immediate(JSParameterCount(1)));
__ j(equal, &no_arg_array, Label::kNear);
__ movq(rbx, args[2]);
__ bind(&no_arg_array);
}
__ bind(&no_this_arg);
- __ DropArgumentsAndPushNewReceiver(rax, rdx, rcx,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ rax, rdx, rcx, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
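
JSParameterCount is the helper these comparisons switch to: it maps "n user-visible parameters" to the on-stack argument count, adding one slot when argc includes the receiver. A sketch mirroring how the hunks use it; the constants are assumptions tied to the flag:

constexpr bool kJSArgcIncludesReceiver = true;  // demo assumption
constexpr int kJSArgcReceiverSlots = kJSArgcIncludesReceiver ? 1 : 0;

constexpr int JSParameterCount(int param_count_without_receiver) {
  return param_count_without_receiver + kJSArgcReceiverSlots;
}

// fn.apply(thisArg) leaves only the receiver on the stack:
static_assert(JSParameterCount(0) == 1, "receiver occupies one slot");
static_assert(JSParameterCount(3) == 4, "three parameters plus receiver");

int main() {}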
@@ -1923,7 +1992,7 @@ void Builtins::Generate_FunctionPrototypeApply(MacroAssembler* masm) {
// Function.prototype.apply() yet, we use a normal Call builtin here.
__ bind(&no_arguments);
{
- __ Move(rax, 0);
+ __ Move(rax, JSParameterCount(0));
__ Jump(masm->isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
}
}
@@ -1937,7 +2006,7 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// ...
// rsp[8 * n] : Argument n-1
// rsp[8 * (n + 1)] : Argument n
- // rax contains the number of arguments, n, not counting the receiver.
+ // rax contains the number of arguments, n.
// 1. Get the callable to call (passed as receiver) from the stack.
{
@@ -1952,8 +2021,13 @@ void Builtins::Generate_FunctionPrototypeCall(MacroAssembler* masm) {
// 3. Make sure we have at least one argument.
{
Label done;
- __ testq(rax, rax);
- __ j(not_zero, &done, Label::kNear);
+ if (kJSArgcIncludesReceiver) {
+ __ cmpq(rax, Immediate(JSParameterCount(0)));
+ __ j(greater, &done, Label::kNear);
+ } else {
+ __ testq(rax, rax);
+ __ j(not_zero, &done, Label::kNear);
+ }
__ PushRoot(RootIndex::kUndefinedValue);
__ incq(rax);
__ bind(&done);
@@ -1989,18 +2063,19 @@ void Builtins::Generate_ReflectApply(MacroAssembler* masm) {
__ LoadRoot(rdi, RootIndex::kUndefinedValue);
__ movq(rdx, rdi);
__ movq(rbx, rdi);
- __ cmpq(rax, Immediate(1));
+ __ cmpq(rax, Immediate(JSParameterCount(1)));
__ j(below, &done, Label::kNear);
__ movq(rdi, args[1]); // target
__ j(equal, &done, Label::kNear);
__ movq(rdx, args[2]); // thisArgument
- __ cmpq(rax, Immediate(3));
+ __ cmpq(rax, Immediate(JSParameterCount(3)));
__ j(below, &done, Label::kNear);
__ movq(rbx, args[3]); // argumentsList
__ bind(&done);
- __ DropArgumentsAndPushNewReceiver(rax, rdx, rcx,
- TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ __ DropArgumentsAndPushNewReceiver(
+ rax, rdx, rcx, TurboAssembler::kCountIsInteger,
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -2039,20 +2114,21 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
__ LoadRoot(rdi, RootIndex::kUndefinedValue);
__ movq(rdx, rdi);
__ movq(rbx, rdi);
- __ cmpq(rax, Immediate(1));
+ __ cmpq(rax, Immediate(JSParameterCount(1)));
__ j(below, &done, Label::kNear);
__ movq(rdi, args[1]); // target
__ movq(rdx, rdi); // new.target defaults to target
__ j(equal, &done, Label::kNear);
__ movq(rbx, args[2]); // argumentsList
- __ cmpq(rax, Immediate(3));
+ __ cmpq(rax, Immediate(JSParameterCount(3)));
__ j(below, &done, Label::kNear);
__ movq(rdx, args[3]); // new.target
__ bind(&done);
__ DropArgumentsAndPushNewReceiver(
rax, masm->RootAsOperand(RootIndex::kUndefinedValue), rcx,
TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
}
// ----------- S t a t e -------------
@@ -2076,13 +2152,68 @@ void Builtins::Generate_ReflectConstruct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
+namespace {
+
+// Allocate new stack space for |count| arguments and shift all existing
+// arguments already on the stack. |pointer_to_new_space_out| points to the
+// first free slot on the stack to copy additional arguments to and
+// |argc_in_out| is updated to include |count|.
+void Generate_AllocateSpaceAndShiftExistingArguments(
+ MacroAssembler* masm, Register count, Register argc_in_out,
+ Register pointer_to_new_space_out, Register scratch1, Register scratch2) {
+ DCHECK(!AreAliased(count, argc_in_out, pointer_to_new_space_out, scratch1,
+ scratch2, kScratchRegister));
+ // Use pointer_to_new_space_out as scratch until we set it to the correct
+ // value at the end.
+ Register old_rsp = pointer_to_new_space_out;
+ Register new_space = kScratchRegister;
+ __ movq(old_rsp, rsp);
+
+ __ leaq(new_space, Operand(count, times_system_pointer_size, 0));
+ __ AllocateStackSpace(new_space);
+
+ Register copy_count = argc_in_out;
+ if (!kJSArgcIncludesReceiver) {
+ // We have a spare register, so use it instead of clobbering argc.
+ // lea + add (to add the count to argc in the end) uses 1 less byte than
+ // inc + lea (with base, index and disp), at the cost of 1 extra register.
+ copy_count = scratch1;
+ __ leaq(copy_count, Operand(argc_in_out, 1)); // Include the receiver.
+ }
+ Register current = scratch2;
+ Register value = kScratchRegister;
+
+ Label loop, entry;
+ __ Move(current, 0);
+ __ jmp(&entry);
+ __ bind(&loop);
+ __ movq(value, Operand(old_rsp, current, times_system_pointer_size, 0));
+ __ movq(Operand(rsp, current, times_system_pointer_size, 0), value);
+ __ incq(current);
+ __ bind(&entry);
+ __ cmpq(current, copy_count);
+ __ j(less_equal, &loop, Label::kNear);
+
+ // Point to the next free slot above the shifted arguments (copy_count + 1
+ // slot for the return address).
+ __ leaq(
+ pointer_to_new_space_out,
+ Operand(rsp, copy_count, times_system_pointer_size, kSystemPointerSize));
+ // We use addl instead of addq here because we can omit REX.W, saving 1 byte.
+ // We are especially constrained here because we are close to reaching the
+ // limit for a near jump to the stackoverflow label, so every byte counts.
+ __ addl(argc_in_out, count); // Update total number of arguments.
+}
+
+} // namespace
+
// static
// TODO(v8:11615): Observe Code::kMaxArguments in CallOrConstructVarargs
void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
Handle<Code> code) {
// ----------- S t a t e -------------
// -- rdi : target
- // -- rax : number of parameters on the stack (not including the receiver)
+ // -- rax : number of parameters on the stack
// -- rbx : arguments list (a FixedArray)
// -- rcx : len (number of elements to push from args)
// -- rdx : new.target (for [[Construct]])
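
Generate_AllocateSpaceAndShiftExistingArguments replaces two nearly identical open-coded copy loops, removed in the two hunks that follow. A plain C++ model of its contract, with index 0 standing in for the stack top; a sketch of the data movement, not the register-level implementation:

#include <cassert>
#include <cstddef>
#include <vector>

constexpr bool kJSArgcIncludesReceiver = true;  // demo assumption

// stack[0] plays the role of rsp: slot 0 holds the return address,
// followed by the receiver and the arguments. Returns the index of the
// first free slot for the extra arguments.
size_t AllocateSpaceAndShiftExistingArguments(std::vector<int>* stack,
                                              size_t count, size_t* argc) {
  size_t copy_count = kJSArgcIncludesReceiver ? *argc : *argc + 1;
  stack->insert(stack->begin(), count, 0);    // AllocateStackSpace
  for (size_t i = 0; i <= copy_count; ++i) {  // <=: the return address too
    (*stack)[i] = (*stack)[i + count];        // shift toward the new top
  }
  *argc += count;                             // update total argument count
  return copy_count + 1;                      // first slot of the gap
}

int main() {
  // Return address, receiver, two arguments (argc == 3 incl. receiver).
  std::vector<int> stack = {-1, 100, 1, 2};
  size_t argc = 3;
  size_t gap = AllocateSpaceAndShiftExistingArguments(&stack, 2, &argc);
  assert(gap == 4 && argc == 5);
  assert(stack[0] == -1 && stack[3] == 2);  // existing words were shifted
}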
@@ -2114,28 +2245,10 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
// Push additional arguments onto the stack.
// Move the arguments already in the stack,
// including the receiver and the return address.
- {
- Label copy, check;
- Register src = r8, dest = rsp, num = r9, current = r12;
- __ movq(src, rsp);
- __ leaq(kScratchRegister, Operand(rcx, times_system_pointer_size, 0));
- __ AllocateStackSpace(kScratchRegister);
- __ leaq(num, Operand(rax, 2)); // Number of words to copy.
- // +2 for receiver and return address.
- __ Move(current, 0);
- __ jmp(&check);
- __ bind(&copy);
- __ movq(kScratchRegister,
- Operand(src, current, times_system_pointer_size, 0));
- __ movq(Operand(dest, current, times_system_pointer_size, 0),
- kScratchRegister);
- __ incq(current);
- __ bind(&check);
- __ cmpq(current, num);
- __ j(less, &copy);
- __ leaq(r8, Operand(rsp, num, times_system_pointer_size, 0));
- }
-
+ // rcx: Number of arguments to make room for.
+ // rax: Number of arguments already on the stack.
+ // r8: Points to first free slot on the stack after arguments were shifted.
+ Generate_AllocateSpaceAndShiftExistingArguments(masm, rcx, rax, r8, r9, r12);
// Copy the additional arguments onto the stack.
{
Register value = r12;
@@ -2156,7 +2269,6 @@ void Builtins::Generate_CallOrConstructVarargs(MacroAssembler* masm,
__ incl(current);
__ jmp(&loop);
__ bind(&done);
- __ addq(rax, current);
}
// Tail-call to the actual Call or Construct builtin.
@@ -2171,7 +2283,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
CallOrConstructMode mode,
Handle<Code> code) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : the new target (for [[Construct]] calls)
// -- rdi : the target to call (can be any Object)
// -- rcx : start index (to support rest parameters)
@@ -2197,12 +2309,14 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
Label stack_done, stack_overflow;
__ movq(r8, Operand(rbp, StandardFrameConstants::kArgCOffset));
+ if (kJSArgcIncludesReceiver) {
+ __ decq(r8);
+ }
__ subl(r8, rcx);
__ j(less_equal, &stack_done);
{
// ----------- S t a t e -------------
- // -- rax : the number of arguments already in the stack (not including the
- // receiver)
+ // -- rax : the number of arguments already in the stack
// -- rbp : point to the caller stack frame
// -- rcx : start index (to support rest parameters)
// -- rdx : the new target (for [[Construct]] calls)
@@ -2216,29 +2330,11 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
// Forward the arguments from the caller frame.
// Move the arguments already in the stack,
// including the receiver and the return address.
- {
- Label copy, check;
- Register src = r9, dest = rsp, num = r12, current = r15;
- __ movq(src, rsp);
- __ leaq(kScratchRegister, Operand(r8, times_system_pointer_size, 0));
- __ AllocateStackSpace(kScratchRegister);
- __ leaq(num, Operand(rax, 2)); // Number of words to copy.
- // +2 for receiver and return address.
- __ Move(current, 0);
- __ jmp(&check);
- __ bind(&copy);
- __ movq(kScratchRegister,
- Operand(src, current, times_system_pointer_size, 0));
- __ movq(Operand(dest, current, times_system_pointer_size, 0),
- kScratchRegister);
- __ incq(current);
- __ bind(&check);
- __ cmpq(current, num);
- __ j(less, &copy);
- __ leaq(r9, Operand(rsp, num, times_system_pointer_size, 0));
- }
-
- __ addl(rax, r8); // Update total number of arguments.
+ // r8: Number of arguments to make room for.
+ // rax: Number of arguments already on the stack.
+ // r9: Points to first free slot on the stack after arguments were shifted.
+ Generate_AllocateSpaceAndShiftExistingArguments(masm, r8, rax, r9, r12,
+ r15);
// Point to the first argument to copy (skipping receiver).
__ leaq(rcx, Operand(rcx, times_system_pointer_size,
@@ -2274,7 +2370,7 @@ void Builtins::Generate_CallOrConstructForwardVarargs(MacroAssembler* masm,
void Builtins::Generate_CallFunction(MacroAssembler* masm,
ConvertReceiverMode mode) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdi : the function to call (checked to be a JSFunction)
// -----------------------------------
@@ -2291,7 +2387,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ j(not_zero, &class_constructor);
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : the shared function info.
// -- rdi : the function to call (checked to be a JSFunction)
// -----------------------------------
@@ -2308,7 +2404,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ j(not_zero, &done_convert);
{
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : the shared function info.
// -- rdi : the function to call (checked to be a JSFunction)
// -- rsi : the function context.
@@ -2365,7 +2461,7 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ bind(&done_convert);
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : the shared function info.
// -- rdi : the function to call (checked to be a JSFunction)
// -- rsi : the function context.
@@ -2373,7 +2469,6 @@ void Builtins::Generate_CallFunction(MacroAssembler* masm,
__ movzxwq(
rbx, FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
-
__ InvokeFunctionCode(rdi, no_reg, rbx, rax, InvokeType::kJump);
// The function is a "classConstructor", need to raise an exception.
@@ -2389,7 +2484,7 @@ namespace {
void Generate_PushBoundArguments(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : new.target (only in case of [[Construct]])
// -- rdi : target (checked to be a JSBoundFunction)
// -----------------------------------
@@ -2403,7 +2498,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
__ j(zero, &no_bound_arguments);
{
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : new.target (only in case of [[Construct]])
// -- rdi : target (checked to be a JSBoundFunction)
// -- rcx : the [[BoundArguments]] (implemented as FixedArray)
@@ -2467,7 +2562,7 @@ void Generate_PushBoundArguments(MacroAssembler* masm) {
// static
void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdi : the function to call (checked to be a JSBoundFunction)
// -----------------------------------
__ AssertBoundFunction(rdi);
@@ -2491,7 +2586,7 @@ void Builtins::Generate_CallBoundFunctionImpl(MacroAssembler* masm) {
// static
void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdi : the target to call (can be any Object)
// -----------------------------------
StackArgumentsAccessor args(rax);
@@ -2540,7 +2635,7 @@ void Builtins::Generate_Call(MacroAssembler* masm, ConvertReceiverMode mode) {
// static
void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : the new target (checked to be a constructor)
// -- rdi : the constructor to call (checked to be a JSFunction)
// -----------------------------------
@@ -2566,7 +2661,7 @@ void Builtins::Generate_ConstructFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : the new target (checked to be a constructor)
// -- rdi : the constructor to call (checked to be a JSBoundFunction)
// -----------------------------------
@@ -2595,7 +2690,7 @@ void Builtins::Generate_ConstructBoundFunction(MacroAssembler* masm) {
// static
void Builtins::Generate_Construct(MacroAssembler* masm) {
// ----------- S t a t e -------------
- // -- rax : the number of arguments (not including the receiver)
+ // -- rax : the number of arguments
// -- rdx : the new target (either the same as the constructor or
// the JSFunction on which new was invoked initially)
// -- rdi : the constructor to call (can be any Object)
@@ -2676,8 +2771,8 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
}
// Load deoptimization data from the code object.
- __ LoadTaggedPointerField(rbx,
- FieldOperand(rax, Code::kDeoptimizationDataOffset));
+ __ LoadTaggedPointerField(
+ rbx, FieldOperand(rax, Code::kDeoptimizationDataOrInterpreterDataOffset));
// Load the OSR entrypoint offset from the deoptimization data.
__ SmiUntagField(
@@ -2876,6 +2971,9 @@ void Builtins::Generate_GenericJSToWasmWrapper(MacroAssembler* masm) {
// Put the in_parameter count on the stack, we only need it at the very end
// when we pop the parameters off the stack.
Register in_param_count = rax;
+ if (kJSArgcIncludesReceiver) {
+ __ decq(in_param_count);
+ }
__ movq(MemOperand(rbp, kInParamCountOffset), in_param_count);
in_param_count = no_reg;
@@ -3691,12 +3789,6 @@ void Builtins::Generate_CEntry(MacroAssembler* masm, int result_size,
__ movq(Operand(rbp, StandardFrameConstants::kContextOffset), rsi);
__ bind(&skip);
- // Reset the masking register. This is done independent of the underlying
- // feature flag {FLAG_untrusted_code_mitigations} to make the snapshot work
- // with both configurations. It is safe to always do this, because the
- // underlying register is caller-saved and can be arbitrarily clobbered.
- __ ResetSpeculationPoisonRegister();
-
// Clear c_entry_fp, like we do in `LeaveExitFrame`.
ExternalReference c_entry_fp_address = ExternalReference::Create(
IsolateAddressId::kCEntryFPAddress, masm->isolate());
@@ -4384,7 +4476,7 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// always have baseline code.
if (!is_osr) {
Label start_with_baseline;
- __ CmpObjectType(code_obj, BASELINE_DATA_TYPE, kScratchRegister);
+ __ CmpObjectType(code_obj, CODET_TYPE, kScratchRegister);
__ j(equal, &start_with_baseline);
// Start with bytecode as there is no baseline code.
@@ -4397,16 +4489,17 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
// Start with baseline code.
__ bind(&start_with_baseline);
} else if (FLAG_debug_code) {
- __ CmpObjectType(code_obj, BASELINE_DATA_TYPE, kScratchRegister);
+ __ CmpObjectType(code_obj, CODET_TYPE, kScratchRegister);
__ Assert(equal, AbortReason::kExpectedBaselineData);
}
// Load baseline code from baseline data.
- __ LoadTaggedPointerField(
- code_obj, FieldOperand(code_obj, BaselineData::kBaselineCodeOffset));
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
__ LoadCodeDataContainerCodeNonBuiltin(code_obj, code_obj);
}
+ if (FLAG_debug_code) {
+ AssertCodeIsBaseline(masm, code_obj, r11);
+ }
// Load the feedback vector.
Register feedback_vector = r11;
diff --git a/deps/v8/src/codegen/OWNERS b/deps/v8/src/codegen/OWNERS
index 364d34fb09..6644faa7fb 100644
--- a/deps/v8/src/codegen/OWNERS
+++ b/deps/v8/src/codegen/OWNERS
@@ -8,9 +8,6 @@ jkummerow@chromium.org
leszeks@chromium.org
mslekova@chromium.org
mvstanton@chromium.org
-mythria@chromium.org
neis@chromium.org
nicohartmann@chromium.org
-rmcilroy@chromium.org
-solanes@chromium.org
zhin@chromium.org
diff --git a/deps/v8/src/codegen/arm/assembler-arm.cc b/deps/v8/src/codegen/arm/assembler-arm.cc
index 970386be72..b49d9ed186 100644
--- a/deps/v8/src/codegen/arm/assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/assembler-arm.cc
@@ -4786,7 +4786,7 @@ static Instr EncodeNeonPairwiseOp(NeonPairwiseOp op, NeonDataType dt,
void Assembler::vpadd(DwVfpRegister dst, DwVfpRegister src1,
DwVfpRegister src2) {
DCHECK(IsEnabled(NEON));
- // Dd = vpadd(Dn, Dm) SIMD integer pairwise ADD.
+ // Dd = vpadd(Dn, Dm) SIMD floating point pairwise ADD.
// Instruction details available in ARM DDI 0406C.b, A8-982.
int vd, d;
dst.split_code(&vd, &d);
@@ -5472,8 +5472,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
if (!entry.is_merged()) {
if (IsOnHeap() && RelocInfo::IsEmbeddedObjectMode(entry.rmode())) {
int offset = pc_offset();
- saved_handles_for_raw_object_ptr_.push_back(
- std::make_pair(offset, entry.value()));
+ saved_handles_for_raw_object_ptr_.emplace_back(offset, entry.value());
Handle<HeapObject> object(reinterpret_cast<Address*>(entry.value()));
emit(object->ptr());
DCHECK(EmbeddedObjectMatches(offset, object));
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.cc b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
index 26d16406a6..43bbd86207 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.cc
@@ -343,29 +343,32 @@ void TurboAssembler::LoadCodeObjectEntry(Register destination,
DCHECK(root_array_available());
Label if_code_is_off_heap, out;
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
-
- DCHECK(!AreAliased(destination, scratch));
- DCHECK(!AreAliased(code_object, scratch));
-
- // Check whether the Code object is an off-heap trampoline. If so, call its
- // (off-heap) entry point directly without going through the (on-heap)
- // trampoline. Otherwise, just call the Code object as always.
- ldr(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
- tst(scratch, Operand(Code::IsOffHeapTrampoline::kMask));
- b(ne, &if_code_is_off_heap);
-
- // Not an off-heap trampoline, the entry point is at
- // Code::raw_instruction_start().
- add(destination, code_object, Operand(Code::kHeaderSize - kHeapObjectTag));
- jmp(&out);
-
- // An off-heap trampoline, the entry point is loaded from the builtin entry
- // table.
- bind(&if_code_is_off_heap);
- ldr(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
- lsl(destination, scratch, Operand(kSystemPointerSizeLog2));
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+
+ DCHECK(!AreAliased(destination, scratch));
+ DCHECK(!AreAliased(code_object, scratch));
+
+ // Check whether the Code object is an off-heap trampoline. If so, call
+ // its (off-heap) entry point directly without going through the (on-heap)
+ // trampoline. Otherwise, just call the Code object as always.
+ ldr(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
+ tst(scratch, Operand(Code::IsOffHeapTrampoline::kMask));
+ b(ne, &if_code_is_off_heap);
+
+ // Not an off-heap trampoline, the entry point is at
+ // Code::raw_instruction_start().
+ add(destination, code_object,
+ Operand(Code::kHeaderSize - kHeapObjectTag));
+ jmp(&out);
+
+ // An off-heap trampoline, the entry point is loaded from the builtin
+ // entry table.
+ bind(&if_code_is_off_heap);
+ ldr(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
+ lsl(destination, scratch, Operand(kSystemPointerSizeLog2));
+ }
add(destination, destination, kRootRegister);
ldr(destination,
MemOperand(destination, IsolateData::builtin_entry_table_offset()));
@@ -1669,7 +1672,11 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
str(scratch, MemOperand(dest, kSystemPointerSize, PostIndex));
sub(num, num, Operand(1), SetCC);
bind(&check);
- b(ge, &copy);
+ if (kJSArgcIncludesReceiver) {
+ b(gt, &copy);
+ } else {
+ b(ge, &copy);
+ }
}
// Fill remaining expected arguments with undefined values.
@@ -2660,10 +2667,6 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
sub(dst, pc, Operand(pc_offset() + Instruction::kPcLoadDelta));
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- mov(kSpeculationPoisonRegister, Operand(-1));
-}
-
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
diff --git a/deps/v8/src/codegen/arm/macro-assembler-arm.h b/deps/v8/src/codegen/arm/macro-assembler-arm.h
index 41bc5ec544..bcecaec429 100644
--- a/deps/v8/src/codegen/arm/macro-assembler-arm.h
+++ b/deps/v8/src/codegen/arm/macro-assembler-arm.h
@@ -560,8 +560,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
- void ResetSpeculationPoisonRegister();
-
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this
diff --git a/deps/v8/src/codegen/arm/register-arm.h b/deps/v8/src/codegen/arm/register-arm.h
index 6608ad4ede..8cc838945d 100644
--- a/deps/v8/src/codegen/arm/register-arm.h
+++ b/deps/v8/src/codegen/arm/register-arm.h
@@ -336,7 +336,6 @@ constexpr Register kReturnRegister2 = r2;
constexpr Register kJSFunctionRegister = r1;
constexpr Register kContextRegister = r7;
constexpr Register kAllocateSizeRegister = r1;
-constexpr Register kSpeculationPoisonRegister = r9;
constexpr Register kInterpreterAccumulatorRegister = r0;
constexpr Register kInterpreterBytecodeOffsetRegister = r5;
constexpr Register kInterpreterBytecodeArrayRegister = r6;
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
index ef95b4e813..09065414cc 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.cc
@@ -1516,9 +1516,7 @@ void MacroAssembler::AssertCodeT(Register object) {
UseScratchRegisterScope temps(this);
Register temp = temps.AcquireX();
- CompareObjectType(
- object, temp, temp,
- V8_EXTERNAL_CODE_SPACE_BOOL ? CODE_DATA_CONTAINER_TYPE : CODE_TYPE);
+ CompareObjectType(object, temp, temp, CODET_TYPE);
Check(eq, AbortReason::kOperandIsNotACodeT);
}
@@ -1846,8 +1844,7 @@ void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode,
Condition cond) {
int64_t offset = CalculateTargetOffset(target, rmode, pc_);
if (RelocInfo::IsRuntimeEntry(rmode) && IsOnHeap()) {
- saved_offsets_for_runtime_entries_.push_back(
- std::make_pair(pc_offset(), offset));
+ saved_offsets_for_runtime_entries_.emplace_back(pc_offset(), offset);
offset = CalculateTargetOffset(target, RelocInfo::NONE, pc_);
}
JumpHelper(offset, rmode, cond);
@@ -1895,8 +1892,7 @@ void TurboAssembler::Call(Address target, RelocInfo::Mode rmode) {
if (CanUseNearCallOrJump(rmode)) {
int64_t offset = CalculateTargetOffset(target, rmode, pc_);
if (IsOnHeap() && RelocInfo::IsRuntimeEntry(rmode)) {
- saved_offsets_for_runtime_entries_.push_back(
- std::make_pair(pc_offset(), offset));
+ saved_offsets_for_runtime_entries_.emplace_back(pc_offset(), offset);
offset = CalculateTargetOffset(target, RelocInfo::NONE, pc_);
}
DCHECK(IsNearCallOffset(offset));
@@ -2281,7 +2277,11 @@ void MacroAssembler::InvokePrologue(Register formal_parameter_count,
Register slots_to_copy = x4;
Register slots_to_claim = x5;
- Add(slots_to_copy, actual_argument_count, 1); // Copy with receiver.
+ if (kJSArgcIncludesReceiver) {
+ Mov(slots_to_copy, actual_argument_count);
+ } else {
+ Add(slots_to_copy, actual_argument_count, 1); // Copy with receiver.
+ }
Mov(slots_to_claim, extra_argument_count);
Tbz(extra_argument_count, 0, &even_extra_count);
@@ -2295,7 +2295,9 @@ void MacroAssembler::InvokePrologue(Register formal_parameter_count,
Register scratch = x11;
Add(slots_to_claim, extra_argument_count, 1);
And(scratch, actual_argument_count, 1);
- Eor(scratch, scratch, 1);
+ if (!kJSArgcIncludesReceiver) {
+ Eor(scratch, scratch, 1);
+ }
Sub(slots_to_claim, slots_to_claim, Operand(scratch, LSL, 1));
}
@@ -2316,10 +2318,13 @@ void MacroAssembler::InvokePrologue(Register formal_parameter_count,
}
Bind(&skip_move);
- Register actual_argument_with_receiver = x4;
+ Register actual_argument_with_receiver = actual_argument_count;
Register pointer_next_value = x5;
- Add(actual_argument_with_receiver, actual_argument_count,
- 1); // {slots_to_copy} was scratched.
+ if (!kJSArgcIncludesReceiver) {
+ actual_argument_with_receiver = x4;
+ Add(actual_argument_with_receiver, actual_argument_count,
+ 1); // {slots_to_copy} was scratched.
+ }
// Copy extra arguments as undefined values.
{
@@ -2919,6 +2924,18 @@ void TurboAssembler::StoreTaggedField(const Register& value,
}
}
+void TurboAssembler::AtomicStoreTaggedField(const Register& value,
+ const Register& dst_base,
+ const Register& dst_index,
+ const Register& temp) {
+ Add(temp, dst_base, dst_index);
+ if (COMPRESS_POINTERS_BOOL) {
+ Stlr(value.W(), temp);
+ } else {
+ Stlr(value, temp);
+ }
+}
+
void TurboAssembler::DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand) {
ASM_CODE_COMMENT(this);
@@ -2950,6 +2967,40 @@ void TurboAssembler::DecompressAnyTagged(const Register& destination,
Add(destination, kPtrComprCageBaseRegister, destination);
}
+void TurboAssembler::AtomicDecompressTaggedSigned(const Register& destination,
+ const Register& base,
+ const Register& index,
+ const Register& temp) {
+ ASM_CODE_COMMENT(this);
+ Add(temp, base, index);
+ Ldar(destination.W(), temp);
+ if (FLAG_debug_code) {
+ // Corrupt the top 32 bits. Made up of 16 fixed bits and 16 pc offset bits.
+ Add(destination, destination,
+ ((kDebugZapValue << 16) | (pc_offset() & 0xffff)) << 32);
+ }
+}
+
+void TurboAssembler::AtomicDecompressTaggedPointer(const Register& destination,
+ const Register& base,
+ const Register& index,
+ const Register& temp) {
+ ASM_CODE_COMMENT(this);
+ Add(temp, base, index);
+ Ldar(destination.W(), temp);
+ Add(destination, kPtrComprCageBaseRegister, destination);
+}
+
+void TurboAssembler::AtomicDecompressAnyTagged(const Register& destination,
+ const Register& base,
+ const Register& index,
+ const Register& temp) {
+ ASM_CODE_COMMENT(this);
+ Add(temp, base, index);
+ Ldar(destination.W(), temp);
+ Add(destination, kPtrComprCageBaseRegister, destination);
+}
+
void TurboAssembler::CheckPageFlag(const Register& object, int mask,
Condition cc, Label* condition_met) {
ASM_CODE_COMMENT(this);
@@ -3540,10 +3591,6 @@ void TurboAssembler::ComputeCodeStartAddress(const Register& rd) {
adr(rd, -pc_offset());
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- Mov(kSpeculationPoisonRegister, -1);
-}
-
void TurboAssembler::RestoreFPAndLR() {
static_assert(StandardFrameConstants::kCallerFPOffset + kSystemPointerSize ==
StandardFrameConstants::kCallerPCOffset,
@@ -3575,6 +3622,16 @@ void TurboAssembler::StoreReturnAddressInWasmExitFrame(Label* return_location) {
}
#endif // V8_ENABLE_WEBASSEMBLY
+void TurboAssembler::PopcntHelper(Register dst, Register src) {
+ UseScratchRegisterScope temps(this);
+ VRegister scratch = temps.AcquireV(kFormat8B);
+ VRegister tmp = src.Is32Bits() ? scratch.S() : scratch.D();
+ Fmov(tmp, src);
+ Cnt(scratch, scratch);
+ Addv(scratch.B(), scratch);
+ Fmov(dst, tmp);
+}
+
void TurboAssembler::I64x2BitMask(Register dst, VRegister src) {
ASM_CODE_COMMENT(this);
UseScratchRegisterScope scope(this);
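The new atomic tagged-field helpers above pair Stlr (store-release) with Ldar (load-acquire) and, under pointer compression, store only the low 32 bits and re-add the cage base after the load. A standalone C++ model of the same protocol (a sketch, not V8 code; cage_base stands in for kPtrComprCageBaseRegister):

#include <atomic>
#include <cstdint>

// Compressed tagged field: a 32-bit offset from a 4GB-aligned cage base.
uint64_t AtomicDecompressTagged(const std::atomic<uint32_t>& field,
                                uint64_t cage_base) {
  uint32_t compressed = field.load(std::memory_order_acquire);  // Ldar
  return cage_base + compressed;  // Add(destination, cage base, destination)
}

void AtomicStoreTagged(std::atomic<uint32_t>& field, uint64_t tagged_value) {
  // Stlr of the W (32-bit) register keeps only the compressed low bits.
  field.store(static_cast<uint32_t>(tagged_value),
              std::memory_order_release);
}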
diff --git a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
index 9128ba2c18..11a5e7eb9a 100644
--- a/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
+++ b/deps/v8/src/codegen/arm64/macro-assembler-arm64.h
@@ -1192,6 +1192,29 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
#undef DECLARE_FUNCTION
+ void St1(const VRegister& vt, const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st1(vt, dst);
+ }
+ void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st1(vt, vt2, dst);
+ }
+ void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st1(vt, vt2, vt3, dst);
+ }
+ void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
+ const VRegister& vt4, const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st1(vt, vt2, vt3, vt4, dst);
+ }
+ void St1(const VRegister& vt, int lane, const MemOperand& dst) {
+ DCHECK(allow_macro_instructions());
+ st1(vt, lane, dst);
+ }
+
#define NEON_2VREG_SHIFT_MACRO_LIST(V) \
V(rshrn, Rshrn) \
V(rshrn2, Rshrn2) \
@@ -1347,8 +1370,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(const Register& rd);
- void ResetSpeculationPoisonRegister();
-
// ---------------------------------------------------------------------------
// Pointer compression Support
@@ -1373,6 +1394,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void StoreTaggedField(const Register& value,
const MemOperand& dst_field_operand);
+ void AtomicStoreTaggedField(const Register& value, const Register& dst_base,
+ const Register& dst_index, const Register& temp);
+
void DecompressTaggedSigned(const Register& destination,
const MemOperand& field_operand);
void DecompressTaggedPointer(const Register& destination,
@@ -1382,6 +1406,17 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void DecompressAnyTagged(const Register& destination,
const MemOperand& field_operand);
+ void AtomicDecompressTaggedSigned(const Register& destination,
+ const Register& base, const Register& index,
+ const Register& temp);
+ void AtomicDecompressTaggedPointer(const Register& destination,
+ const Register& base,
+ const Register& index,
+ const Register& temp);
+ void AtomicDecompressAnyTagged(const Register& destination,
+ const Register& base, const Register& index,
+ const Register& temp);
+
// Restore FP and LR from the values stored in the current frame. This will
// authenticate the LR when pointer authentication is enabled.
void RestoreFPAndLR();
@@ -1390,9 +1425,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void StoreReturnAddressInWasmExitFrame(Label* return_location);
#endif // V8_ENABLE_WEBASSEMBLY
- // Wasm SIMD helpers. These instructions don't have direct lowering to native
- // instructions. These helpers allow us to define the optimal code sequence,
- // and be used in both TurboFan and Liftoff.
+ // Wasm helpers. These instructions don't have direct lowering
+ // to native instructions. These helpers allow us to define the optimal code
+ // sequence, and be used in both TurboFan and Liftoff.
+ void PopcntHelper(Register dst, Register src);
void I64x2BitMask(Register dst, VRegister src);
void I64x2AllTrue(Register dst, VRegister src);
@@ -1645,28 +1681,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
DCHECK(allow_macro_instructions());
ld4r(vt, vt2, vt3, vt4, src);
}
- void St1(const VRegister& vt, const MemOperand& dst) {
- DCHECK(allow_macro_instructions());
- st1(vt, dst);
- }
- void St1(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
- DCHECK(allow_macro_instructions());
- st1(vt, vt2, dst);
- }
- void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
- const MemOperand& dst) {
- DCHECK(allow_macro_instructions());
- st1(vt, vt2, vt3, dst);
- }
- void St1(const VRegister& vt, const VRegister& vt2, const VRegister& vt3,
- const VRegister& vt4, const MemOperand& dst) {
- DCHECK(allow_macro_instructions());
- st1(vt, vt2, vt3, vt4, dst);
- }
- void St1(const VRegister& vt, int lane, const MemOperand& dst) {
- DCHECK(allow_macro_instructions());
- st1(vt, lane, dst);
- }
void St2(const VRegister& vt, const VRegister& vt2, const MemOperand& dst) {
DCHECK(allow_macro_instructions());
st2(vt, vt2, dst);
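In the hunks above, the St1 overloads move from MacroAssembler down to TurboAssembler so the shared Wasm helpers can emit them, and the helper comment is generalized now that PopcntHelper, a scalar (non-SIMD) helper, joins the list. Its Fmov/Cnt/Addv sequence computes population count by counting bits per byte lane and summing the lanes; a scalar model (a sketch, not V8 code):

#include <cstdint>

uint64_t PopcntModel(uint64_t src) {  // Fmov: integer -> vector register
  uint64_t total = 0;
  for (int i = 0; i < 8; ++i) {       // one 8-bit lane per iteration
    uint8_t lane = static_cast<uint8_t>(src >> (8 * i));
    while (lane) {                    // Cnt: per-lane bit count
      total += lane & 1;
      lane >>= 1;
    }
  }
  return total;                       // Addv: horizontal sum of the lanes
}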
diff --git a/deps/v8/src/codegen/arm64/register-arm64.h b/deps/v8/src/codegen/arm64/register-arm64.h
index 5b234526a4..29a4212aac 100644
--- a/deps/v8/src/codegen/arm64/register-arm64.h
+++ b/deps/v8/src/codegen/arm64/register-arm64.h
@@ -547,8 +547,6 @@ using Simd128Register = VRegister;
// Lists of registers.
class V8_EXPORT_PRIVATE CPURegList {
public:
- CPURegList() = default;
-
template <typename... CPURegisters>
explicit CPURegList(CPURegister reg0, CPURegisters... regs)
: list_(CPURegister::ListOf(reg0, regs...)),
@@ -701,8 +699,6 @@ constexpr Register kJSFunctionRegister = x1;
constexpr Register kContextRegister = cp;
constexpr Register kAllocateSizeRegister = x1;
-constexpr Register kSpeculationPoisonRegister = x23;
-
constexpr Register kInterpreterAccumulatorRegister = x0;
constexpr Register kInterpreterBytecodeOffsetRegister = x19;
constexpr Register kInterpreterBytecodeArrayRegister = x20;
diff --git a/deps/v8/src/codegen/assembler-arch.h b/deps/v8/src/codegen/assembler-arch.h
index 3569644e52..2e1b56c467 100644
--- a/deps/v8/src/codegen/assembler-arch.h
+++ b/deps/v8/src/codegen/assembler-arch.h
@@ -21,6 +21,8 @@
#include "src/codegen/mips/assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/assembler-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/codegen/loong64/assembler-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/codegen/s390/assembler-s390.h"
#elif V8_TARGET_ARCH_RISCV64
diff --git a/deps/v8/src/codegen/assembler-inl.h b/deps/v8/src/codegen/assembler-inl.h
index c04b6d9687..084f12cc7e 100644
--- a/deps/v8/src/codegen/assembler-inl.h
+++ b/deps/v8/src/codegen/assembler-inl.h
@@ -21,6 +21,8 @@
#include "src/codegen/mips/assembler-mips-inl.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/assembler-mips64-inl.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/codegen/loong64/assembler-loong64-inl.h"
#elif V8_TARGET_ARCH_S390
#include "src/codegen/s390/assembler-s390-inl.h"
#elif V8_TARGET_ARCH_RISCV64
diff --git a/deps/v8/src/codegen/assembler.cc b/deps/v8/src/codegen/assembler.cc
index dfd406694a..cacbfbd679 100644
--- a/deps/v8/src/codegen/assembler.cc
+++ b/deps/v8/src/codegen/assembler.cc
@@ -248,6 +248,12 @@ AssemblerBase::AssemblerBase(const AssemblerOptions& options,
if (!buffer_) buffer_ = NewAssemblerBuffer(kDefaultBufferSize);
buffer_start_ = buffer_->start();
pc_ = buffer_start_;
+ if (IsOnHeap()) {
+ saved_handles_for_raw_object_ptr_.reserve(
+ kSavedHandleForRawObjectsInitialSize);
+ saved_offsets_for_runtime_entries_.reserve(
+ kSavedOffsetForRuntimeEntriesInitialSize);
+ }
}
AssemblerBase::~AssemblerBase() = default;
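Reserving the two relocation side tables up front means the typical on-heap compilation never reallocates them mid-assembly; the related push_back(std::make_pair(...)) to emplace_back(...) changes elsewhere in this patch construct the pairs in place for the same reason. A minimal sketch of the pattern (sizes illustrative):

#include <cstdint>
#include <utility>
#include <vector>

int main() {
  std::vector<std::pair<uint32_t, uint32_t>> offsets;
  offsets.reserve(100);  // kSavedOffsetForRuntimeEntriesInitialSize
  for (uint32_t pc = 0; pc < 100; ++pc) {
    offsets.emplace_back(pc, pc * 4);  // constructed in place, no realloc
  }
}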
diff --git a/deps/v8/src/codegen/assembler.h b/deps/v8/src/codegen/assembler.h
index 7373b5d48b..f1e5b85f1f 100644
--- a/deps/v8/src/codegen/assembler.h
+++ b/deps/v8/src/codegen/assembler.h
@@ -276,8 +276,10 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
int pc_offset() const { return static_cast<int>(pc_ - buffer_start_); }
int pc_offset_for_safepoint() {
-#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)
- // Mips needs it's own implementation to avoid trampoline's influence.
+#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
+ defined(V8_TARGET_ARCH_LOONG64)
+  // MIPS and LOONG64 need their own implementation to avoid the influence of
+  // trampolines.
UNREACHABLE();
#else
return pc_offset();
@@ -418,6 +420,10 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
CodeCommentsWriter code_comments_writer_;
// Relocation information when code allocated directly on heap.
+  // These constants correspond to the 99th percentile across a selection of JS
+ // frameworks and benchmarks, including jquery, lodash, d3 and speedometer3.
+ const int kSavedHandleForRawObjectsInitialSize = 60;
+ const int kSavedOffsetForRuntimeEntriesInitialSize = 100;
std::vector<std::pair<uint32_t, Address>> saved_handles_for_raw_object_ptr_;
std::vector<std::pair<uint32_t, uint32_t>> saved_offsets_for_runtime_entries_;
diff --git a/deps/v8/src/codegen/atomic-memory-order.h b/deps/v8/src/codegen/atomic-memory-order.h
new file mode 100644
index 0000000000..fc56cd34e3
--- /dev/null
+++ b/deps/v8/src/codegen/atomic-memory-order.h
@@ -0,0 +1,35 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_ATOMIC_MEMORY_ORDER_H_
+#define V8_CODEGEN_ATOMIC_MEMORY_ORDER_H_
+
+#include <ostream>
+
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace internal {
+
+// Atomic memory orders supported by the compiler.
+enum class AtomicMemoryOrder : uint8_t { kAcqRel, kSeqCst };
+
+inline size_t hash_value(AtomicMemoryOrder order) {
+ return static_cast<uint8_t>(order);
+}
+
+inline std::ostream& operator<<(std::ostream& os, AtomicMemoryOrder order) {
+ switch (order) {
+ case AtomicMemoryOrder::kAcqRel:
+ return os << "kAcqRel";
+ case AtomicMemoryOrder::kSeqCst:
+ return os << "kSeqCst";
+ }
+ UNREACHABLE();
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_ATOMIC_MEMORY_ORDER_H_
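The new header gives the codegen layer a two-value memory-order vocabulary. A plausible mapping to C++ orders, as a hedged sketch (the actual lowering is per-backend): a kAcqRel store needs only the release half, while kSeqCst keeps full sequential consistency.

#include <atomic>
#include <cstdint>

enum class AtomicMemoryOrder : uint8_t { kAcqRel, kSeqCst };

// Hypothetical helper (not in the patch): the std::memory_order a store
// would use; loads would take std::memory_order_acquire instead.
inline std::memory_order StoreOrder(AtomicMemoryOrder order) {
  return order == AtomicMemoryOrder::kAcqRel ? std::memory_order_release
                                             : std::memory_order_seq_cst;
}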
diff --git a/deps/v8/src/codegen/code-factory.cc b/deps/v8/src/codegen/code-factory.cc
index f3cb604478..dcf19a0ad5 100644
--- a/deps/v8/src/codegen/code-factory.cc
+++ b/deps/v8/src/codegen/code-factory.cc
@@ -378,24 +378,47 @@ Callable CodeFactory::ArraySingleArgumentConstructor(
#ifdef V8_IS_TSAN
// static
-Builtin CodeFactory::GetTSANRelaxedStoreStub(SaveFPRegsMode fp_mode, int size) {
- if (size == kInt8Size) {
- return fp_mode == SaveFPRegsMode::kIgnore
- ? Builtin::kTSANRelaxedStore8IgnoreFP
- : Builtin::kTSANRelaxedStore8SaveFP;
- } else if (size == kInt16Size) {
- return fp_mode == SaveFPRegsMode::kIgnore
- ? Builtin::kTSANRelaxedStore16IgnoreFP
- : Builtin::kTSANRelaxedStore16SaveFP;
- } else if (size == kInt32Size) {
- return fp_mode == SaveFPRegsMode::kIgnore
- ? Builtin::kTSANRelaxedStore32IgnoreFP
- : Builtin::kTSANRelaxedStore32SaveFP;
+Builtin CodeFactory::GetTSANStoreStub(SaveFPRegsMode fp_mode, int size,
+ std::memory_order order) {
+ if (order == std::memory_order_relaxed) {
+ if (size == kInt8Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANRelaxedStore8IgnoreFP
+ : Builtin::kTSANRelaxedStore8SaveFP;
+ } else if (size == kInt16Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANRelaxedStore16IgnoreFP
+ : Builtin::kTSANRelaxedStore16SaveFP;
+ } else if (size == kInt32Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANRelaxedStore32IgnoreFP
+ : Builtin::kTSANRelaxedStore32SaveFP;
+ } else {
+ CHECK_EQ(size, kInt64Size);
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANRelaxedStore64IgnoreFP
+ : Builtin::kTSANRelaxedStore64SaveFP;
+ }
} else {
- CHECK_EQ(size, kInt64Size);
- return fp_mode == SaveFPRegsMode::kIgnore
- ? Builtin::kTSANRelaxedStore64IgnoreFP
- : Builtin::kTSANRelaxedStore64SaveFP;
+ DCHECK_EQ(order, std::memory_order_seq_cst);
+ if (size == kInt8Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANSeqCstStore8IgnoreFP
+ : Builtin::kTSANSeqCstStore8SaveFP;
+ } else if (size == kInt16Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANSeqCstStore16IgnoreFP
+ : Builtin::kTSANSeqCstStore16SaveFP;
+ } else if (size == kInt32Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANSeqCstStore32IgnoreFP
+ : Builtin::kTSANSeqCstStore32SaveFP;
+ } else {
+ CHECK_EQ(size, kInt64Size);
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? Builtin::kTSANSeqCstStore64IgnoreFP
+ : Builtin::kTSANSeqCstStore64SaveFP;
+ }
}
}
diff --git a/deps/v8/src/codegen/code-factory.h b/deps/v8/src/codegen/code-factory.h
index 4780678dad..05b27bef0e 100644
--- a/deps/v8/src/codegen/code-factory.h
+++ b/deps/v8/src/codegen/code-factory.h
@@ -90,7 +90,8 @@ class V8_EXPORT_PRIVATE CodeFactory final {
AllocationSiteOverrideMode override_mode);
#ifdef V8_IS_TSAN
- static Builtin GetTSANRelaxedStoreStub(SaveFPRegsMode fp_mode, int size);
+ static Builtin GetTSANStoreStub(SaveFPRegsMode fp_mode, int size,
+ std::memory_order order);
static Builtin GetTSANRelaxedLoadStub(SaveFPRegsMode fp_mode, int size);
#endif // V8_IS_TSAN
};
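GetTSANRelaxedStoreStub becomes GetTSANStoreStub with an extra std::memory_order parameter, doubling the matrix to 2 orders x 4 sizes x 2 FP-save modes = 16 builtins. An example resolution under the new signature (all names taken from this patch):

// A 32-bit seq-cst store that must preserve FP registers:
Builtin stub = CodeFactory::GetTSANStoreStub(
    SaveFPRegsMode::kSave, kInt32Size, std::memory_order_seq_cst);
// stub == Builtin::kTSANSeqCstStore32SaveFP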
diff --git a/deps/v8/src/codegen/code-stub-assembler.cc b/deps/v8/src/codegen/code-stub-assembler.cc
index e25135dece..92686eff12 100644
--- a/deps/v8/src/codegen/code-stub-assembler.cc
+++ b/deps/v8/src/codegen/code-stub-assembler.cc
@@ -2193,9 +2193,10 @@ TNode<IntPtrT> CodeStubAssembler::LoadArrayLength(
}
template <typename Array, typename TIndex, typename TValue>
-TNode<TValue> CodeStubAssembler::LoadArrayElement(
- TNode<Array> array, int array_header_size, TNode<TIndex> index_node,
- int additional_offset, LoadSensitivity needs_poisoning) {
+TNode<TValue> CodeStubAssembler::LoadArrayElement(TNode<Array> array,
+ int array_header_size,
+ TNode<TIndex> index_node,
+ int additional_offset) {
// TODO(v8:9708): Do we want to keep both IntPtrT and UintPtrT variants?
static_assert(std::is_same<TIndex, Smi>::value ||
std::is_same<TIndex, UintPtrT>::value ||
@@ -2210,23 +2211,17 @@ TNode<TValue> CodeStubAssembler::LoadArrayElement(
CSA_ASSERT(this, IsOffsetInBounds(offset, LoadArrayLength(array),
array_header_size));
constexpr MachineType machine_type = MachineTypeOf<TValue>::value;
- // TODO(gsps): Remove the Load case once LoadFromObject supports poisoning
- if (needs_poisoning == LoadSensitivity::kSafe) {
- return UncheckedCast<TValue>(LoadFromObject(machine_type, array, offset));
- } else {
- return UncheckedCast<TValue>(
- Load(machine_type, array, offset, needs_poisoning));
- }
+ return UncheckedCast<TValue>(LoadFromObject(machine_type, array, offset));
}
template V8_EXPORT_PRIVATE TNode<MaybeObject>
CodeStubAssembler::LoadArrayElement<TransitionArray, IntPtrT>(
- TNode<TransitionArray>, int, TNode<IntPtrT>, int, LoadSensitivity);
+ TNode<TransitionArray>, int, TNode<IntPtrT>, int);
template <typename TIndex>
TNode<Object> CodeStubAssembler::LoadFixedArrayElement(
TNode<FixedArray> object, TNode<TIndex> index, int additional_offset,
- LoadSensitivity needs_poisoning, CheckBounds check_bounds) {
+ CheckBounds check_bounds) {
// TODO(v8:9708): Do we want to keep both IntPtrT and UintPtrT variants?
static_assert(std::is_same<TIndex, Smi>::value ||
std::is_same<TIndex, UintPtrT>::value ||
@@ -2238,25 +2233,22 @@ TNode<Object> CodeStubAssembler::LoadFixedArrayElement(
if (NeedsBoundsCheck(check_bounds)) {
FixedArrayBoundsCheck(object, index, additional_offset);
}
- TNode<MaybeObject> element =
- LoadArrayElement(object, FixedArray::kHeaderSize, index,
- additional_offset, needs_poisoning);
+ TNode<MaybeObject> element = LoadArrayElement(object, FixedArray::kHeaderSize,
+ index, additional_offset);
return CAST(element);
}
template V8_EXPORT_PRIVATE TNode<Object>
CodeStubAssembler::LoadFixedArrayElement<Smi>(TNode<FixedArray>, TNode<Smi>,
- int, LoadSensitivity,
- CheckBounds);
+ int, CheckBounds);
template V8_EXPORT_PRIVATE TNode<Object>
CodeStubAssembler::LoadFixedArrayElement<UintPtrT>(TNode<FixedArray>,
TNode<UintPtrT>, int,
- LoadSensitivity,
CheckBounds);
template V8_EXPORT_PRIVATE TNode<Object>
CodeStubAssembler::LoadFixedArrayElement<IntPtrT>(TNode<FixedArray>,
TNode<IntPtrT>, int,
- LoadSensitivity, CheckBounds);
+ CheckBounds);
void CodeStubAssembler::FixedArrayBoundsCheck(TNode<FixedArrayBase> array,
TNode<Smi> index,
@@ -2291,9 +2283,8 @@ void CodeStubAssembler::FixedArrayBoundsCheck(TNode<FixedArrayBase> array,
TNode<Object> CodeStubAssembler::LoadPropertyArrayElement(
TNode<PropertyArray> object, TNode<IntPtrT> index) {
int additional_offset = 0;
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe;
return CAST(LoadArrayElement(object, PropertyArray::kHeaderSize, index,
- additional_offset, needs_poisoning));
+ additional_offset));
}
TNode<IntPtrT> CodeStubAssembler::LoadPropertyArrayLength(
@@ -2648,7 +2639,7 @@ TNode<Int32T> CodeStubAssembler::LoadAndUntagToWord32FixedArrayElement(
TNode<MaybeObject> CodeStubAssembler::LoadWeakFixedArrayElement(
TNode<WeakFixedArray> object, TNode<IntPtrT> index, int additional_offset) {
return LoadArrayElement(object, WeakFixedArray::kHeaderSize, index,
- additional_offset, LoadSensitivity::kSafe);
+ additional_offset);
}
TNode<Float64T> CodeStubAssembler::LoadFixedDoubleArrayElement(
@@ -2934,11 +2925,18 @@ TNode<BytecodeArray> CodeStubAssembler::LoadSharedFunctionInfoBytecodeArray(
Label check_for_interpreter_data(this, &var_result);
Label done(this, &var_result);
- GotoIfNot(HasInstanceType(var_result.value(), BASELINE_DATA_TYPE),
+ GotoIfNot(HasInstanceType(var_result.value(), CODET_TYPE),
&check_for_interpreter_data);
- TNode<HeapObject> baseline_data = LoadObjectField<HeapObject>(
- var_result.value(), BaselineData::kDataOffset);
- var_result = baseline_data;
+ {
+ TNode<Code> code = FromCodeT(CAST(var_result.value()));
+ CSA_ASSERT(
+ this, Word32Equal(DecodeWord32<Code::KindField>(LoadObjectField<Int32T>(
+ code, Code::kFlagsOffset)),
+ Int32Constant(static_cast<int>(CodeKind::BASELINE))));
+ TNode<HeapObject> baseline_data = LoadObjectField<HeapObject>(
+ code, Code::kDeoptimizationDataOrInterpreterDataOffset);
+ var_result = baseline_data;
+ }
Goto(&check_for_interpreter_data);
BIND(&check_for_interpreter_data);
@@ -3197,7 +3195,8 @@ TNode<Smi> CodeStubAssembler::BuildAppendJSArray(ElementsKind kind,
// Resize the capacity of the fixed array if it doesn't fit.
TNode<IntPtrT> first = arg_index->value();
- TNode<BInt> growth = IntPtrToBInt(IntPtrSub(args->GetLength(), first));
+ TNode<BInt> growth =
+ IntPtrToBInt(IntPtrSub(args->GetLengthWithoutReceiver(), first));
PossiblyGrowElementsCapacity(kind, array, var_length.value(), &var_elements,
growth, &pre_bailout);
@@ -4350,17 +4349,11 @@ TNode<FixedArray> CodeStubAssembler::ExtractToFixedArray(
{
bool handle_old_space = !FLAG_young_generation_large_objects;
if (handle_old_space) {
- if (extract_flags & ExtractFixedArrayFlag::kNewSpaceAllocationOnly) {
- handle_old_space = false;
- CSA_ASSERT(this, Word32BinaryNot(FixedArraySizeDoesntFitInNewSpace(
- count, FixedArray::kHeaderSize)));
- } else {
- int constant_count;
- handle_old_space =
- !TryGetIntPtrOrSmiConstantValue(count, &constant_count) ||
- (constant_count >
- FixedArray::GetMaxLengthForNewSpaceAllocation(PACKED_ELEMENTS));
- }
+ int constant_count;
+ handle_old_space =
+ !TryGetIntPtrOrSmiConstantValue(count, &constant_count) ||
+ (constant_count >
+ FixedArray::GetMaxLengthForNewSpaceAllocation(PACKED_ELEMENTS));
}
Label old_space(this, Label::kDeferred);
@@ -4563,10 +4556,7 @@ TNode<FixedArrayBase> CodeStubAssembler::ExtractFixedArray(
var_holes_converted != nullptr ? HoleConversionMode::kConvertToUndefined
: HoleConversionMode::kDontConvert;
TVARIABLE(FixedArrayBase, var_result);
- const AllocationFlags allocation_flags =
- (extract_flags & ExtractFixedArrayFlag::kNewSpaceAllocationOnly)
- ? CodeStubAssembler::kNone
- : CodeStubAssembler::kAllowLargeObjectAllocation;
+ auto allocation_flags = CodeStubAssembler::kAllowLargeObjectAllocation;
if (!first) {
first = IntPtrOrSmiConstant<TIndex>(0);
}
@@ -9535,7 +9525,8 @@ TNode<Object> CodeStubAssembler::CallGetterIfAccessor(
GetCreationContext(CAST(holder), if_bailout);
var_value = CallBuiltin(
Builtin::kCallFunctionTemplate_CheckAccessAndCompatibleReceiver,
- creation_context, getter, IntPtrConstant(0), receiver);
+ creation_context, getter, IntPtrConstant(i::JSParameterCount(0)),
+ receiver);
Goto(&done);
BIND(&runtime);
@@ -13806,9 +13797,8 @@ void CodeStubAssembler::ThrowIfArrayBufferViewBufferIsDetached(
TNode<RawPtrT> CodeStubAssembler::LoadJSArrayBufferBackingStorePtr(
TNode<JSArrayBuffer> array_buffer) {
- return LoadExternalPointerFromObject(array_buffer,
- JSArrayBuffer::kBackingStoreOffset,
- kArrayBufferBackingStoreTag);
+ return LoadObjectField<RawPtrT>(array_buffer,
+ JSArrayBuffer::kBackingStoreOffset);
}
TNode<JSArrayBuffer> CodeStubAssembler::LoadJSArrayBufferViewBuffer(
@@ -14093,7 +14083,8 @@ TNode<RawPtrT> CodeStubArguments::AtIndexPtr(TNode<IntPtrT> index) const {
}
TNode<Object> CodeStubArguments::AtIndex(TNode<IntPtrT> index) const {
- CSA_ASSERT(assembler_, assembler_->UintPtrOrSmiLessThan(index, GetLength()));
+ CSA_ASSERT(assembler_, assembler_->UintPtrOrSmiLessThan(
+ index, GetLengthWithoutReceiver()));
return assembler_->LoadFullTagged(AtIndexPtr(index));
}
@@ -14101,9 +14092,19 @@ TNode<Object> CodeStubArguments::AtIndex(int index) const {
return AtIndex(assembler_->IntPtrConstant(index));
}
+TNode<IntPtrT> CodeStubArguments::GetLengthWithoutReceiver() const {
+ TNode<IntPtrT> argc = argc_;
+ if (kJSArgcIncludesReceiver) {
+ argc = assembler_->IntPtrSub(argc, assembler_->IntPtrConstant(1));
+ }
+ return argc;
+}
+
TNode<IntPtrT> CodeStubArguments::GetLengthWithReceiver() const {
- TNode<IntPtrT> argc = GetLength();
- argc = assembler_->IntPtrAdd(argc, assembler_->IntPtrConstant(1));
+ TNode<IntPtrT> argc = argc_;
+ if (!kJSArgcIncludesReceiver) {
+ argc = assembler_->IntPtrAdd(argc, assembler_->IntPtrConstant(1));
+ }
return argc;
}
@@ -14113,8 +14114,9 @@ TNode<Object> CodeStubArguments::GetOptionalArgumentValue(
CodeStubAssembler::Label argument_missing(assembler_),
argument_done(assembler_, &result);
- assembler_->GotoIf(assembler_->UintPtrGreaterThanOrEqual(index, argc_),
- &argument_missing);
+ assembler_->GotoIf(
+ assembler_->UintPtrGreaterThanOrEqual(index, GetLengthWithoutReceiver()),
+ &argument_missing);
result = AtIndex(index);
assembler_->Goto(&argument_done);
@@ -14135,7 +14137,7 @@ void CodeStubArguments::ForEach(
first = assembler_->IntPtrConstant(0);
}
if (last == nullptr) {
- last = argc_;
+ last = GetLengthWithoutReceiver();
}
TNode<RawPtrT> start = AtIndexPtr(first);
TNode<RawPtrT> end = AtIndexPtr(last);
@@ -14150,8 +14152,7 @@ void CodeStubArguments::ForEach(
}
void CodeStubArguments::PopAndReturn(TNode<Object> value) {
- TNode<IntPtrT> pop_count =
- assembler_->IntPtrAdd(argc_, assembler_->IntPtrConstant(1));
+ TNode<IntPtrT> pop_count = GetLengthWithReceiver();
assembler_->PopAndReturn(pop_count, value);
}
@@ -14336,7 +14337,7 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
int32_t case_values[] = {
BYTECODE_ARRAY_TYPE,
- BASELINE_DATA_TYPE,
+ CODET_TYPE,
UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE,
UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE,
FUNCTION_TEMPLATE_INFO_TYPE,
@@ -14380,7 +14381,7 @@ TNode<Code> CodeStubAssembler::GetSharedFunctionInfoCode(
// IsBaselineData: Execute baseline code
BIND(&check_is_baseline_data);
{
- TNode<CodeT> baseline_code = LoadBaselineDataBaselineCode(CAST(sfi_data));
+ TNode<CodeT> baseline_code = CAST(sfi_data);
sfi_code = FromCodeT(baseline_code);
Goto(&done);
}
@@ -14563,7 +14564,15 @@ TNode<Object> CodeStubAssembler::GetArgumentValue(TorqueStructArguments args,
}
TorqueStructArguments CodeStubAssembler::GetFrameArguments(
- TNode<RawPtrT> frame, TNode<IntPtrT> argc) {
+ TNode<RawPtrT> frame, TNode<IntPtrT> argc,
+ FrameArgumentsArgcType argc_type) {
+ if (kJSArgcIncludesReceiver &&
+ argc_type == FrameArgumentsArgcType::kCountExcludesReceiver) {
+ argc = IntPtrAdd(argc, IntPtrConstant(kJSArgcReceiverSlots));
+ } else if (!kJSArgcIncludesReceiver &&
+ argc_type == FrameArgumentsArgcType::kCountIncludesReceiver) {
+ argc = IntPtrSub(argc, IntPtrConstant(1));
+ }
return CodeStubArguments(this, argc, frame).GetTorqueArguments();
}
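Most of this file's churn follows from argc bookkeeping: when kJSArgcIncludesReceiver is set, the stored count already contains the receiver slot, so the old GetLength() splits into two explicit accessors. A scalar model of the new behavior (a sketch; the flag value is build-dependent):

constexpr bool kJSArgcIncludesReceiver = true;  // build-time flag in V8

int GetLengthWithoutReceiver(int argc) {
  return kJSArgcIncludesReceiver ? argc - 1 : argc;
}
int GetLengthWithReceiver(int argc) {
  return kJSArgcIncludesReceiver ? argc : argc + 1;
}
// PopAndReturn above now pops GetLengthWithReceiver() slots in either mode.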
diff --git a/deps/v8/src/codegen/code-stub-assembler.h b/deps/v8/src/codegen/code-stub-assembler.h
index 008af6006f..f869ac687f 100644
--- a/deps/v8/src/codegen/code-stub-assembler.h
+++ b/deps/v8/src/codegen/code-stub-assembler.h
@@ -261,16 +261,17 @@ enum class PrimitiveType { kBoolean, kNumber, kString, kSymbol };
#define CSA_ASSERT_BRANCH(csa, gen, ...) \
(csa)->Assert(gen, #gen, __FILE__, __LINE__, CSA_ASSERT_ARGS(__VA_ARGS__))
-#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \
- (csa)->Assert( \
- [&]() -> TNode<BoolT> { \
- const TNode<Word32T> argc = (csa)->UncheckedParameter<Word32T>( \
- Descriptor::kJSActualArgumentsCount); \
- return (csa)->Op(argc, (csa)->Int32Constant(expected)); \
- }, \
- "argc " #op " " #expected, __FILE__, __LINE__, \
- {{SmiFromInt32((csa)->UncheckedParameter<Int32T>( \
- Descriptor::kJSActualArgumentsCount)), \
+#define CSA_ASSERT_JS_ARGC_OP(csa, Op, op, expected) \
+ (csa)->Assert( \
+ [&]() -> TNode<BoolT> { \
+ const TNode<Word32T> argc = (csa)->UncheckedParameter<Word32T>( \
+ Descriptor::kJSActualArgumentsCount); \
+ return (csa)->Op(argc, \
+ (csa)->Int32Constant(i::JSParameterCount(expected))); \
+ }, \
+ "argc " #op " " #expected, __FILE__, __LINE__, \
+ {{SmiFromInt32((csa)->UncheckedParameter<Int32T>( \
+ Descriptor::kJSActualArgumentsCount)), \
"argc"}})
#define CSA_ASSERT_JS_ARGC_EQ(csa, expected) \
@@ -1107,15 +1108,14 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<RawPtrT> LoadJSTypedArrayExternalPointerPtr(
TNode<JSTypedArray> holder) {
- return LoadExternalPointerFromObject(holder,
- JSTypedArray::kExternalPointerOffset,
- kTypedArrayExternalPointerTag);
+ return LoadObjectField<RawPtrT>(holder,
+ JSTypedArray::kExternalPointerOffset);
}
void StoreJSTypedArrayExternalPointerPtr(TNode<JSTypedArray> holder,
TNode<RawPtrT> value) {
- StoreExternalPointerToObject(holder, JSTypedArray::kExternalPointerOffset,
- value, kTypedArrayExternalPointerTag);
+ StoreObjectFieldNoWriteBarrier<RawPtrT>(
+ holder, JSTypedArray::kExternalPointerOffset, value);
}
// Load value from current parent frame by given offset in bytes.
@@ -1448,40 +1448,35 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
// Array is any array-like type that has a fixed header followed by
// tagged elements.
template <typename Array, typename TIndex, typename TValue = MaybeObject>
- TNode<TValue> LoadArrayElement(
- TNode<Array> array, int array_header_size, TNode<TIndex> index,
- int additional_offset = 0,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+ TNode<TValue> LoadArrayElement(TNode<Array> array, int array_header_size,
+ TNode<TIndex> index,
+ int additional_offset = 0);
template <typename TIndex>
TNode<Object> LoadFixedArrayElement(
TNode<FixedArray> object, TNode<TIndex> index, int additional_offset = 0,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe,
CheckBounds check_bounds = CheckBounds::kAlways);
// This doesn't emit a bounds-check. As part of the security-performance
// tradeoff, only use it if it is performance critical.
- TNode<Object> UnsafeLoadFixedArrayElement(
- TNode<FixedArray> object, TNode<IntPtrT> index, int additional_offset = 0,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
+ TNode<Object> UnsafeLoadFixedArrayElement(TNode<FixedArray> object,
+ TNode<IntPtrT> index,
+ int additional_offset = 0) {
return LoadFixedArrayElement(object, index, additional_offset,
- needs_poisoning, CheckBounds::kDebugOnly);
+ CheckBounds::kDebugOnly);
}
- TNode<Object> LoadFixedArrayElement(
- TNode<FixedArray> object, int index, int additional_offset = 0,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
+ TNode<Object> LoadFixedArrayElement(TNode<FixedArray> object, int index,
+ int additional_offset = 0) {
return LoadFixedArrayElement(object, IntPtrConstant(index),
- additional_offset, needs_poisoning);
+ additional_offset);
}
// This doesn't emit a bounds-check. As part of the security-performance
// tradeoff, only use it if it is performance critical.
- TNode<Object> UnsafeLoadFixedArrayElement(
- TNode<FixedArray> object, int index, int additional_offset = 0,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
+ TNode<Object> UnsafeLoadFixedArrayElement(TNode<FixedArray> object, int index,
+ int additional_offset = 0) {
return LoadFixedArrayElement(object, IntPtrConstant(index),
- additional_offset, needs_poisoning,
- CheckBounds::kDebugOnly);
+ additional_offset, CheckBounds::kDebugOnly);
}
TNode<Object> LoadPropertyArrayElement(TNode<PropertyArray> object,
@@ -2138,7 +2133,6 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
kFixedArrays = 1,
kFixedDoubleArrays = 2,
kDontCopyCOW = 4,
- kNewSpaceAllocationOnly = 8,
kAllFixedArrays = kFixedArrays | kFixedDoubleArrays,
kAllFixedArraysDontCopyCOW = kAllFixedArrays | kDontCopyCOW
};
@@ -3647,8 +3641,28 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<Object> GetArgumentValue(TorqueStructArguments args,
TNode<IntPtrT> index);
- TorqueStructArguments GetFrameArguments(TNode<RawPtrT> frame,
- TNode<IntPtrT> argc);
+ enum class FrameArgumentsArgcType {
+ kCountIncludesReceiver,
+ kCountExcludesReceiver
+ };
+
+ TorqueStructArguments GetFrameArguments(
+ TNode<RawPtrT> frame, TNode<IntPtrT> argc,
+ FrameArgumentsArgcType argc_type =
+ FrameArgumentsArgcType::kCountExcludesReceiver);
+
+ inline TNode<Int32T> JSParameterCount(TNode<Int32T> argc_without_receiver) {
+ return kJSArgcIncludesReceiver
+ ? Int32Add(argc_without_receiver,
+ Int32Constant(kJSArgcReceiverSlots))
+ : argc_without_receiver;
+ }
+ inline TNode<Word32T> JSParameterCount(TNode<Word32T> argc_without_receiver) {
+ return kJSArgcIncludesReceiver
+ ? Int32Add(argc_without_receiver,
+ Int32Constant(kJSArgcReceiverSlots))
+ : argc_without_receiver;
+ }
// Support for printf-style debugging
void Print(const char* s);
@@ -4086,7 +4100,7 @@ class V8_EXPORT_PRIVATE CodeStubArguments {
CodeStubArguments(CodeStubAssembler* assembler,
TorqueStructArguments torque_arguments)
: assembler_(assembler),
- argc_(torque_arguments.length),
+ argc_(torque_arguments.actual_count),
base_(torque_arguments.base),
fp_(torque_arguments.frame) {}
@@ -4104,12 +4118,12 @@ class V8_EXPORT_PRIVATE CodeStubArguments {
TNode<Object> AtIndex(int index) const;
// Return the number of arguments (excluding the receiver).
- TNode<IntPtrT> GetLength() const { return argc_; }
+ TNode<IntPtrT> GetLengthWithoutReceiver() const;
// Return the number of arguments (including the receiver).
TNode<IntPtrT> GetLengthWithReceiver() const;
TorqueStructArguments GetTorqueArguments() const {
- return TorqueStructArguments{fp_, base_, argc_};
+ return TorqueStructArguments{fp_, base_, GetLengthWithoutReceiver(), argc_};
}
TNode<Object> GetOptionalArgumentValue(TNode<IntPtrT> index,
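The header side adds FrameArgumentsArgcType so callers can state which convention their count uses, plus a JSParameterCount helper that converts a JS-visible argument count into a frame parameter count. A standalone constexpr model (flag values illustrative):

constexpr bool kJSArgcIncludesReceiver = true;
constexpr int kJSArgcReceiverSlots = 1;

constexpr int JSParameterCount(int argc_without_receiver) {
  return kJSArgcIncludesReceiver
             ? argc_without_receiver + kJSArgcReceiverSlots
             : argc_without_receiver;
}
static_assert(JSParameterCount(2) == 3, "receiver occupies one slot");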
diff --git a/deps/v8/src/codegen/compiler.cc b/deps/v8/src/codegen/compiler.cc
index 4fd70a8d9e..9fab1cd40f 100644
--- a/deps/v8/src/codegen/compiler.cc
+++ b/deps/v8/src/codegen/compiler.cc
@@ -1064,8 +1064,8 @@ Handle<Code> ContinuationForConcurrentOptimization(
function->set_code(function->feedback_vector().optimized_code());
}
return handle(function->code(), isolate);
- } else if (function->shared().HasBaselineData()) {
- Code baseline_code = function->shared().baseline_data().baseline_code();
+ } else if (function->shared().HasBaselineCode()) {
+ Code baseline_code = function->shared().baseline_code(kAcquireLoad);
function->set_code(baseline_code);
return handle(baseline_code, isolate);
}
@@ -1179,9 +1179,13 @@ void SpawnDuplicateConcurrentJobForStressTesting(Isolate* isolate,
isolate->concurrent_recompilation_enabled() &&
mode == ConcurrencyMode::kNotConcurrent &&
isolate->node_observer() == nullptr);
+ GetOptimizedCodeResultHandling result_handling =
+ FLAG_stress_concurrent_inlining_attach_code
+ ? GetOptimizedCodeResultHandling::kDefault
+ : GetOptimizedCodeResultHandling::kDiscardForTesting;
USE(GetOptimizedCode(isolate, function, ConcurrencyMode::kConcurrent,
code_kind, BytecodeOffset::None(), nullptr,
- GetOptimizedCodeResultHandling::kDiscardForTesting));
+ result_handling));
}
bool FailAndClearPendingException(Isolate* isolate) {
@@ -1308,6 +1312,7 @@ void FinalizeUnoptimizedScriptCompilation(
void CompileAllWithBaseline(Isolate* isolate,
const FinalizeUnoptimizedCompilationDataList&
finalize_unoptimized_compilation_data_list) {
+ CodePageCollectionMemoryModificationScope code_allocation(isolate->heap());
for (const auto& finalize_data : finalize_unoptimized_compilation_data_list) {
Handle<SharedFunctionInfo> shared_info = finalize_data.function_handle();
IsCompiledScope is_compiled_scope(*shared_info, isolate);
@@ -1975,7 +1980,7 @@ bool Compiler::CompileSharedWithBaseline(Isolate* isolate,
DCHECK(is_compiled_scope->is_compiled());
// Early return for already baseline-compiled functions.
- if (shared->HasBaselineData()) return true;
+ if (shared->HasBaselineCode()) return true;
// Check if we actually can compile with baseline.
if (!CanCompileWithBaseline(isolate, *shared)) return false;
@@ -1998,12 +2003,8 @@ bool Compiler::CompileSharedWithBaseline(Isolate* isolate,
// report these somehow, or silently ignore them?
return false;
}
+ shared->set_baseline_code(*code, kReleaseStore);
- Handle<HeapObject> function_data =
- handle(HeapObject::cast(shared->function_data(kAcquireLoad)), isolate);
- Handle<BaselineData> baseline_data =
- isolate->factory()->NewBaselineData(code, function_data);
- shared->set_baseline_data(*baseline_data);
if (V8_LIKELY(FLAG_use_osr)) {
// Arm back edges for OSR
shared->GetBytecodeArray(isolate).set_osr_loop_nesting_level(
@@ -2035,7 +2036,7 @@ bool Compiler::CompileBaseline(Isolate* isolate, Handle<JSFunction> function,
// Baseline code needs a feedback vector.
JSFunction::EnsureFeedbackVector(function, is_compiled_scope);
- Code baseline_code = shared->baseline_data().baseline_code(isolate);
+ Code baseline_code = shared->baseline_code(kAcquireLoad);
DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
function->set_code(baseline_code);
@@ -2210,7 +2211,7 @@ MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
// position, but store it as negative value for lazy translation.
StackTraceFrameIterator it(isolate);
if (!it.done() && it.is_javascript()) {
- FrameSummary summary = FrameSummary::GetTop(it.javascript_frame());
+ FrameSummary summary = it.GetTopValidFrame();
script->set_eval_from_shared(
summary.AsJavaScript().function()->shared());
script->set_origin_options(OriginOptionsForEval(*summary.script()));
@@ -2830,13 +2831,10 @@ MaybeHandle<SharedFunctionInfo> CompileScriptOnBothBackgroundAndMainThread(
return maybe_result;
}
-} // namespace
-
-// static
-MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
+MaybeHandle<SharedFunctionInfo> GetSharedFunctionInfoForScriptImpl(
Isolate* isolate, Handle<String> source,
const ScriptDetails& script_details, v8::Extension* extension,
- AlignedCachedData* cached_data,
+ AlignedCachedData* cached_data, BackgroundDeserializeTask* deserialize_task,
ScriptCompiler::CompileOptions compile_options,
ScriptCompiler::NoCacheReason no_cache_reason, NativesFlag natives) {
ScriptCompileTimerScope compile_timer(isolate, no_cache_reason);
@@ -2844,9 +2842,12 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
if (compile_options == ScriptCompiler::kNoCompileOptions ||
compile_options == ScriptCompiler::kEagerCompile) {
DCHECK_NULL(cached_data);
+ DCHECK_NULL(deserialize_task);
} else {
- DCHECK(compile_options == ScriptCompiler::kConsumeCodeCache);
- DCHECK(cached_data);
+ DCHECK_EQ(compile_options, ScriptCompiler::kConsumeCodeCache);
+    // Exactly one of cached_data and deserialize_task must be provided.
+ DCHECK(cached_data || deserialize_task);
+ DCHECK(!(cached_data && deserialize_task));
DCHECK_NULL(extension);
}
int source_length = source->length();
@@ -2882,17 +2883,26 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
RCS_SCOPE(isolate, RuntimeCallCounterId::kCompileDeserialize);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"),
"V8.CompileDeserialize");
- Handle<SharedFunctionInfo> inner_result;
- if (CodeSerializer::Deserialize(isolate, cached_data, source,
- script_details.origin_options)
- .ToHandle(&inner_result) &&
- inner_result->is_compiled()) {
- // Promote to per-isolate compilation cache.
- is_compiled_scope = inner_result->is_compiled_scope(isolate);
- DCHECK(is_compiled_scope.is_compiled());
- compilation_cache->PutScript(source, language_mode, inner_result);
- maybe_result = inner_result;
+ if (deserialize_task) {
+ // If there's a cache consume task, finish it.
+ maybe_result = deserialize_task->Finish(isolate, source,
+ script_details.origin_options);
} else {
+ maybe_result = CodeSerializer::Deserialize(
+ isolate, cached_data, source, script_details.origin_options);
+ }
+
+ bool consuming_code_cache_succeeded = false;
+ Handle<SharedFunctionInfo> result;
+ if (maybe_result.ToHandle(&result)) {
+ is_compiled_scope = result->is_compiled_scope(isolate);
+ if (is_compiled_scope.is_compiled()) {
+ consuming_code_cache_succeeded = true;
+ // Promote to per-isolate compilation cache.
+ compilation_cache->PutScript(source, language_mode, result);
+ }
+ }
+ if (!consuming_code_cache_succeeded) {
// Deserializer failed. Fall through to compile.
compile_timer.set_consuming_code_cache_failed();
}
@@ -2937,6 +2947,51 @@ MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
return maybe_result;
}
+} // namespace
+
+MaybeHandle<SharedFunctionInfo> Compiler::GetSharedFunctionInfoForScript(
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details,
+ ScriptCompiler::CompileOptions compile_options,
+ ScriptCompiler::NoCacheReason no_cache_reason, NativesFlag natives) {
+ return GetSharedFunctionInfoForScriptImpl(
+ isolate, source, script_details, nullptr, nullptr, nullptr,
+ compile_options, no_cache_reason, natives);
+}
+
+MaybeHandle<SharedFunctionInfo>
+Compiler::GetSharedFunctionInfoForScriptWithExtension(
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details, v8::Extension* extension,
+ ScriptCompiler::CompileOptions compile_options, NativesFlag natives) {
+ return GetSharedFunctionInfoForScriptImpl(
+ isolate, source, script_details, extension, nullptr, nullptr,
+ compile_options, ScriptCompiler::kNoCacheBecauseV8Extension, natives);
+}
+
+MaybeHandle<SharedFunctionInfo>
+Compiler::GetSharedFunctionInfoForScriptWithCachedData(
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details, AlignedCachedData* cached_data,
+ ScriptCompiler::CompileOptions compile_options,
+ ScriptCompiler::NoCacheReason no_cache_reason, NativesFlag natives) {
+ return GetSharedFunctionInfoForScriptImpl(
+ isolate, source, script_details, nullptr, cached_data, nullptr,
+ compile_options, no_cache_reason, natives);
+}
+
+MaybeHandle<SharedFunctionInfo>
+Compiler::GetSharedFunctionInfoForScriptWithDeserializeTask(
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details,
+ BackgroundDeserializeTask* deserialize_task,
+ ScriptCompiler::CompileOptions compile_options,
+ ScriptCompiler::NoCacheReason no_cache_reason, NativesFlag natives) {
+ return GetSharedFunctionInfoForScriptImpl(
+ isolate, source, script_details, nullptr, nullptr, deserialize_task,
+ compile_options, no_cache_reason, natives);
+}
+
// static
MaybeHandle<JSFunction> Compiler::GetWrappedFunction(
Handle<String> source, Handle<FixedArray> arguments,
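The single GetSharedFunctionInfoForScript entry point with nullable extension/cached_data parameters is split into four explicit variants, encoding the "at most one auxiliary input" contract in the signatures themselves. A hypothetical call into the new background-deserialize variant (local names and the NativesFlag value are illustrative; the API names come from this patch):

MaybeHandle<SharedFunctionInfo> result =
    Compiler::GetSharedFunctionInfoForScriptWithDeserializeTask(
        isolate, source, script_details, deserialize_task,
        ScriptCompiler::kConsumeCodeCache, no_cache_reason,
        NOT_NATIVES_CODE);
if (deserialize_task->rejected()) {
  // The cached data was invalid; the result was compiled from source.
}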
diff --git a/deps/v8/src/codegen/compiler.h b/deps/v8/src/codegen/compiler.h
index 0d1582d872..97bd6bd027 100644
--- a/deps/v8/src/codegen/compiler.h
+++ b/deps/v8/src/codegen/compiler.h
@@ -161,8 +161,39 @@ class V8_EXPORT_PRIVATE Compiler : public AllStatic {
// Create a shared function info object for a String source.
static MaybeHandle<SharedFunctionInfo> GetSharedFunctionInfoForScript(
Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details,
+ ScriptCompiler::CompileOptions compile_options,
+ ScriptCompiler::NoCacheReason no_cache_reason,
+ NativesFlag is_natives_code);
+
+ // Create a shared function info object for a String source.
+ static MaybeHandle<SharedFunctionInfo>
+ GetSharedFunctionInfoForScriptWithExtension(
+ Isolate* isolate, Handle<String> source,
const ScriptDetails& script_details, v8::Extension* extension,
- AlignedCachedData* cached_data,
+ ScriptCompiler::CompileOptions compile_options,
+ NativesFlag is_natives_code);
+
+ // Create a shared function info object for a String source and serialized
+ // cached data. The cached data may be rejected, in which case this function
+ // will set cached_data->rejected() to true.
+ static MaybeHandle<SharedFunctionInfo>
+ GetSharedFunctionInfoForScriptWithCachedData(
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details, AlignedCachedData* cached_data,
+ ScriptCompiler::CompileOptions compile_options,
+ ScriptCompiler::NoCacheReason no_cache_reason,
+ NativesFlag is_natives_code);
+
+ // Create a shared function info object for a String source and a task that
+ // has deserialized cached data on a background thread. The cached data from
+ // the task may be rejected, in which case this function will set
+ // deserialize_task->rejected() to true.
+ static MaybeHandle<SharedFunctionInfo>
+ GetSharedFunctionInfoForScriptWithDeserializeTask(
+ Isolate* isolate, Handle<String> source,
+ const ScriptDetails& script_details,
+ BackgroundDeserializeTask* deserialize_task,
ScriptCompiler::CompileOptions compile_options,
ScriptCompiler::NoCacheReason no_cache_reason,
NativesFlag is_natives_code);
@@ -571,6 +602,8 @@ class V8_EXPORT_PRIVATE BackgroundDeserializeTask {
Handle<String> source,
ScriptOriginOptions origin_options);
+ bool rejected() const { return cached_data_.rejected(); }
+
private:
Isolate* isolate_for_local_isolate_;
AlignedCachedData cached_data_;
diff --git a/deps/v8/src/codegen/constant-pool.cc b/deps/v8/src/codegen/constant-pool.cc
index 9af91d7a15..510f59185c 100644
--- a/deps/v8/src/codegen/constant-pool.cc
+++ b/deps/v8/src/codegen/constant-pool.cc
@@ -356,8 +356,7 @@ void ConstantPool::Emit(const ConstantPoolKey& key) {
if (assm_->IsOnHeap() && RelocInfo::IsEmbeddedObjectMode(key.rmode())) {
int offset = assm_->pc_offset();
Assembler::EmbeddedObjectIndex index = key.value64();
- assm_->saved_handles_for_raw_object_ptr_.push_back(
- std::make_pair(offset, index));
+ assm_->saved_handles_for_raw_object_ptr_.emplace_back(offset, index);
Handle<Object> object = assm_->GetEmbeddedObject(index);
assm_->dq(object->ptr());
DCHECK(assm_->EmbeddedObjectMatches(offset, object, index));
diff --git a/deps/v8/src/codegen/constants-arch.h b/deps/v8/src/codegen/constants-arch.h
index 2417be5d4d..7eb32bafde 100644
--- a/deps/v8/src/codegen/constants-arch.h
+++ b/deps/v8/src/codegen/constants-arch.h
@@ -15,6 +15,8 @@
#include "src/codegen/mips/constants-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/constants-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/codegen/loong64/constants-loong64.h"
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/codegen/ppc/constants-ppc.h"
#elif V8_TARGET_ARCH_S390
diff --git a/deps/v8/src/codegen/cpu-features.h b/deps/v8/src/codegen/cpu-features.h
index ab6608679f..3cdae6d4c8 100644
--- a/deps/v8/src/codegen/cpu-features.h
+++ b/deps/v8/src/codegen/cpu-features.h
@@ -51,6 +51,9 @@ enum CpuFeature {
MIPSr6,
MIPS_SIMD, // MSA instructions
+#elif V8_TARGET_ARCH_LOONG64
+ FPU,
+
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
PPC_6_PLUS,
PPC_7_PLUS,
diff --git a/deps/v8/src/codegen/external-reference.cc b/deps/v8/src/codegen/external-reference.cc
index e1d8c5d96e..0c04e84a68 100644
--- a/deps/v8/src/codegen/external-reference.cc
+++ b/deps/v8/src/codegen/external-reference.cc
@@ -145,6 +145,19 @@ constexpr struct alignas(16) {
} wasm_uint32_max_as_double = {uint64_t{0x41efffffffe00000},
uint64_t{0x41efffffffe00000}};
+// This is 2147483648.0, which is 1 more than INT32_MAX.
+constexpr struct alignas(16) {
+ uint32_t a;
+ uint32_t b;
+ uint32_t c;
+ uint32_t d;
+} wasm_int32_overflow_as_float = {
+ uint32_t{0x4f00'0000},
+ uint32_t{0x4f00'0000},
+ uint32_t{0x4f00'0000},
+ uint32_t{0x4f00'0000},
+};
+
// Implementation of ExternalReference
static ExternalReference::Type BuiltinCallTypeForResultSize(int result_size) {
@@ -400,6 +413,7 @@ IF_WASM(FUNCTION_REFERENCE, wasm_memory_fill, wasm::memory_fill_wrapper)
IF_WASM(FUNCTION_REFERENCE, wasm_float64_pow, wasm::float64_pow_wrapper)
IF_WASM(FUNCTION_REFERENCE, wasm_call_trap_callback_for_testing,
wasm::call_trap_callback_for_testing)
+IF_WASM(FUNCTION_REFERENCE, wasm_array_copy, wasm::array_copy_wrapper)
static void f64_acos_wrapper(Address data) {
double input = ReadUnalignedValue<double>(data);
@@ -618,6 +632,11 @@ ExternalReference ExternalReference::address_of_wasm_uint32_max_as_double() {
reinterpret_cast<Address>(&wasm_uint32_max_as_double));
}
+ExternalReference ExternalReference::address_of_wasm_int32_overflow_as_float() {
+ return ExternalReference(
+ reinterpret_cast<Address>(&wasm_int32_overflow_as_float));
+}
+
ExternalReference
ExternalReference::address_of_enable_experimental_regexp_engine() {
return ExternalReference(&FLAG_enable_experimental_regexp_engine);
@@ -688,6 +707,8 @@ ExternalReference ExternalReference::invoke_accessor_getter_callback() {
#define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState
#elif V8_TARGET_ARCH_MIPS64
#define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState
+#elif V8_TARGET_ARCH_LOONG64
+#define re_stack_check_func RegExpMacroAssemblerLOONG64::CheckStackGuardState
#elif V8_TARGET_ARCH_S390
#define re_stack_check_func RegExpMacroAssemblerS390::CheckStackGuardState
#elif V8_TARGET_ARCH_RISCV64
@@ -1180,7 +1201,7 @@ namespace {
// address, with the same value. This is done in order for TSAN to see these
// stores from generated code.
// Note that {value} is an int64_t irrespective of the store size. This is on
-// purpose to keep the function signatures the same accross stores. The
+// purpose to keep the function signatures the same across stores. The
// static_cast inside the method will ignore the bits which will not be stored.
void tsan_relaxed_store_8_bits(Address addr, int64_t value) {
#if V8_TARGET_ARCH_X64
@@ -1218,6 +1239,44 @@ void tsan_relaxed_store_64_bits(Address addr, int64_t value) {
#endif // V8_TARGET_ARCH_X64
}
+// Same as above, for sequentially consistent stores.
+void tsan_seq_cst_store_8_bits(Address addr, int64_t value) {
+#if V8_TARGET_ARCH_X64
+ base::SeqCst_Store(reinterpret_cast<base::Atomic8*>(addr),
+ static_cast<base::Atomic8>(value));
+#else
+ UNREACHABLE();
+#endif // V8_TARGET_ARCH_X64
+}
+
+void tsan_seq_cst_store_16_bits(Address addr, int64_t value) {
+#if V8_TARGET_ARCH_X64
+ base::SeqCst_Store(reinterpret_cast<base::Atomic16*>(addr),
+ static_cast<base::Atomic16>(value));
+#else
+ UNREACHABLE();
+#endif // V8_TARGET_ARCH_X64
+}
+
+void tsan_seq_cst_store_32_bits(Address addr, int64_t value) {
+#if V8_TARGET_ARCH_X64
+ base::SeqCst_Store(reinterpret_cast<base::Atomic32*>(addr),
+ static_cast<base::Atomic32>(value));
+#else
+ UNREACHABLE();
+#endif // V8_TARGET_ARCH_X64
+}
+
+void tsan_seq_cst_store_64_bits(Address addr, int64_t value) {
+#if V8_TARGET_ARCH_X64
+ base::SeqCst_Store(reinterpret_cast<base::Atomic64*>(addr),
+ static_cast<base::Atomic64>(value));
+#else
+ UNREACHABLE();
+#endif // V8_TARGET_ARCH_X64
+}
+
+// Same as above, for relaxed loads.
base::Atomic32 tsan_relaxed_load_32_bits(Address addr, int64_t value) {
#if V8_TARGET_ARCH_X64
return base::Relaxed_Load(reinterpret_cast<base::Atomic32*>(addr));
@@ -1245,6 +1304,14 @@ IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_store_function_32_bits,
tsan_relaxed_store_32_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_store_function_64_bits,
tsan_relaxed_store_64_bits)
+IF_TSAN(FUNCTION_REFERENCE, tsan_seq_cst_store_function_8_bits,
+ tsan_seq_cst_store_8_bits)
+IF_TSAN(FUNCTION_REFERENCE, tsan_seq_cst_store_function_16_bits,
+ tsan_seq_cst_store_16_bits)
+IF_TSAN(FUNCTION_REFERENCE, tsan_seq_cst_store_function_32_bits,
+ tsan_seq_cst_store_32_bits)
+IF_TSAN(FUNCTION_REFERENCE, tsan_seq_cst_store_function_64_bits,
+ tsan_seq_cst_store_64_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_load_function_32_bits,
tsan_relaxed_load_32_bits)
IF_TSAN(FUNCTION_REFERENCE, tsan_relaxed_load_function_64_bits,
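The seq-cst TSAN wrappers mirror the relaxed ones: each redoes the store through a real atomic operation at the same address so TSAN observes it with the intended ordering, truncating the uniform int64_t argument to the store width. A standalone model of the 32-bit case (a sketch using C++20 std::atomic_ref rather than V8's base atomics):

#include <atomic>
#include <cstdint>

void SeqCstStore32(uint32_t* addr, int64_t value) {
  std::atomic_ref<uint32_t>(*addr).store(static_cast<uint32_t>(value),
                                         std::memory_order_seq_cst);
}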
diff --git a/deps/v8/src/codegen/external-reference.h b/deps/v8/src/codegen/external-reference.h
index cbc3463841..ca62ff9d7a 100644
--- a/deps/v8/src/codegen/external-reference.h
+++ b/deps/v8/src/codegen/external-reference.h
@@ -111,13 +111,6 @@ class StatsCounter;
V(address_of_runtime_stats_flag, "TracingFlags::runtime_stats") \
V(address_of_the_hole_nan, "the_hole_nan") \
V(address_of_uint32_bias, "uint32_bias") \
- V(address_of_wasm_i8x16_swizzle_mask, "wasm_i8x16_swizzle_mask") \
- V(address_of_wasm_i8x16_popcnt_mask, "wasm_i8x16_popcnt_mask") \
- V(address_of_wasm_i8x16_splat_0x01, "wasm_i8x16_splat_0x01") \
- V(address_of_wasm_i8x16_splat_0x0f, "wasm_i8x16_splat_0x0f") \
- V(address_of_wasm_i8x16_splat_0x33, "wasm_i8x16_splat_0x33") \
- V(address_of_wasm_i8x16_splat_0x55, "wasm_i8x16_splat_0x55") \
- V(address_of_wasm_i16x8_splat_0x0001, "wasm_16x8_splat_0x0001") \
V(baseline_pc_for_bytecode_offset, "BaselinePCForBytecodeOffset") \
V(baseline_pc_for_next_executed_bytecode, \
"BaselinePCForNextExecutedBytecode") \
@@ -247,12 +240,21 @@ class StatsCounter;
IF_WASM(V, wasm_memory_init, "wasm::memory_init") \
IF_WASM(V, wasm_memory_copy, "wasm::memory_copy") \
IF_WASM(V, wasm_memory_fill, "wasm::memory_fill") \
+ IF_WASM(V, wasm_array_copy, "wasm::array_copy") \
+ V(address_of_wasm_i8x16_swizzle_mask, "wasm_i8x16_swizzle_mask") \
+ V(address_of_wasm_i8x16_popcnt_mask, "wasm_i8x16_popcnt_mask") \
+ V(address_of_wasm_i8x16_splat_0x01, "wasm_i8x16_splat_0x01") \
+ V(address_of_wasm_i8x16_splat_0x0f, "wasm_i8x16_splat_0x0f") \
+ V(address_of_wasm_i8x16_splat_0x33, "wasm_i8x16_splat_0x33") \
+ V(address_of_wasm_i8x16_splat_0x55, "wasm_i8x16_splat_0x55") \
+ V(address_of_wasm_i16x8_splat_0x0001, "wasm_16x8_splat_0x0001") \
V(address_of_wasm_f64x2_convert_low_i32x4_u_int_mask, \
"wasm_f64x2_convert_low_i32x4_u_int_mask") \
V(supports_wasm_simd_128_address, "wasm::supports_wasm_simd_128_address") \
V(address_of_wasm_double_2_power_52, "wasm_double_2_power_52") \
V(address_of_wasm_int32_max_as_double, "wasm_int32_max_as_double") \
V(address_of_wasm_uint32_max_as_double, "wasm_uint32_max_as_double") \
+ V(address_of_wasm_int32_overflow_as_float, "wasm_int32_overflow_as_float") \
V(write_barrier_marking_from_code_function, "WriteBarrier::MarkingFromCode") \
V(call_enqueue_microtask_function, "MicrotaskQueue::CallEnqueueMicrotask") \
V(call_enter_context_function, "call_enter_context_function") \
@@ -274,6 +276,14 @@ class StatsCounter;
"tsan_relaxed_store_function_32_bits") \
IF_TSAN(V, tsan_relaxed_store_function_64_bits, \
"tsan_relaxed_store_function_64_bits") \
+ IF_TSAN(V, tsan_seq_cst_store_function_8_bits, \
+ "tsan_seq_cst_store_function_8_bits") \
+ IF_TSAN(V, tsan_seq_cst_store_function_16_bits, \
+ "tsan_seq_cst_store_function_16_bits") \
+ IF_TSAN(V, tsan_seq_cst_store_function_32_bits, \
+ "tsan_seq_cst_store_function_32_bits") \
+ IF_TSAN(V, tsan_seq_cst_store_function_64_bits, \
+ "tsan_seq_cst_store_function_64_bits") \
IF_TSAN(V, tsan_relaxed_load_function_32_bits, \
"tsan_relaxed_load_function_32_bits") \
IF_TSAN(V, tsan_relaxed_load_function_64_bits, \
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.cc b/deps/v8/src/codegen/ia32/assembler-ia32.cc
index 90f8e8b70c..e921c11552 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.cc
@@ -291,6 +291,8 @@ Register Operand::reg() const {
return Register::from_code(buf_[0] & 0x07);
}
+bool operator!=(Operand op, XMMRegister r) { return !op.is_reg(r); }
+
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
for (auto& request : heap_object_requests_) {
@@ -688,6 +690,14 @@ void Assembler::movq(XMMRegister dst, Operand src) {
emit_operand(dst, src);
}
+void Assembler::movq(Operand dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0xD6);
+ emit_operand(src, dst);
+}
+
void Assembler::cmov(Condition cc, Register dst, Operand src) {
EnsureSpace ensure_space(this);
// Opcode: 0f 40 + cc /r.
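The new movq(Operand, XMMRegister) overload is the store direction of the existing load form, emitting the SSE2 MOVQ m64, xmm encoding (66 0F D6 /r). Hand-assembling one instance shows the expected bytes (an illustrative check, not V8 code):

#include <cstdint>
#include <cstdio>

int main() {
  // movq [eax], xmm1  =>  66 0F D6 08  (ModRM 08: reg=xmm1, rm=[eax])
  const uint8_t movq_store[] = {0x66, 0x0F, 0xD6, 0x08};
  for (uint8_t b : movq_store) std::printf("%02X ", b);
  std::printf("\n");
}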
diff --git a/deps/v8/src/codegen/ia32/assembler-ia32.h b/deps/v8/src/codegen/ia32/assembler-ia32.h
index 89a65ee99b..31fc2c0221 100644
--- a/deps/v8/src/codegen/ia32/assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/assembler-ia32.h
@@ -306,6 +306,8 @@ ASSERT_TRIVIALLY_COPYABLE(Operand);
static_assert(sizeof(Operand) <= 2 * kSystemPointerSize,
"Operand must be small enough to pass it by value");
+bool operator!=(Operand op, XMMRegister r);
+
// -----------------------------------------------------------------------------
// A Displacement describes the 32bit immediate field of an instruction which
// may be used together with a Label in order to refer to a yet unknown code
@@ -535,6 +537,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void movzx_w(Register dst, Operand src);
void movq(XMMRegister dst, Operand src);
+ void movq(Operand dst, XMMRegister src);
// Conditional moves
void cmov(Condition cc, Register dst, Register src) {
@@ -1544,6 +1547,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vmovdqa(XMMRegister dst, Operand src) {
vinstr(0x6F, dst, xmm0, src, k66, k0F, kWIG);
}
+ void vmovdqa(XMMRegister dst, XMMRegister src) {
+ vinstr(0x6F, dst, xmm0, src, k66, k0F, kWIG);
+ }
void vmovdqu(XMMRegister dst, Operand src) {
vinstr(0x6F, dst, xmm0, src, kF3, k0F, kWIG);
}
@@ -1709,6 +1715,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
PACKED_CMP_LIST(AVX_CMP_P)
+ // vcmpgeps/vcmpgepd only in AVX.
+ AVX_CMP_P(cmpge, 0xd)
#undef AVX_CMP_P
#undef PACKED_CMP_LIST
@@ -1790,6 +1798,19 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
SSE4_RM_INSTRUCTION_LIST(DECLARE_SSE4_AVX_RM_INSTRUCTION)
#undef DECLARE_SSE4_AVX_RM_INSTRUCTION
+ // AVX2 instructions
+#define AVX2_INSTRUCTION(instr, prefix, escape1, escape2, opcode) \
+ void instr(XMMRegister dst, XMMRegister src) { \
+ vinstr(0x##opcode, dst, xmm0, src, k##prefix, k##escape1##escape2, kW0, \
+ AVX2); \
+ } \
+ void instr(XMMRegister dst, Operand src) { \
+ vinstr(0x##opcode, dst, xmm0, src, k##prefix, k##escape1##escape2, kW0, \
+ AVX2); \
+ }
+ AVX2_BROADCAST_LIST(AVX2_INSTRUCTION)
+#undef AVX2_INSTRUCTION
+
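For reference, running the first AVX2_BROADCAST_LIST entry (the list itself is added to sse-instr.h later in this patch) through the macro above is a purely mechanical token-pasting expansion and yields:

    void vpbroadcastd(XMMRegister dst, XMMRegister src) {
      vinstr(0x58, dst, xmm0, src, k66, k0F38, kW0, AVX2);
    }
    void vpbroadcastd(XMMRegister dst, Operand src) {
      vinstr(0x58, dst, xmm0, src, k66, k0F38, kW0, AVX2);
    }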
// Prefetch src position into cache level.
// Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
// non-temporal
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
index c95ea8ad2c..e11d6223ea 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.cc
@@ -631,319 +631,6 @@ void TurboAssembler::Cvttsd2ui(Register dst, Operand src, XMMRegister tmp) {
add(dst, Immediate(0x80000000));
}
-void TurboAssembler::Pmulhrsw(XMMRegister dst, XMMRegister src1,
- XMMRegister src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpmulhrsw(dst, src1, src2);
- } else {
- if (dst != src1) {
- movaps(dst, src1);
- }
- CpuFeatureScope sse_scope(this, SSSE3);
- pmulhrsw(dst, src2);
- }
-}
-
-void TurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister scratch) {
- ASM_CODE_COMMENT(this);
- // k = i16x8.splat(0x8000)
- Pcmpeqd(scratch, scratch);
- Psllw(scratch, scratch, byte{15});
-
- Pmulhrsw(dst, src1, src2);
- Pcmpeqw(scratch, dst);
- Pxor(dst, scratch);
-}
-
-void TurboAssembler::I8x16Popcnt(XMMRegister dst, XMMRegister src,
- XMMRegister tmp1, XMMRegister tmp2,
- Register scratch) {
- ASM_CODE_COMMENT(this);
- DCHECK_NE(dst, tmp1);
- DCHECK_NE(src, tmp1);
- DCHECK_NE(dst, tmp2);
- DCHECK_NE(src, tmp2);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vmovdqa(tmp1, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x0f(),
- scratch));
- vpandn(tmp2, tmp1, src);
- vpand(dst, tmp1, src);
- vmovdqa(tmp1, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_popcnt_mask(),
- scratch));
- vpsrlw(tmp2, tmp2, 4);
- vpshufb(dst, tmp1, dst);
- vpshufb(tmp2, tmp1, tmp2);
- vpaddb(dst, dst, tmp2);
- } else if (CpuFeatures::IsSupported(ATOM)) {
- // Pre-Goldmont low-power Intel microarchitectures have very slow
- // PSHUFB instruction, thus use PSHUFB-free divide-and-conquer
- // algorithm on these processors. ATOM CPU feature captures exactly
- // the right set of processors.
- movaps(tmp1, src);
- psrlw(tmp1, 1);
- if (dst != src) {
- movaps(dst, src);
- }
- andps(tmp1,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x55(), scratch));
- psubb(dst, tmp1);
- Operand splat_0x33 = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x33(), scratch);
- movaps(tmp1, dst);
- andps(dst, splat_0x33);
- psrlw(tmp1, 2);
- andps(tmp1, splat_0x33);
- paddb(dst, tmp1);
- movaps(tmp1, dst);
- psrlw(dst, 4);
- paddb(dst, tmp1);
- andps(dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x0f(), scratch));
- } else {
- CpuFeatureScope sse_scope(this, SSSE3);
- movaps(tmp1,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x0f(), scratch));
- Operand mask = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_popcnt_mask(), scratch);
- if (tmp2 != tmp1) {
- movaps(tmp2, tmp1);
- }
- andps(tmp1, src);
- andnps(tmp2, src);
- psrlw(tmp2, 4);
- movaps(dst, mask);
- pshufb(dst, tmp1);
- movaps(tmp1, mask);
- pshufb(tmp1, tmp2);
- paddb(dst, tmp1);
- }
-}
-
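The helper removed here (presumably centralized in the shared assembler, given the new SharedTurboAssemblerBase base class later in this patch) is the classic nibble-lookup popcount: the 0x0f splat splits each byte into low and high nibbles, pshufb uses the popcnt mask as a 16-entry lookup table of per-nibble bit counts, and paddb sums the two halves. A scalar C++ sketch of the same idea, illustrative only and assuming the mask holds the usual per-nibble counts:

    #include <cstdint>

    uint8_t PopcntByte(uint8_t b) {
      // Bit counts for the 16 possible nibble values.
      static const uint8_t kNibblePopcnt[16] = {0, 1, 1, 2, 1, 2, 2, 3,
                                                1, 2, 2, 3, 2, 3, 3, 4};
      return kNibblePopcnt[b & 0x0f] + kNibblePopcnt[b >> 4];
    }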
-void TurboAssembler::F64x2ConvertLowI32x4U(XMMRegister dst, XMMRegister src,
- Register tmp) {
- // dst = [ src_low, 0x43300000, src_high, 0x43300000 ];
- // 0x43300000'00000000 is a special double whose significand bits
- // precisely represent all uint32 numbers.
- if (!CpuFeatures::IsSupported(AVX) && dst != src) {
- movaps(dst, src);
- src = dst;
- }
- Unpcklps(dst, src,
- ExternalReferenceAsOperand(
- ExternalReference::
- address_of_wasm_f64x2_convert_low_i32x4_u_int_mask(),
- tmp));
- Subpd(dst, dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_double_2_power_52(), tmp));
-}
-
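The 0x43300000'00000000 constant mentioned above is the bit pattern of 2^52, the smallest double whose unit in the last place is exactly 1: splicing a uint32 n into its low significand bits produces exactly 2^52 + n, and subtracting 2^52 recovers n without rounding. A standalone illustration of the trick, not V8 code:

    #include <cstdint>
    #include <cstring>

    double Uint32ToDouble(uint32_t n) {
      // Bit pattern of 2^52 with n in the low 32 significand bits.
      uint64_t bits = 0x4330000000000000ull | n;
      double d;
      std::memcpy(&d, &bits, sizeof d);
      return d - 4503599627370496.0;  // 2^52; this subtraction is exact.
    }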
-void TurboAssembler::I32x4TruncSatF64x2SZero(XMMRegister dst, XMMRegister src,
- XMMRegister scratch,
- Register tmp) {
- ASM_CODE_COMMENT(this);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- XMMRegister original_dst = dst;
- // Make sure we don't overwrite src.
- if (dst == src) {
- DCHECK_NE(scratch, src);
- dst = scratch;
- }
- // dst = 0 if src == NaN, else all ones.
- vcmpeqpd(dst, src, src);
- // dst = 0 if src == NaN, else INT32_MAX as double.
- vandpd(dst, dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_int32_max_as_double(), tmp));
- // dst = 0 if src == NaN, src is saturated to INT32_MAX as double.
- vminpd(dst, src, dst);
- // Values > INT32_MAX already saturated, values < INT32_MIN raises an
- // exception, which is masked and returns 0x80000000.
- vcvttpd2dq(dst, dst);
-
- if (original_dst != dst) {
- vmovaps(original_dst, dst);
- }
- } else {
- if (dst != src) {
- movaps(dst, src);
- }
- movaps(scratch, dst);
- cmpeqpd(scratch, dst);
- andps(scratch,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_int32_max_as_double(), tmp));
- minpd(dst, scratch);
- cvttpd2dq(dst, dst);
- }
-}
-
-void TurboAssembler::I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src,
- XMMRegister scratch,
- Register tmp) {
- ASM_CODE_COMMENT(this);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vxorpd(scratch, scratch, scratch);
- // Saturate to 0.
- vmaxpd(dst, src, scratch);
- // Saturate to UINT32_MAX.
- vminpd(dst, dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_uint32_max_as_double(), tmp));
- // Truncate.
- vroundpd(dst, dst, kRoundToZero);
- // Add to special double where significand bits == uint32.
- vaddpd(dst, dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_double_2_power_52(), tmp));
- // Extract low 32 bits of each double's significand, zero top lanes.
- // dst = [dst[0], dst[2], 0, 0]
- vshufps(dst, dst, scratch, 0x88);
- } else {
- CpuFeatureScope scope(this, SSE4_1);
- if (dst != src) {
- movaps(dst, src);
- }
-
- xorps(scratch, scratch);
- maxpd(dst, scratch);
- minpd(dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_uint32_max_as_double(), tmp));
- roundpd(dst, dst, kRoundToZero);
- addpd(dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_double_2_power_52(), tmp));
- shufps(dst, scratch, 0x88);
- }
-}
-
-void TurboAssembler::I16x8ExtAddPairwiseI8x16S(XMMRegister dst, XMMRegister src,
- XMMRegister tmp,
- Register scratch) {
- // pmaddubsw treats the first operand as unsigned, so pass the external
- // reference as the first operand.
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x01(), scratch);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vmovdqa(tmp, op);
- vpmaddubsw(dst, tmp, src);
- } else {
- CpuFeatureScope sse_scope(this, SSSE3);
- if (dst == src) {
- movaps(tmp, op);
- pmaddubsw(tmp, src);
- movaps(dst, tmp);
- } else {
- movaps(dst, op);
- pmaddubsw(dst, src);
- }
- }
-}
-
-void TurboAssembler::I16x8ExtAddPairwiseI8x16U(XMMRegister dst, XMMRegister src,
- Register scratch) {
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x01(), scratch);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpmaddubsw(dst, src, op);
- } else {
- CpuFeatureScope sse_scope(this, SSSE3);
- movaps(dst, src);
- pmaddubsw(dst, op);
- }
-}
-
-void TurboAssembler::I32x4ExtAddPairwiseI16x8S(XMMRegister dst, XMMRegister src,
- Register scratch) {
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i16x8_splat_0x0001(), scratch);
- // pmaddwd multiplies signed words in src and op, producing
- // signed doublewords, then adds pairwise.
- // src = |a|b|c|d|e|f|g|h|
- // dst = | a*1 + b*1 | c*1 + d*1 | e*1 + f*1 | g*1 + h*1 |
- Pmaddwd(dst, src, op);
-}
-
-void TurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst, XMMRegister src,
- XMMRegister tmp) {
- ASM_CODE_COMMENT(this);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- // src = |a|b|c|d|e|f|g|h| (low)
- // tmp = |0|a|0|c|0|e|0|g|
- vpsrld(tmp, src, 16);
- // dst = |0|b|0|d|0|f|0|h|
- vpblendw(dst, src, tmp, 0xAA);
- // dst = |a+b|c+d|e+f|g+h|
- vpaddd(dst, tmp, dst);
- } else if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope sse_scope(this, SSE4_1);
- // There is a potentially better lowering if we get rip-relative constants,
- // see https://github.com/WebAssembly/simd/pull/380.
- movaps(tmp, src);
- psrld(tmp, 16);
- if (dst != src) {
- movaps(dst, src);
- }
- pblendw(dst, tmp, 0xAA);
- paddd(dst, tmp);
- } else {
- // src = |a|b|c|d|e|f|g|h|
- // tmp = i32x4.splat(0x0000FFFF)
- pcmpeqd(tmp, tmp);
- psrld(tmp, byte{16});
- // tmp =|0|b|0|d|0|f|0|h|
- andps(tmp, src);
- // dst = |0|a|0|c|0|e|0|g|
- if (dst != src) {
- movaps(dst, src);
- }
- psrld(dst, byte{16});
- // dst = |a+b|c+d|e+f|g+h|
- paddd(dst, tmp);
- }
-}
-
-void TurboAssembler::I8x16Swizzle(XMMRegister dst, XMMRegister src,
- XMMRegister mask, XMMRegister scratch,
- Register tmp, bool omit_add) {
- if (omit_add) {
- Pshufb(dst, src, mask);
- return;
- }
-
- // Out-of-range indices should return 0, add 112 so that any value > 15
- // saturates to 128 (top bit set), so pshufb will zero that lane.
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_swizzle_mask(), tmp);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpaddusb(scratch, mask, op);
- vpshufb(dst, src, scratch);
- } else {
- CpuFeatureScope sse_scope(this, SSSE3);
- movaps(scratch, op);
- if (dst != src) {
- movaps(dst, src);
- }
- paddusb(scratch, mask);
- pshufb(dst, scratch);
- }
-}
-
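To spell out the saturating-add trick in the swizzle helper above: the swizzle mask splats 112 (0x70), so under paddusb an in-range index i in 0..15 maps to 112..127, top bit clear, and pshufb then selects with the low four bits, which are i itself; any index of 16 or more lands at 128 or above (saturating at 255), top bit set, so pshufb zeroes that lane. For example: 5 -> 117 (selects lane 5), 20 -> 132 (zeroed), 200 -> 255 (zeroed).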
void TurboAssembler::ShlPair(Register high, Register low, uint8_t shift) {
DCHECK_GE(63, shift);
if (shift >= 32) {
@@ -1584,8 +1271,10 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
lea(scratch,
Operand(expected_parameter_count, times_system_pointer_size, 0));
AllocateStackSpace(scratch);
- // Extra words are the receiver and the return address (if a jump).
- int extra_words = type == InvokeType::kCall ? 1 : 2;
+ // Extra words are the receiver (if not already included in argc) and the
+ // return address (if a jump).
+ int extra_words = type == InvokeType::kCall ? 0 : 1;
+ if (!kJSArgcIncludesReceiver) extra_words++;
lea(num, Operand(eax, extra_words)); // Number of words to copy.
Move(current, 0);
// Fall-through to the loop body because there are non-zero words to copy.
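A worked count for the new arithmetic, assuming kJSArgcIncludesReceiver holds: a call passing two arguments has eax = 3 (receiver plus two arguments) and extra_words = 0, so three words are copied; a jump additionally copies the return address (extra_words = 1, four words). When argc excludes the receiver, the extra increment restores the old constants of 1 and 2.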
@@ -1895,22 +1584,6 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t src) {
}
}
-void TurboAssembler::Pshufb(XMMRegister dst, XMMRegister src, Operand mask) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpshufb(dst, src, mask);
- return;
- }
-
- // Make sure these are different so that we won't overwrite mask.
- DCHECK(!mask.is_reg(dst));
- CpuFeatureScope sse_scope(this, SSSE3);
- if (dst != src) {
- movaps(dst, src);
- }
- pshufb(dst, mask);
-}
-
void TurboAssembler::Pextrd(Register dst, XMMRegister src, uint8_t imm8) {
if (imm8 == 0) {
Movd(dst, src);
@@ -2015,16 +1688,6 @@ void TurboAssembler::Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2,
}
}
-void TurboAssembler::Vbroadcastss(XMMRegister dst, Operand src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vbroadcastss(dst, src);
- return;
- }
- movss(dst, src);
- shufps(dst, dst, static_cast<byte>(0));
-}
-
void TurboAssembler::Lzcnt(Register dst, Operand src) {
if (CpuFeatures::IsSupported(LZCNT)) {
CpuFeatureScope scope(this, LZCNT);
@@ -2385,63 +2048,6 @@ void TurboAssembler::Jump(Handle<Code> code_object, RelocInfo::Mode rmode) {
jmp(code_object, rmode);
}
-void TurboAssembler::RetpolineCall(Register reg) {
- ASM_CODE_COMMENT(this);
- Label setup_return, setup_target, inner_indirect_branch, capture_spec;
-
- jmp(&setup_return); // Jump past the entire retpoline below.
-
- bind(&inner_indirect_branch);
- call(&setup_target);
-
- bind(&capture_spec);
- pause();
- jmp(&capture_spec);
-
- bind(&setup_target);
- mov(Operand(esp, 0), reg);
- ret(0);
-
- bind(&setup_return);
- call(&inner_indirect_branch); // Callee will return after this instruction.
-}
-
-void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
- ASM_CODE_COMMENT(this);
- Label setup_return, setup_target, inner_indirect_branch, capture_spec;
-
- jmp(&setup_return); // Jump past the entire retpoline below.
-
- bind(&inner_indirect_branch);
- call(&setup_target);
-
- bind(&capture_spec);
- pause();
- jmp(&capture_spec);
-
- bind(&setup_target);
- mov(Operand(esp, 0), destination, rmode);
- ret(0);
-
- bind(&setup_return);
- call(&inner_indirect_branch); // Callee will return after this instruction.
-}
-
-void TurboAssembler::RetpolineJump(Register reg) {
- ASM_CODE_COMMENT(this);
- Label setup_target, capture_spec;
-
- call(&setup_target);
-
- bind(&capture_spec);
- pause();
- jmp(&capture_spec);
-
- bind(&setup_target);
- mov(Operand(esp, 0), reg);
- ret(0);
-}
-
void TurboAssembler::CheckPageFlag(Register object, Register scratch, int mask,
Condition cc, Label* condition_met,
Label::Distance condition_met_distance) {
diff --git a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
index 527c357047..bf8f356e8c 100644
--- a/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/codegen/ia32/macro-assembler-ia32.h
@@ -68,9 +68,10 @@ class StackArgumentsAccessor {
DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
};
-class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
+class V8_EXPORT_PRIVATE TurboAssembler
+ : public SharedTurboAssemblerBase<TurboAssembler> {
public:
- using SharedTurboAssembler::SharedTurboAssembler;
+ using SharedTurboAssemblerBase<TurboAssembler>::SharedTurboAssemblerBase;
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met,
@@ -158,15 +159,10 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
JumpMode jump_mode = JumpMode::kJump);
void Jump(const ExternalReference& reference);
- void RetpolineCall(Register reg);
- void RetpolineCall(Address destination, RelocInfo::Mode rmode);
-
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode);
void LoadMap(Register destination, Register object);
- void RetpolineJump(Register reg);
-
void Trap();
void DebugBreak();
@@ -326,10 +322,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
name(dst, src2); \
} \
}
- AVX_OP3_WITH_MOVE(Cmpeqps, cmpeqps, XMMRegister, XMMRegister)
AVX_OP3_WITH_MOVE(Movlps, movlps, XMMRegister, Operand)
AVX_OP3_WITH_MOVE(Movhps, movhps, XMMRegister, Operand)
- AVX_OP3_WITH_MOVE(Pmaddwd, pmaddwd, XMMRegister, Operand)
#undef AVX_OP3_WITH_MOVE
// TODO(zhin): Remove after moving more definitions into SharedTurboAssembler.
@@ -340,14 +334,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
SharedTurboAssembler::Movhps(dst, src);
}
- void Pshufb(XMMRegister dst, XMMRegister src) { Pshufb(dst, dst, src); }
- void Pshufb(XMMRegister dst, Operand src) { Pshufb(dst, dst, src); }
- // Handles SSE and AVX. On SSE, moves src to dst if they are not equal.
- void Pshufb(XMMRegister dst, XMMRegister src, XMMRegister mask) {
- Pshufb(dst, src, Operand(mask));
- }
- void Pshufb(XMMRegister dst, XMMRegister src, Operand mask);
-
void Pextrd(Register dst, XMMRegister src, uint8_t imm8);
void Pinsrb(XMMRegister dst, Register src, int8_t imm8) {
Pinsrb(dst, Operand(src), imm8);
@@ -367,7 +353,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void Pinsrw(XMMRegister dst, Operand src, int8_t imm8);
// Moves src1 to dst if AVX is not supported.
void Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2, int8_t imm8);
- void Vbroadcastss(XMMRegister dst, Operand src);
// Expression support
// cvtsi2sd instruction only writes to the low 64-bit of dst register, which
@@ -395,32 +380,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
}
void Cvttsd2ui(Register dst, Operand src, XMMRegister tmp);
- // Handles SSE and AVX. On SSE, moves src to dst if they are not equal.
- void Pmulhrsw(XMMRegister dst, XMMRegister src1, XMMRegister src2);
-
- // These Wasm SIMD ops do not have direct lowerings on IA32. These
- // helpers are optimized to produce the fastest and smallest codegen.
- // Defined here to allow usage on both TurboFan and Liftoff.
- void I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister scratch);
- void I8x16Popcnt(XMMRegister dst, XMMRegister src, XMMRegister tmp1,
- XMMRegister tmp2, Register scratch);
- void F64x2ConvertLowI32x4U(XMMRegister dst, XMMRegister src, Register tmp);
- void I32x4TruncSatF64x2SZero(XMMRegister dst, XMMRegister src,
- XMMRegister scratch, Register tmp);
- void I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src,
- XMMRegister scratch, Register tmp);
- void I16x8ExtAddPairwiseI8x16S(XMMRegister dst, XMMRegister src,
- XMMRegister tmp, Register scratch);
- void I16x8ExtAddPairwiseI8x16U(XMMRegister dst, XMMRegister src,
- Register scratch);
- void I32x4ExtAddPairwiseI16x8S(XMMRegister dst, XMMRegister src,
- Register scratch);
- void I32x4ExtAddPairwiseI16x8U(XMMRegister dst, XMMRegister src,
- XMMRegister tmp);
- void I8x16Swizzle(XMMRegister dst, XMMRegister src, XMMRegister mask,
- XMMRegister scratch, Register tmp, bool omit_add = false);
-
void Push(Register src) { push(src); }
void Push(Operand src) { push(src); }
void Push(Immediate value);
@@ -480,9 +439,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
- // TODO(860429): Remove remaining poisoning infrastructure on ia32.
- void ResetSpeculationPoisonRegister() { UNREACHABLE(); }
-
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this
diff --git a/deps/v8/src/codegen/ia32/register-ia32.h b/deps/v8/src/codegen/ia32/register-ia32.h
index 5dc035d966..37a5783ded 100644
--- a/deps/v8/src/codegen/ia32/register-ia32.h
+++ b/deps/v8/src/codegen/ia32/register-ia32.h
@@ -161,9 +161,6 @@ constexpr Register kWasmCompileLazyFuncIndexRegister = edi;
constexpr Register kRootRegister = ebx;
-// TODO(860429): Remove remaining poisoning infrastructure on ia32.
-constexpr Register kSpeculationPoisonRegister = no_reg;
-
constexpr DoubleRegister kFPReturnRegister0 = xmm1; // xmm0 isn't allocatable.
} // namespace internal
diff --git a/deps/v8/src/codegen/ia32/sse-instr.h b/deps/v8/src/codegen/ia32/sse-instr.h
index d775dfdd77..ef81e1014f 100644
--- a/deps/v8/src/codegen/ia32/sse-instr.h
+++ b/deps/v8/src/codegen/ia32/sse-instr.h
@@ -102,4 +102,10 @@
V(pmovzxdq, 66, 0F, 38, 35) \
V(ptest, 66, 0F, 38, 17)
+// These require AVX2, and we only define the VEX-128 versions.
+#define AVX2_BROADCAST_LIST(V) \
+ V(vpbroadcastd, 66, 0F, 38, 58) \
+ V(vpbroadcastb, 66, 0F, 38, 78) \
+ V(vpbroadcastw, 66, 0F, 38, 79)
+
#endif // V8_CODEGEN_IA32_SSE_INSTR_H_
diff --git a/deps/v8/src/codegen/interface-descriptors-inl.h b/deps/v8/src/codegen/interface-descriptors-inl.h
index cf4ff5b0e6..d5a8ccf6e4 100644
--- a/deps/v8/src/codegen/interface-descriptors-inl.h
+++ b/deps/v8/src/codegen/interface-descriptors-inl.h
@@ -27,6 +27,8 @@
#include "src/codegen/mips64/interface-descriptors-mips64-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/interface-descriptors-mips-inl.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/codegen/loong64/interface-descriptors-loong64-inl.h"
#elif V8_TARGET_ARCH_RISCV64
#include "src/codegen/riscv64/interface-descriptors-riscv64-inl.h"
#else
@@ -318,9 +320,10 @@ constexpr auto LoadWithReceiverBaselineDescriptor::registers() {
// static
constexpr auto BaselineOutOfLinePrologueDescriptor::registers() {
// TODO(v8:11421): Implement on other platforms.
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || \
- V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 || \
- V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS
+#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || \
+ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 || \
+ V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS || \
+ V8_TARGET_ARCH_LOONG64
return RegisterArray(
kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
kJavaScriptCallExtraArg1Register, kJavaScriptCallNewTargetRegister,
@@ -341,7 +344,7 @@ constexpr auto BaselineLeaveFrameDescriptor::registers() {
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || \
V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
- V8_TARGET_ARCH_MIPS
+ V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_LOONG64
return RegisterArray(ParamsSizeRegister(), WeightRegister());
#else
return DefaultRegisterArray();
diff --git a/deps/v8/src/codegen/interface-descriptors.h b/deps/v8/src/codegen/interface-descriptors.h
index cf4840bfd7..87bef49f37 100644
--- a/deps/v8/src/codegen/interface-descriptors.h
+++ b/deps/v8/src/codegen/interface-descriptors.h
@@ -111,8 +111,8 @@ namespace internal {
V(StringAt) \
V(StringAtAsString) \
V(StringSubstring) \
- IF_TSAN(V, TSANRelaxedStore) \
- IF_TSAN(V, TSANRelaxedLoad) \
+ IF_TSAN(V, TSANStore) \
+ IF_TSAN(V, TSANLoad) \
V(TypeConversion) \
V(TypeConversionNoContext) \
V(TypeConversion_Baseline) \
@@ -1053,26 +1053,26 @@ class WriteBarrierDescriptor final
};
#ifdef V8_IS_TSAN
-class TSANRelaxedStoreDescriptor final
- : public StaticCallInterfaceDescriptor<TSANRelaxedStoreDescriptor> {
+class TSANStoreDescriptor final
+ : public StaticCallInterfaceDescriptor<TSANStoreDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress, kValue)
DEFINE_PARAMETER_TYPES(MachineType::Pointer(), // kAddress
MachineType::AnyTagged()) // kValue
- DECLARE_DESCRIPTOR(TSANRelaxedStoreDescriptor)
+ DECLARE_DESCRIPTOR(TSANStoreDescriptor)
static constexpr auto registers();
static constexpr bool kRestrictAllocatableRegisters = true;
};
-class TSANRelaxedLoadDescriptor final
- : public StaticCallInterfaceDescriptor<TSANRelaxedLoadDescriptor> {
+class TSANLoadDescriptor final
+ : public StaticCallInterfaceDescriptor<TSANLoadDescriptor> {
public:
DEFINE_PARAMETERS_NO_CONTEXT(kAddress)
DEFINE_PARAMETER_TYPES(MachineType::Pointer()) // kAddress
- DECLARE_DESCRIPTOR(TSANRelaxedLoadDescriptor)
+ DECLARE_DESCRIPTOR(TSANLoadDescriptor)
static constexpr auto registers();
static constexpr bool kRestrictAllocatableRegisters = true;
diff --git a/deps/v8/src/codegen/loong64/assembler-loong64-inl.h b/deps/v8/src/codegen/loong64/assembler-loong64-inl.h
new file mode 100644
index 0000000000..597d5e048e
--- /dev/null
+++ b/deps/v8/src/codegen/loong64/assembler-loong64-inl.h
@@ -0,0 +1,249 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_INL_H_
+#define V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_INL_H_
+
+#include "src/codegen/assembler.h"
+#include "src/codegen/loong64/assembler-loong64.h"
+#include "src/debug/debug.h"
+#include "src/objects/objects-inl.h"
+
+namespace v8 {
+namespace internal {
+
+bool CpuFeatures::SupportsOptimizer() { return IsSupported(FPU); }
+
+// -----------------------------------------------------------------------------
+// Operand and MemOperand.
+
+bool Operand::is_reg() const { return rm_.is_valid(); }
+
+int64_t Operand::immediate() const {
+ DCHECK(!is_reg());
+ DCHECK(!IsHeapObjectRequest());
+ return value_.immediate;
+}
+
+// -----------------------------------------------------------------------------
+// RelocInfo.
+
+void RelocInfo::apply(intptr_t delta) {
+ if (IsInternalReference(rmode_)) {
+ // Absolute code pointer inside code object moves with the code object.
+ Assembler::RelocateInternalReference(rmode_, pc_, delta);
+ } else {
+ DCHECK(IsRelativeCodeTarget(rmode_));
+ Assembler::RelocateRelativeReference(rmode_, pc_, delta);
+ }
+}
+
+Address RelocInfo::target_address() {
+ DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) ||
+ IsWasmCall(rmode_));
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
+Address RelocInfo::target_address_address() {
+ DCHECK(HasTargetAddressAddress());
+ // Read the address of the word containing the target_address in an
+ // instruction stream.
+ // The only architecture-independent user of this function is the serializer.
+ // The serializer uses it to find out how many raw bytes of instruction to
+ // output before the next target.
+ // For an instruction like LUI/ORI where the target bits are mixed into the
+ // instruction bits, the size of the target will be zero, indicating that the
+ // serializer should not step forward in memory after a target is resolved
+ // and written. In this case the target_address_address function should
+ // return the end of the instructions to be patched, allowing the
+ // deserializer to deserialize the instructions as raw bytes and put them in
+ // place, ready to be patched with the target. After jump optimization,
+ // that is the address of the instruction that follows the J/JAL/JR/JALR
+ // instruction.
+ return pc_ + Assembler::kInstructionsFor64BitConstant * kInstrSize;
+}
+
+Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); }
+
+int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
+
+void Assembler::deserialization_set_special_target_at(
+ Address instruction_payload, Code code, Address target) {
+ set_target_address_at(instruction_payload,
+ !code.is_null() ? code.constant_pool() : kNullAddress,
+ target);
+}
+
+int Assembler::deserialization_special_target_size(
+ Address instruction_payload) {
+ return kSpecialTargetSize;
+}
+
+void Assembler::deserialization_set_target_internal_reference_at(
+ Address pc, Address target, RelocInfo::Mode mode) {
+ WriteUnalignedValue<Address>(pc, target);
+}
+
+HeapObject RelocInfo::target_object() {
+ DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_) ||
+ IsDataEmbeddedObject(rmode_));
+ if (IsDataEmbeddedObject(rmode_)) {
+ return HeapObject::cast(Object(ReadUnalignedValue<Address>(pc_)));
+ }
+ return HeapObject::cast(
+ Object(Assembler::target_address_at(pc_, constant_pool_)));
+}
+
+HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
+ return target_object();
+}
+
+Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
+ if (IsDataEmbeddedObject(rmode_)) {
+ return Handle<HeapObject>::cast(ReadUnalignedValue<Handle<Object>>(pc_));
+ } else if (IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)) {
+ return Handle<HeapObject>(reinterpret_cast<Address*>(
+ Assembler::target_address_at(pc_, constant_pool_)));
+ } else {
+ DCHECK(IsRelativeCodeTarget(rmode_));
+ return origin->relative_code_target_object_handle_at(pc_);
+ }
+}
+
+void RelocInfo::set_target_object(Heap* heap, HeapObject target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_) ||
+ IsDataEmbeddedObject(rmode_));
+ if (IsDataEmbeddedObject(rmode_)) {
+ WriteUnalignedValue(pc_, target.ptr());
+ // No need to flush icache since no instructions were changed.
+ } else {
+ Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
+ icache_flush_mode);
+ }
+ if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
+ !FLAG_disable_write_barriers) {
+ WriteBarrierForCode(host(), this, target);
+ }
+}
+
+Address RelocInfo::target_external_reference() {
+ DCHECK(rmode_ == EXTERNAL_REFERENCE);
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
+void RelocInfo::set_target_external_reference(
+ Address target, ICacheFlushMode icache_flush_mode) {
+ DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
+ Assembler::set_target_address_at(pc_, constant_pool_, target,
+ icache_flush_mode);
+}
+
+Address RelocInfo::target_internal_reference() {
+ if (rmode_ == INTERNAL_REFERENCE) {
+ return Memory<Address>(pc_);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+Address RelocInfo::target_internal_reference_address() {
+ DCHECK(rmode_ == INTERNAL_REFERENCE);
+ return pc_;
+}
+
+Handle<Code> Assembler::relative_code_target_object_handle_at(
+ Address pc) const {
+ Instr instr = Assembler::instr_at(pc);
+ int32_t code_target_index = instr & kImm26Mask;
+ code_target_index = ((code_target_index & 0x3ff) << 22 >> 6) |
+ ((code_target_index >> 10) & kImm16Mask);
+ return GetCodeTarget(code_target_index);
+}
+
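The shift pair above undoes the B/BL-style field split, in which a 26-bit immediate is stored as imm[15:0] in instruction bits 25:10 and imm[25:16] in bits 9:0. An equivalent standalone sketch, relying (as this file does elsewhere) on arithmetic right shifts of signed values:

    int32_t DecodeImm26(uint32_t instr) {
      int32_t hi10 = static_cast<int32_t>(instr << 22) >> 6;  // imm[25:16], sign-extended
      int32_t low16 = (instr >> 10) & 0xffff;                 // imm[15:0]
      return hi10 | low16;
    }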
+Address RelocInfo::target_runtime_entry(Assembler* origin) {
+ DCHECK(IsRuntimeEntry(rmode_));
+ return target_address();
+}
+
+void RelocInfo::set_target_runtime_entry(Address target,
+ WriteBarrierMode write_barrier_mode,
+ ICacheFlushMode icache_flush_mode) {
+ DCHECK(IsRuntimeEntry(rmode_));
+ if (target_address() != target)
+ set_target_address(target, write_barrier_mode, icache_flush_mode);
+}
+
+Address RelocInfo::target_off_heap_target() {
+ DCHECK(IsOffHeapTarget(rmode_));
+ return Assembler::target_address_at(pc_, constant_pool_);
+}
+
+void RelocInfo::WipeOut() {
+ DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
+ IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
+ IsInternalReference(rmode_) || IsOffHeapTarget(rmode_));
+ if (IsInternalReference(rmode_)) {
+ Memory<Address>(pc_) = kNullAddress;
+ } else {
+ Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress);
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Assembler.
+
+void Assembler::CheckBuffer() {
+ if (buffer_space() <= kGap) {
+ GrowBuffer();
+ }
+}
+
+void Assembler::EmitHelper(Instr x) {
+ *reinterpret_cast<Instr*>(pc_) = x;
+ pc_ += kInstrSize;
+ CheckTrampolinePoolQuick();
+}
+
+template <>
+inline void Assembler::EmitHelper(uint8_t x);
+
+template <typename T>
+void Assembler::EmitHelper(T x) {
+ *reinterpret_cast<T*>(pc_) = x;
+ pc_ += sizeof(x);
+ CheckTrampolinePoolQuick();
+}
+
+template <>
+void Assembler::EmitHelper(uint8_t x) {
+ *reinterpret_cast<uint8_t*>(pc_) = x;
+ pc_ += sizeof(x);
+ if (reinterpret_cast<intptr_t>(pc_) % kInstrSize == 0) {
+ CheckTrampolinePoolQuick();
+ }
+}
+
+void Assembler::emit(Instr x) {
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
+ EmitHelper(x);
+}
+
+void Assembler::emit(uint64_t data) {
+ // CheckForEmitInForbiddenSlot();
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
+ EmitHelper(data);
+}
+
+EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_INL_H_
diff --git a/deps/v8/src/codegen/loong64/assembler-loong64.cc b/deps/v8/src/codegen/loong64/assembler-loong64.cc
new file mode 100644
index 0000000000..cc1eaa7d12
--- /dev/null
+++ b/deps/v8/src/codegen/loong64/assembler-loong64.cc
@@ -0,0 +1,2405 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/loong64/assembler-loong64.h"
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/base/cpu.h"
+#include "src/codegen/loong64/assembler-loong64-inl.h"
+#include "src/codegen/machine-type.h"
+#include "src/codegen/safepoint-table.h"
+#include "src/codegen/string-constants.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/objects/heap-number-inl.h"
+
+namespace v8 {
+namespace internal {
+
+bool CpuFeatures::SupportsWasmSimd128() { return false; }
+
+void CpuFeatures::ProbeImpl(bool cross_compile) {
+ supported_ |= 1u << FPU;
+
+ // Only use statically determined features for cross compile (snapshot).
+ if (cross_compile) return;
+
+#ifdef __loongarch__
+ // Probe for additional features at runtime.
+ base::CPU cpu;
+ supported_ |= 1u << FPU;
+#endif
+
+ // Set a static value on whether SIMD is supported.
+ // This variable is only used for certain archs to query
+ // SupportsWasmSimd128() at runtime in builtins using an extern ref. Other
+ // callers should use CpuFeatures::SupportsWasmSimd128().
+ CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128();
+}
+
+void CpuFeatures::PrintTarget() {}
+void CpuFeatures::PrintFeatures() {}
+
+int ToNumber(Register reg) {
+ DCHECK(reg.is_valid());
+ const int kNumbers[] = {
+ 0, // zero_reg
+ 1, // ra
+ 2, // tp
+ 3, // sp
+ 4, // a0 v0
+ 5, // a1 v1
+ 6, // a2
+ 7, // a3
+ 8, // a4
+ 9, // a5
+ 10, // a6
+ 11, // a7
+ 12, // t0
+ 13, // t1
+ 14, // t2
+ 15, // t3
+ 16, // t4
+ 17, // t5
+ 18, // t6
+ 19, // t7
+ 20, // t8
+ 21, // x_reg
+ 22, // fp
+ 23, // s0
+ 24, // s1
+ 25, // s2
+ 26, // s3
+ 27, // s4
+ 28, // s5
+ 29, // s6
+ 30, // s7
+ 31, // s8
+ };
+ return kNumbers[reg.code()];
+}
+
+Register ToRegister(int num) {
+ DCHECK(num >= 0 && num < kNumRegisters);
+ const Register kRegisters[] = {
+ zero_reg, ra, tp, sp, a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, t3,
+ t4, t5, t6, t7, t8, x_reg, fp, s0, s1, s2, s3, s4, s5, s6, s7, s8};
+ return kRegisters[num];
+}
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo.
+
+const int RelocInfo::kApplyMask =
+ RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
+ RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
+
+bool RelocInfo::IsCodedSpecially() {
+ // The deserializer needs to know whether a pointer is specially coded. Being
+ // specially coded on LoongArch64 means that it is a lu12i_w/ori instruction,
+ // and that is always the case inside code objects.
+ return true;
+}
+
+bool RelocInfo::IsInConstantPool() { return false; }
+
+uint32_t RelocInfo::wasm_call_tag() const {
+ DCHECK(rmode_ == WASM_CALL || rmode_ == WASM_STUB_CALL);
+ return static_cast<uint32_t>(
+ Assembler::target_address_at(pc_, constant_pool_));
+}
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand and MemOperand.
+// See assembler-loong64-inl.h for inlined constructors.
+
+Operand::Operand(Handle<HeapObject> handle)
+ : rm_(no_reg), rmode_(RelocInfo::FULL_EMBEDDED_OBJECT) {
+ value_.immediate = static_cast<intptr_t>(handle.address());
+}
+
+Operand Operand::EmbeddedNumber(double value) {
+ int32_t smi;
+ if (DoubleToSmiInteger(value, &smi)) return Operand(Smi::FromInt(smi));
+ Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(value);
+ return result;
+}
+
+Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
+ Operand result(0, RelocInfo::FULL_EMBEDDED_OBJECT);
+ result.is_heap_object_request_ = true;
+ result.value_.heap_object_request = HeapObjectRequest(str);
+ return result;
+}
+
+MemOperand::MemOperand(Register base, int32_t offset)
+ : base_(base), index_(no_reg), offset_(offset) {}
+
+MemOperand::MemOperand(Register base, Register index)
+ : base_(base), index_(index), offset_(0) {}
+
+void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
+ DCHECK_IMPLIES(isolate == nullptr, heap_object_requests_.empty());
+ for (auto& request : heap_object_requests_) {
+ Handle<HeapObject> object;
+ switch (request.kind()) {
+ case HeapObjectRequest::kHeapNumber:
+ object = isolate->factory()->NewHeapNumber<AllocationType::kOld>(
+ request.heap_number());
+ break;
+ case HeapObjectRequest::kStringConstant:
+ const StringConstantBase* str = request.string();
+ CHECK_NOT_NULL(str);
+ object = str->AllocateStringConstant(isolate);
+ break;
+ }
+ Address pc = reinterpret_cast<Address>(buffer_start_) + request.offset();
+ set_target_value_at(pc, reinterpret_cast<uint64_t>(object.location()));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+
+Assembler::Assembler(const AssemblerOptions& options,
+ std::unique_ptr<AssemblerBuffer> buffer)
+ : AssemblerBase(options, std::move(buffer)),
+ scratch_register_list_(t7.bit() | t6.bit()) {
+ reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
+
+ last_trampoline_pool_end_ = 0;
+ no_trampoline_pool_before_ = 0;
+ trampoline_pool_blocked_nesting_ = 0;
+ // We leave space (16 * kTrampolineSlotsSize)
+ // for BlockTrampolinePoolScope buffer.
+ next_buffer_check_ = FLAG_force_long_branches
+ ? kMaxInt
+ : kMax16BranchOffset - kTrampolineSlotsSize * 16;
+ internal_trampoline_exception_ = false;
+ last_bound_pos_ = 0;
+
+ trampoline_emitted_ = FLAG_force_long_branches;
+ unbound_labels_count_ = 0;
+ block_buffer_growth_ = false;
+}
+
+void Assembler::GetCode(Isolate* isolate, CodeDesc* desc,
+ SafepointTableBuilder* safepoint_table_builder,
+ int handler_table_offset) {
+ // As a crutch to avoid having to add manual Align calls wherever we use a
+ // raw workflow to create Code objects (mostly in tests), add another Align
+ // call here. It does no harm - the end of the Code object is aligned to the
+ // (larger) kCodeAlignment anyway.
+ // TODO(jgruber): Consider moving responsibility for proper alignment to
+ // metadata table builders (safepoint, handler, constant pool, code
+ // comments).
+ DataAlign(Code::kMetadataAlignment);
+
+ // EmitForbiddenSlotInstruction(); // TODO(LOONG64): is this needed here?
+
+ int code_comments_size = WriteCodeComments();
+
+ DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap.
+
+ AllocateAndInstallRequestedHeapObjects(isolate);
+
+ // Set up code descriptor.
+ // TODO(jgruber): Reconsider how these offsets and sizes are maintained up to
+ // this point to make CodeDesc initialization less fiddly.
+
+ static constexpr int kConstantPoolSize = 0;
+ const int instruction_size = pc_offset();
+ const int code_comments_offset = instruction_size - code_comments_size;
+ const int constant_pool_offset = code_comments_offset - kConstantPoolSize;
+ const int handler_table_offset2 = (handler_table_offset == kNoHandlerTable)
+ ? constant_pool_offset
+ : handler_table_offset;
+ const int safepoint_table_offset =
+ (safepoint_table_builder == kNoSafepointTable)
+ ? handler_table_offset2
+ : safepoint_table_builder->GetCodeOffset();
+ const int reloc_info_offset =
+ static_cast<int>(reloc_info_writer.pos() - buffer_->start());
+ CodeDesc::Initialize(desc, this, safepoint_table_offset,
+ handler_table_offset2, constant_pool_offset,
+ code_comments_offset, reloc_info_offset);
+}
+
+void Assembler::Align(int m) {
+ // If not, the loop below won't terminate.
+ DCHECK(IsAligned(pc_offset(), kInstrSize));
+ DCHECK(m >= kInstrSize && base::bits::IsPowerOfTwo(m));
+ while ((pc_offset() & (m - 1)) != 0) {
+ nop();
+ }
+}
+
+void Assembler::CodeTargetAlign() {
+ // No advantage to aligning branch/call targets to more than a
+ // single instruction, as far as I am aware.
+ Align(4);
+}
+
+Register Assembler::GetRkReg(Instr instr) {
+ return Register::from_code((instr & kRkFieldMask) >> kRkShift);
+}
+
+Register Assembler::GetRjReg(Instr instr) {
+ return Register::from_code((instr & kRjFieldMask) >> kRjShift);
+}
+
+Register Assembler::GetRdReg(Instr instr) {
+ return Register::from_code((instr & kRdFieldMask) >> kRdShift);
+}
+
+uint32_t Assembler::GetRk(Instr instr) {
+ return (instr & kRkFieldMask) >> kRkShift;
+}
+
+uint32_t Assembler::GetRkField(Instr instr) { return instr & kRkFieldMask; }
+
+uint32_t Assembler::GetRj(Instr instr) {
+ return (instr & kRjFieldMask) >> kRjShift;
+}
+
+uint32_t Assembler::GetRjField(Instr instr) { return instr & kRjFieldMask; }
+
+uint32_t Assembler::GetRd(Instr instr) {
+ return (instr & kRdFieldMask) >> kRdShift;
+}
+
+uint32_t Assembler::GetRdField(Instr instr) { return instr & kRdFieldMask; }
+
+uint32_t Assembler::GetSa2(Instr instr) {
+ return (instr & kSa2FieldMask) >> kSaShift;
+}
+
+uint32_t Assembler::GetSa2Field(Instr instr) { return instr & kSa2FieldMask; }
+
+uint32_t Assembler::GetSa3(Instr instr) {
+ return (instr & kSa3FieldMask) >> kSaShift;
+}
+
+uint32_t Assembler::GetSa3Field(Instr instr) { return instr & kSa3FieldMask; }
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the last
+// instruction using the label.
+
+// The link chain is terminated by an offset of 0 in the instruction,
+// which is otherwise an illegal value (a branch to 0 is an infinite loop).
+// The instruction's 16-bit offset field addresses 32-bit words, but in
+// code it is converted to an 18-bit value addressing bytes, hence the -4 value.
+
+const int kEndOfChain = 0;
+// Determines the end of the Jump chain (a subset of the label link chain).
+const int kEndOfJumpChain = 0;
+
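Concretely: if three forward branches reference the same still-unbound label, the offset field of the third branch encodes the position of the second, the second encodes the first, and the first holds the 0 sentinel (kEndOfChain). When the label is finally bound, bind_to() below walks this chain via next() and patches each instruction with the real target.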
+bool Assembler::IsBranch(Instr instr) {
+ uint32_t opcode = (instr >> 26) << 26;
+ // Checks if the instruction is a branch.
+ bool isBranch = opcode == BEQZ || opcode == BNEZ || opcode == BCZ ||
+ opcode == B || opcode == BL || opcode == BEQ ||
+ opcode == BNE || opcode == BLT || opcode == BGE ||
+ opcode == BLTU || opcode == BGEU;
+ return isBranch;
+}
+
+bool Assembler::IsB(Instr instr) {
+ uint32_t opcode = (instr >> 26) << 26;
+ // Checks if the instruction is a b.
+ bool isBranch = opcode == B || opcode == BL;
+ return isBranch;
+}
+
+bool Assembler::IsBz(Instr instr) {
+ uint32_t opcode = (instr >> 26) << 26;
+ // Checks if the instruction is a branch.
+ bool isBranch = opcode == BEQZ || opcode == BNEZ || opcode == BCZ;
+ return isBranch;
+}
+
+bool Assembler::IsEmittedConstant(Instr instr) {
+ // Add GetLabelConst function?
+ uint32_t label_constant = instr & ~kImm16Mask;
+ return label_constant == 0; // Emitted label const in reg-exp engine.
+}
+
+bool Assembler::IsJ(Instr instr) {
+ uint32_t opcode = (instr >> 26) << 26;
+ // Checks if the instruction is a jump.
+ return opcode == JIRL;
+}
+
+bool Assembler::IsLu12i_w(Instr instr) {
+ uint32_t opcode = (instr >> 25) << 25;
+ return opcode == LU12I_W;
+}
+
+bool Assembler::IsOri(Instr instr) {
+ uint32_t opcode = (instr >> 22) << 22;
+ return opcode == ORI;
+}
+
+bool Assembler::IsLu32i_d(Instr instr) {
+ uint32_t opcode = (instr >> 25) << 25;
+ return opcode == LU32I_D;
+}
+
+bool Assembler::IsLu52i_d(Instr instr) {
+ uint32_t opcode = (instr >> 22) << 22;
+ return opcode == LU52I_D;
+}
+
+bool Assembler::IsMov(Instr instr, Register rd, Register rj) {
+ // Checks if the instruction is a OR with zero_reg argument (aka MOV).
+ Instr instr1 =
+ OR | zero_reg.code() << kRkShift | rj.code() << kRjShift | rd.code();
+ return instr == instr1;
+}
+
+bool Assembler::IsPcAddi(Instr instr, Register rd, int32_t si20) {
+ DCHECK(is_int20(si20));
+ Instr instr1 = PCADDI | (si20 & 0xfffff) << kRjShift | rd.code();
+ return instr == instr1;
+}
+
+bool Assembler::IsNop(Instr instr, unsigned int type) {
+ // See Assembler::nop(type).
+ DCHECK_LT(type, 32);
+
+ Instr instr1 =
+ ANDI | ((type & kImm12Mask) << kRkShift) | (zero_reg.code() << kRjShift);
+
+ return instr == instr1;
+}
+
+static inline int32_t GetOffsetOfBranch(Instr instr,
+ Assembler::OffsetSize bits) {
+ int32_t result = 0;
+ if (bits == 16) {
+ result = (instr << 6) >> 16;
+ } else if (bits == 21) {
+ uint32_t low16 = instr << 6;
+ low16 = low16 >> 16;
+ low16 &= 0xffff;
+ int32_t hi5 = (instr << 27) >> 11;
+ result = hi5 | low16;
+ } else {
+ uint32_t low16 = instr << 6;
+ low16 = low16 >> 16;
+ low16 &= 0xffff;
+ int32_t hi10 = (instr << 22) >> 6;
+ result = hi10 | low16;
+ DCHECK_EQ(bits, 26);
+ }
+ return result << 2;
+}
+
+static Assembler::OffsetSize OffsetSizeInBits(Instr instr) {
+ if (Assembler::IsB(instr)) {
+ return Assembler::OffsetSize::kOffset26;
+ } else if (Assembler::IsBz(instr)) {
+ return Assembler::OffsetSize::kOffset21;
+ } else {
+ DCHECK(Assembler::IsBranch(instr));
+ return Assembler::OffsetSize::kOffset16;
+ }
+}
+
+static inline int32_t AddBranchOffset(int pos, Instr instr) {
+ Assembler::OffsetSize bits = OffsetSizeInBits(instr);
+
+ int32_t imm = GetOffsetOfBranch(instr, bits);
+
+ if (imm == kEndOfChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ } else {
+ // Handle the case that next branch position is 0.
+ // TODO(LOONG_dev): Define -4 as a constant
+ int32_t offset = pos + imm;
+ return offset == 0 ? -4 : offset;
+ }
+}
+
+int Assembler::target_at(int pos, bool is_internal) {
+ if (is_internal) {
+ int64_t* p = reinterpret_cast<int64_t*>(buffer_start_ + pos);
+ int64_t address = *p;
+ if (address == kEndOfJumpChain) {
+ return kEndOfChain;
+ } else {
+ int64_t instr_address = reinterpret_cast<int64_t>(p);
+ DCHECK(instr_address - address < INT_MAX);
+ int delta = static_cast<int>(instr_address - address);
+ DCHECK(pos > delta);
+ return pos - delta;
+ }
+ }
+ Instr instr = instr_at(pos);
+
+ // TODO(LOONG_dev) remove after remove label_at_put?
+ if ((instr & ~kImm16Mask) == 0) {
+ // Emitted label constant, not part of a branch.
+ if (instr == 0) {
+ return kEndOfChain;
+ } else {
+ int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+ return (imm18 + pos);
+ }
+ }
+
+ // Check we have a branch or jump instruction.
+ DCHECK(IsBranch(instr) || IsPcAddi(instr, t8, 16));
+ // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
+ // the compiler uses arithmetic shifts for signed integers.
+ if (IsBranch(instr)) {
+ return AddBranchOffset(pos, instr);
+ } else {
+ DCHECK(IsPcAddi(instr, t8, 16));
+ // see BranchLong(Label* L) and BranchAndLinkLong ??
+ int32_t imm32;
+ Instr instr_lu12i_w = instr_at(pos + 1 * kInstrSize);
+ Instr instr_ori = instr_at(pos + 2 * kInstrSize);
+ DCHECK(IsLu12i_w(instr_lu12i_w));
+ imm32 = ((instr_lu12i_w >> 5) & 0xfffff) << 12;
+ imm32 |= ((instr_ori >> 10) & static_cast<int32_t>(kImm12Mask));
+ if (imm32 == kEndOfJumpChain) {
+ // EndOfChain sentinel is returned directly, not relative to pc or pos.
+ return kEndOfChain;
+ }
+ return pos + imm32;
+ }
+}
+
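In the long-branch case just handled, the 32-bit offset is reconstructed from the lu12i_w/ori pair that follows the pcaddi: lu12i_w carries bits 31:12 of the value in its 20-bit immediate field (instruction bits 24:5, hence the >> 5), and ori carries bits 11:0 in its 12-bit immediate field (instruction bits 21:10, hence the >> 10).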
+static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
+ Instr instr) {
+ int32_t bits = OffsetSizeInBits(instr);
+ int32_t imm = target_pos - pos;
+ DCHECK_EQ(imm & 3, 0);
+ imm >>= 2;
+
+ DCHECK(is_intn(imm, bits));
+
+ if (bits == 16) {
+ const int32_t mask = ((1 << 16) - 1) << 10;
+ instr &= ~mask;
+ return instr | ((imm << 10) & mask);
+ } else if (bits == 21) {
+ const int32_t mask = 0x3fffc1f;
+ instr &= ~mask;
+ uint32_t low16 = (imm & kImm16Mask) << 10;
+ int32_t hi5 = (imm >> 16) & 0x1f;
+ return instr | low16 | hi5;
+ } else {
+ DCHECK_EQ(bits, 26);
+ const int32_t mask = 0x3ffffff;
+ instr &= ~mask;
+ uint32_t low16 = (imm & kImm16Mask) << 10;
+ int32_t hi10 = (imm >> 16) & 0x3ff;
+ return instr | low16 | hi10;
+ }
+}
+
+void Assembler::target_at_put(int pos, int target_pos, bool is_internal) {
+ if (is_internal) {
+ uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
+ *reinterpret_cast<uint64_t*>(buffer_start_ + pos) = imm;
+ return;
+ }
+ Instr instr = instr_at(pos);
+ if ((instr & ~kImm16Mask) == 0) {
+ DCHECK(target_pos == kEndOfChain || target_pos >= 0);
+ // Emitted label constant, not part of a branch.
+ // Make label relative to Code pointer of generated Code object.
+ instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ return;
+ }
+
+ DCHECK(IsBranch(instr));
+ instr = SetBranchOffset(pos, target_pos, instr);
+ instr_at_put(pos, instr);
+}
+
+void Assembler::print(const Label* L) {
+ if (L->is_unused()) {
+ PrintF("unused label\n");
+ } else if (L->is_bound()) {
+ PrintF("bound label to %d\n", L->pos());
+ } else if (L->is_linked()) {
+ Label l;
+ l.link_to(L->pos());
+ PrintF("unbound label");
+ while (l.is_linked()) {
+ PrintF("@ %d ", l.pos());
+ Instr instr = instr_at(l.pos());
+ if ((instr & ~kImm16Mask) == 0) {
+ PrintF("value\n");
+ } else {
+ PrintF("%d\n", instr);
+ }
+ next(&l, is_internal_reference(&l));
+ }
+ } else {
+ PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
+ }
+}
+
+void Assembler::bind_to(Label* L, int pos) {
+ DCHECK(0 <= pos && pos <= pc_offset()); // Must have valid binding position.
+ int trampoline_pos = kInvalidSlotPos;
+ bool is_internal = false;
+ if (L->is_linked() && !trampoline_emitted_) {
+ unbound_labels_count_--;
+ if (!is_internal_reference(L)) {
+ next_buffer_check_ += kTrampolineSlotsSize;
+ }
+ }
+
+ while (L->is_linked()) {
+ int fixup_pos = L->pos();
+ int dist = pos - fixup_pos;
+ is_internal = is_internal_reference(L);
+ next(L, is_internal); // Call next before overwriting link with target at
+ // fixup_pos.
+ Instr instr = instr_at(fixup_pos);
+ if (is_internal) {
+ target_at_put(fixup_pos, pos, is_internal);
+ } else {
+ if (IsBranch(instr)) {
+ int branch_offset = BranchOffset(instr);
+ if (dist > branch_offset) {
+ if (trampoline_pos == kInvalidSlotPos) {
+ trampoline_pos = get_trampoline_entry(fixup_pos);
+ CHECK_NE(trampoline_pos, kInvalidSlotPos);
+ }
+ CHECK((trampoline_pos - fixup_pos) <= branch_offset);
+ target_at_put(fixup_pos, trampoline_pos, false);
+ fixup_pos = trampoline_pos;
+ }
+ target_at_put(fixup_pos, pos, false);
+ } else {
+ DCHECK(IsJ(instr) || IsLu12i_w(instr) || IsEmittedConstant(instr) ||
+ IsPcAddi(instr, t8, 8));
+ target_at_put(fixup_pos, pos, false);
+ }
+ }
+ }
+ L->bind_to(pos);
+
+ // Keep track of the last bound label so we don't eliminate any instructions
+ // before a bound label.
+ if (pos > last_bound_pos_) last_bound_pos_ = pos;
+}
+
+void Assembler::bind(Label* L) {
+ DCHECK(!L->is_bound()); // Label can only be bound once.
+ bind_to(L, pc_offset());
+}
+
+void Assembler::next(Label* L, bool is_internal) {
+ DCHECK(L->is_linked());
+ int link = target_at(L->pos(), is_internal);
+ if (link == kEndOfChain) {
+ L->Unuse();
+ } else if (link == -4) {
+ // Next position is pc_offset == 0
+ L->link_to(0);
+ } else {
+ DCHECK_GE(link, 0);
+ L->link_to(link);
+ }
+}
+
+bool Assembler::is_near_c(Label* L) {
+ DCHECK(L->is_bound());
+ return pc_offset() - L->pos() < kMax16BranchOffset - 4 * kInstrSize;
+}
+
+bool Assembler::is_near(Label* L, OffsetSize bits) {
+ DCHECK(L->is_bound());
+ return ((pc_offset() - L->pos()) <
+ (1 << (bits + 2 - 1)) - 1 - 5 * kInstrSize);
+}
+
+bool Assembler::is_near_a(Label* L) {
+ DCHECK(L->is_bound());
+ return pc_offset() - L->pos() <= kMax26BranchOffset - 4 * kInstrSize;
+}
+
+int Assembler::BranchOffset(Instr instr) {
+ int bits = OffsetSize::kOffset16;
+
+ uint32_t opcode = (instr >> 26) << 26;
+ switch (opcode) {
+ case B:
+ case BL:
+ bits = OffsetSize::kOffset26;
+ break;
+ case BNEZ:
+ case BEQZ:
+ case BCZ:
+ bits = OffsetSize::kOffset21;
+ break;
+ case BNE:
+ case BEQ:
+ case BLT:
+ case BGE:
+ case BLTU:
+ case BGEU:
+ case JIRL:
+ bits = OffsetSize::kOffset16;
+ break;
+ default:
+ break;
+ }
+
+ return (1 << (bits + 2 - 1)) - 1;
+}
+
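Worked out, (1 << (bits + 2 - 1)) - 1 is an upper bound on the forward reach in bytes: the offset field counts 32-bit words (hence the + 2) and one bit is the sign (hence the - 1), giving 2^17 - 1 = 131071 bytes for kOffset16, 2^22 - 1 = 4194303 for kOffset21, and 2^27 - 1 = 134217727 for kOffset26.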
+// We have to use a temporary register for things that can be relocated even
+// if they can be encoded in LoongArch's 16 bits of immediate-offset
+// instruction space. There is no guarantee that the relocated location can be
+// similarly encoded.
+bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
+ return !RelocInfo::IsNone(rmode);
+}
+
+void Assembler::GenB(Opcode opcode, Register rj, int32_t si21) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK((BEQZ == opcode || BNEZ == opcode) && is_int21(si21) && rj.is_valid());
+ Instr instr = opcode | (si21 & kImm16Mask) << kRkShift |
+ (rj.code() << kRjShift) | ((si21 & 0x1fffff) >> 16);
+ emit(instr);
+}
+
+void Assembler::GenB(Opcode opcode, CFRegister cj, int32_t si21, bool isEq) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(BCZ == opcode && is_int21(si21));
+ DCHECK(cj >= 0 && cj <= 7);
+ int32_t sc = (isEq ? cj : cj + 8);
+ Instr instr = opcode | (si21 & kImm16Mask) << kRkShift | (sc << kRjShift) |
+ ((si21 & 0x1fffff) >> 16);
+ emit(instr);
+}
+
+void Assembler::GenB(Opcode opcode, int32_t si26) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK((B == opcode || BL == opcode) && is_int26(si26));
+ Instr instr =
+ opcode | ((si26 & kImm16Mask) << kRkShift) | ((si26 & kImm26Mask) >> 16);
+ emit(instr);
+}
+
+void Assembler::GenBJ(Opcode opcode, Register rj, Register rd, int32_t si16) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(is_int16(si16));
+ Instr instr = opcode | ((si16 & kImm16Mask) << kRkShift) |
+ (rj.code() << kRjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenCmp(Opcode opcode, FPUCondition cond, FPURegister fk,
+ FPURegister fj, CFRegister cd) {
+ DCHECK(opcode == FCMP_COND_S || opcode == FCMP_COND_D);
+ Instr instr = opcode | cond << kCondShift | (fk.code() << kFkShift) |
+ (fj.code() << kFjShift) | cd;
+ emit(instr);
+}
+
+void Assembler::GenSel(Opcode opcode, CFRegister ca, FPURegister fk,
+ FPURegister fj, FPURegister rd) {
+ DCHECK((opcode == FSEL));
+ Instr instr = opcode | ca << kCondShift | (fk.code() << kFkShift) |
+ (fj.code() << kFjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, Register rj, Register rd,
+ bool rjrd) {
+ DCHECK(rjrd);
+ Instr instr = 0;
+ instr = opcode | (rj.code() << kRjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, FPURegister fj, FPURegister fd) {
+ Instr instr = opcode | (fj.code() << kFjShift) | fd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, Register rj, FPURegister fd) {
+ DCHECK((opcode == MOVGR2FR_W) || (opcode == MOVGR2FR_D) ||
+ (opcode == MOVGR2FRH_W));
+ Instr instr = opcode | (rj.code() << kRjShift) | fd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, FPURegister fj, Register rd) {
+ DCHECK((opcode == MOVFR2GR_S) || (opcode == MOVFR2GR_D) ||
+ (opcode == MOVFRH2GR_S));
+ Instr instr = opcode | (fj.code() << kFjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, Register rj, FPUControlRegister fd) {
+ DCHECK((opcode == MOVGR2FCSR));
+ Instr instr = opcode | (rj.code() << kRjShift) | fd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, FPUControlRegister fj, Register rd) {
+ DCHECK((opcode == MOVFCSR2GR));
+ Instr instr = opcode | (fj.code() << kFjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, FPURegister fj, CFRegister cd) {
+ DCHECK((opcode == MOVFR2CF));
+ Instr instr = opcode | (fj.code() << kFjShift) | cd;
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, CFRegister cj, FPURegister fd) {
+ DCHECK((opcode == MOVCF2FR));
+ Instr instr = opcode | cj << kFjShift | fd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, Register rj, CFRegister cd) {
+ DCHECK((opcode == MOVGR2CF));
+ Instr instr = opcode | (rj.code() << kRjShift) | cd;
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, CFRegister cj, Register rd) {
+ DCHECK((opcode == MOVCF2GR));
+ Instr instr = opcode | cj << kFjShift | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, Register rk, Register rj,
+ Register rd) {
+ Instr instr =
+ opcode | (rk.code() << kRkShift) | (rj.code() << kRjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, FPURegister fk, FPURegister fj,
+ FPURegister fd) {
+ Instr instr =
+ opcode | (fk.code() << kFkShift) | (fj.code() << kFjShift) | fd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, FPURegister fa, FPURegister fk,
+ FPURegister fj, FPURegister fd) {
+ Instr instr = opcode | (fa.code() << kFaShift) | (fk.code() << kFkShift) |
+ (fj.code() << kFjShift) | fd.code();
+ emit(instr);
+}
+
+void Assembler::GenRegister(Opcode opcode, Register rk, Register rj,
+ FPURegister fd) {
+ Instr instr =
+ opcode | (rk.code() << kRkShift) | (rj.code() << kRjShift) | fd.code();
+ emit(instr);
+}
+
+void Assembler::GenImm(Opcode opcode, int32_t bit3, Register rk, Register rj,
+ Register rd) {
+ DCHECK(is_uint3(bit3));
+ Instr instr = opcode | (bit3 & 0x7) << kSaShift | (rk.code() << kRkShift) |
+ (rj.code() << kRjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenImm(Opcode opcode, int32_t bit6m, int32_t bit6l, Register rj,
+ Register rd) {
+ DCHECK(is_uint6(bit6m) && is_uint6(bit6l));
+ Instr instr = opcode | (bit6m & 0x3f) << 16 | (bit6l & 0x3f) << kRkShift |
+ (rj.code() << kRjShift) | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenImm(Opcode opcode, int32_t bit20, Register rd) {
+ // DCHECK(is_uint20(bit20) || is_int20(bit20));
+ Instr instr = opcode | (bit20 & 0xfffff) << kRjShift | rd.code();
+ emit(instr);
+}
+
+void Assembler::GenImm(Opcode opcode, int32_t bit15) {
+ DCHECK(is_uint15(bit15));
+ Instr instr = opcode | (bit15 & 0x7fff);
+ emit(instr);
+}
+
+void Assembler::GenImm(Opcode opcode, int32_t value, Register rj, Register rd,
+ int32_t value_bits) {
+ DCHECK(value_bits == 6 || value_bits == 12 || value_bits == 14 ||
+ value_bits == 16);
+ uint32_t imm = value & 0x3f;
+ if (value_bits == 12) {
+ imm = value & kImm12Mask;
+ } else if (value_bits == 14) {
+ imm = value & 0x3fff;
+ } else if (value_bits == 16) {
+ imm = value & kImm16Mask;
+ }
+ Instr instr = opcode | imm << kRkShift | (rj.code() << kRjShift) | rd.code();
+ emit(instr);
+}
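+
+// Worked example (values assumed for illustration): addi_d(a0, a1, -8)
+// reaches this overload with value_bits == 12, so
+//   imm   = -8 & kImm12Mask  // 0xff8, the sign bit lives in bit 11
+//   instr = ADDI_D | imm << kRkShift | (a1.code() << kRjShift) | a0.code()
+// and the hardware sign-extends the 12-bit field when executing.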
+
+void Assembler::GenImm(Opcode opcode, int32_t bit12, Register rj,
+ FPURegister fd) {
+ DCHECK(is_int12(bit12));
+ Instr instr = opcode | ((bit12 & kImm12Mask) << kRkShift) |
+ (rj.code() << kRjShift) | fd.code();
+ emit(instr);
+}
+
+// Returns the next free trampoline entry.
+int32_t Assembler::get_trampoline_entry(int32_t pos) {
+ int32_t trampoline_entry = kInvalidSlotPos;
+ if (!internal_trampoline_exception_) {
+ if (trampoline_.start() > pos) {
+ trampoline_entry = trampoline_.take_slot();
+ }
+
+ if (kInvalidSlotPos == trampoline_entry) {
+ internal_trampoline_exception_ = true;
+ }
+ }
+ return trampoline_entry;
+}
+
+uint64_t Assembler::jump_address(Label* L) {
+ int64_t target_pos;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link.
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ return kEndOfJumpChain;
+ }
+ }
+ uint64_t imm = reinterpret_cast<uint64_t>(buffer_start_) + target_pos;
+ DCHECK_EQ(imm & 3, 0);
+
+ return imm;
+}
+
+uint64_t Assembler::branch_long_offset(Label* L) {
+ int64_t target_pos;
+
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link.
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ return kEndOfJumpChain;
+ }
+ }
+ int64_t offset = target_pos - pc_offset();
+ DCHECK_EQ(offset & 3, 0);
+
+ return static_cast<uint64_t>(offset);
+}
+
+int32_t Assembler::branch_offset_helper(Label* L, OffsetSize bits) {
+ int32_t target_pos;
+
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos();
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ return kEndOfChain;
+ }
+ }
+
+ int32_t offset = target_pos - pc_offset();
+ DCHECK(is_intn(offset, bits + 2));
+ DCHECK_EQ(offset & 3, 0);
+
+ return offset;
+}
+
+void Assembler::label_at_put(Label* L, int at_offset) {
+ int target_pos;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos(); // L's link.
+ int32_t imm18 = target_pos - at_offset;
+ DCHECK_EQ(imm18 & 3, 0);
+ int32_t imm16 = imm18 >> 2;
+ DCHECK(is_int16(imm16));
+ instr_at_put(at_offset, (imm16 & kImm16Mask));
+ } else {
+ target_pos = kEndOfChain;
+ instr_at_put(at_offset, 0);
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ }
+ L->link_to(at_offset);
+ }
+}
+
+//------- Branch and jump instructions --------
+
+void Assembler::b(int32_t offset) { GenB(B, offset); }
+
+void Assembler::bl(int32_t offset) { GenB(BL, offset); }
+
+void Assembler::beq(Register rj, Register rd, int32_t offset) {
+ GenBJ(BEQ, rj, rd, offset);
+}
+
+void Assembler::bne(Register rj, Register rd, int32_t offset) {
+ GenBJ(BNE, rj, rd, offset);
+}
+
+void Assembler::blt(Register rj, Register rd, int32_t offset) {
+ GenBJ(BLT, rj, rd, offset);
+}
+
+void Assembler::bge(Register rj, Register rd, int32_t offset) {
+ GenBJ(BGE, rj, rd, offset);
+}
+
+void Assembler::bltu(Register rj, Register rd, int32_t offset) {
+ GenBJ(BLTU, rj, rd, offset);
+}
+
+void Assembler::bgeu(Register rj, Register rd, int32_t offset) {
+ GenBJ(BGEU, rj, rd, offset);
+}
+
+void Assembler::beqz(Register rj, int32_t offset) { GenB(BEQZ, rj, offset); }
+void Assembler::bnez(Register rj, int32_t offset) { GenB(BNEZ, rj, offset); }
+
+void Assembler::jirl(Register rd, Register rj, int32_t offset) {
+ GenBJ(JIRL, rj, rd, offset);
+}
+
+void Assembler::bceqz(CFRegister cj, int32_t si21) {
+ GenB(BCZ, cj, si21, true);
+}
+
+void Assembler::bcnez(CFRegister cj, int32_t si21) {
+ GenB(BCZ, cj, si21, false);
+}
+
+// -------Data-processing-instructions---------
+
+// Arithmetic.
+void Assembler::add_w(Register rd, Register rj, Register rk) {
+ GenRegister(ADD_W, rk, rj, rd);
+}
+
+void Assembler::add_d(Register rd, Register rj, Register rk) {
+ GenRegister(ADD_D, rk, rj, rd);
+}
+
+void Assembler::sub_w(Register rd, Register rj, Register rk) {
+ GenRegister(SUB_W, rk, rj, rd);
+}
+
+void Assembler::sub_d(Register rd, Register rj, Register rk) {
+ GenRegister(SUB_D, rk, rj, rd);
+}
+
+void Assembler::addi_w(Register rd, Register rj, int32_t si12) {
+ GenImm(ADDI_W, si12, rj, rd, 12);
+}
+
+void Assembler::addi_d(Register rd, Register rj, int32_t si12) {
+ GenImm(ADDI_D, si12, rj, rd, 12);
+}
+
+void Assembler::addu16i_d(Register rd, Register rj, int32_t si16) {
+ GenImm(ADDU16I_D, si16, rj, rd, 16);
+}
+
+void Assembler::alsl_w(Register rd, Register rj, Register rk, int32_t sa2) {
+ DCHECK(is_uint2(sa2 - 1));
+ GenImm(ALSL_W, sa2 - 1, rk, rj, rd);
+}
+
+void Assembler::alsl_wu(Register rd, Register rj, Register rk, int32_t sa2) {
+ DCHECK(is_uint2(sa2 - 1));
+ GenImm(ALSL_WU, sa2 + 3, rk, rj, rd);
+}
+
+void Assembler::alsl_d(Register rd, Register rj, Register rk, int32_t sa2) {
+ DCHECK(is_uint2(sa2 - 1));
+ GenImm(ALSL_D, sa2 - 1, rk, rj, rd);
+}
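+
+// alsl computes (rj << sa2) + rk for sa2 in [1, 4]; the 2-bit field stores
+// sa2 - 1, hence the is_uint2(sa2 - 1) checks above. The _wu form stores
+// sa2 + 3, i.e. sa2 - 1 with the high bit of the 3-bit sa field set to
+// select the unsigned variant.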
+
+void Assembler::lu12i_w(Register rd, int32_t si20) {
+ GenImm(LU12I_W, si20, rd);
+}
+
+void Assembler::lu32i_d(Register rd, int32_t si20) {
+ GenImm(LU32I_D, si20, rd);
+}
+
+void Assembler::lu52i_d(Register rd, Register rj, int32_t si12) {
+ GenImm(LU52I_D, si12, rj, rd, 12);
+}
+
+void Assembler::slt(Register rd, Register rj, Register rk) {
+ GenRegister(SLT, rk, rj, rd);
+}
+
+void Assembler::sltu(Register rd, Register rj, Register rk) {
+ GenRegister(SLTU, rk, rj, rd);
+}
+
+void Assembler::slti(Register rd, Register rj, int32_t si12) {
+ GenImm(SLTI, si12, rj, rd, 12);
+}
+
+void Assembler::sltui(Register rd, Register rj, int32_t si12) {
+ GenImm(SLTUI, si12, rj, rd, 12);
+}
+
+void Assembler::pcaddi(Register rd, int32_t si20) { GenImm(PCADDI, si20, rd); }
+
+void Assembler::pcaddu12i(Register rd, int32_t si20) {
+ GenImm(PCADDU12I, si20, rd);
+}
+
+void Assembler::pcaddu18i(Register rd, int32_t si20) {
+ GenImm(PCADDU18I, si20, rd);
+}
+
+void Assembler::pcalau12i(Register rd, int32_t si20) {
+ GenImm(PCALAU12I, si20, rd);
+}
+
+void Assembler::and_(Register rd, Register rj, Register rk) {
+ GenRegister(AND, rk, rj, rd);
+}
+
+void Assembler::or_(Register rd, Register rj, Register rk) {
+ GenRegister(OR, rk, rj, rd);
+}
+
+void Assembler::xor_(Register rd, Register rj, Register rk) {
+ GenRegister(XOR, rk, rj, rd);
+}
+
+void Assembler::nor(Register rd, Register rj, Register rk) {
+ GenRegister(NOR, rk, rj, rd);
+}
+
+void Assembler::andn(Register rd, Register rj, Register rk) {
+ GenRegister(ANDN, rk, rj, rd);
+}
+
+void Assembler::orn(Register rd, Register rj, Register rk) {
+ GenRegister(ORN, rk, rj, rd);
+}
+
+void Assembler::andi(Register rd, Register rj, int32_t ui12) {
+ GenImm(ANDI, ui12, rj, rd, 12);
+}
+
+void Assembler::ori(Register rd, Register rj, int32_t ui12) {
+ GenImm(ORI, ui12, rj, rd, 12);
+}
+
+void Assembler::xori(Register rd, Register rj, int32_t ui12) {
+ GenImm(XORI, ui12, rj, rd, 12);
+}
+
+void Assembler::mul_w(Register rd, Register rj, Register rk) {
+ GenRegister(MUL_W, rk, rj, rd);
+}
+
+void Assembler::mulh_w(Register rd, Register rj, Register rk) {
+ GenRegister(MULH_W, rk, rj, rd);
+}
+
+void Assembler::mulh_wu(Register rd, Register rj, Register rk) {
+ GenRegister(MULH_WU, rk, rj, rd);
+}
+
+void Assembler::mul_d(Register rd, Register rj, Register rk) {
+ GenRegister(MUL_D, rk, rj, rd);
+}
+
+void Assembler::mulh_d(Register rd, Register rj, Register rk) {
+ GenRegister(MULH_D, rk, rj, rd);
+}
+
+void Assembler::mulh_du(Register rd, Register rj, Register rk) {
+ GenRegister(MULH_DU, rk, rj, rd);
+}
+
+void Assembler::mulw_d_w(Register rd, Register rj, Register rk) {
+ GenRegister(MULW_D_W, rk, rj, rd);
+}
+
+void Assembler::mulw_d_wu(Register rd, Register rj, Register rk) {
+ GenRegister(MULW_D_WU, rk, rj, rd);
+}
+
+void Assembler::div_w(Register rd, Register rj, Register rk) {
+ GenRegister(DIV_W, rk, rj, rd);
+}
+
+void Assembler::mod_w(Register rd, Register rj, Register rk) {
+ GenRegister(MOD_W, rk, rj, rd);
+}
+
+void Assembler::div_wu(Register rd, Register rj, Register rk) {
+ GenRegister(DIV_WU, rk, rj, rd);
+}
+
+void Assembler::mod_wu(Register rd, Register rj, Register rk) {
+ GenRegister(MOD_WU, rk, rj, rd);
+}
+
+void Assembler::div_d(Register rd, Register rj, Register rk) {
+ GenRegister(DIV_D, rk, rj, rd);
+}
+
+void Assembler::mod_d(Register rd, Register rj, Register rk) {
+ GenRegister(MOD_D, rk, rj, rd);
+}
+
+void Assembler::div_du(Register rd, Register rj, Register rk) {
+ GenRegister(DIV_DU, rk, rj, rd);
+}
+
+void Assembler::mod_du(Register rd, Register rj, Register rk) {
+ GenRegister(MOD_DU, rk, rj, rd);
+}
+
+// Shifts.
+void Assembler::sll_w(Register rd, Register rj, Register rk) {
+ GenRegister(SLL_W, rk, rj, rd);
+}
+
+void Assembler::srl_w(Register rd, Register rj, Register rk) {
+ GenRegister(SRL_W, rk, rj, rd);
+}
+
+void Assembler::sra_w(Register rd, Register rj, Register rk) {
+ GenRegister(SRA_W, rk, rj, rd);
+}
+
+void Assembler::rotr_w(Register rd, Register rj, Register rk) {
+ GenRegister(ROTR_W, rk, rj, rd);
+}
+
+void Assembler::slli_w(Register rd, Register rj, int32_t ui5) {
+ DCHECK(is_uint5(ui5));
+ GenImm(SLLI_W, ui5 + 0x20, rj, rd, 6);
+}
+
+void Assembler::srli_w(Register rd, Register rj, int32_t ui5) {
+ DCHECK(is_uint5(ui5));
+ GenImm(SRLI_W, ui5 + 0x20, rj, rd, 6);
+}
+
+void Assembler::srai_w(Register rd, Register rj, int32_t ui5) {
+ DCHECK(is_uint5(ui5));
+ GenImm(SRAI_W, ui5 + 0x20, rj, rd, 6);
+}
+
+void Assembler::rotri_w(Register rd, Register rj, int32_t ui5) {
+ DCHECK(is_uint5(ui5));
+ GenImm(ROTRI_W, ui5 + 0x20, rj, rd, 6);
+}
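+
+// For the *_w shift-immediate forms above, ui5 + 0x20 sets the bit just
+// above the 5-bit shift amount; that bit is effectively part of the fixed
+// encoding of the 32-bit forms, while the 64-bit forms below encode a plain
+// 6-bit ui6.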
+
+void Assembler::sll_d(Register rd, Register rj, Register rk) {
+ GenRegister(SLL_D, rk, rj, rd);
+}
+
+void Assembler::srl_d(Register rd, Register rj, Register rk) {
+ GenRegister(SRL_D, rk, rj, rd);
+}
+
+void Assembler::sra_d(Register rd, Register rj, Register rk) {
+ GenRegister(SRA_D, rk, rj, rd);
+}
+
+void Assembler::rotr_d(Register rd, Register rj, Register rk) {
+ GenRegister(ROTR_D, rk, rj, rd);
+}
+
+void Assembler::slli_d(Register rd, Register rj, int32_t ui6) {
+ GenImm(SLLI_D, ui6, rj, rd, 6);
+}
+
+void Assembler::srli_d(Register rd, Register rj, int32_t ui6) {
+ GenImm(SRLI_D, ui6, rj, rd, 6);
+}
+
+void Assembler::srai_d(Register rd, Register rj, int32_t ui6) {
+ GenImm(SRAI_D, ui6, rj, rd, 6);
+}
+
+void Assembler::rotri_d(Register rd, Register rj, int32_t ui6) {
+ GenImm(ROTRI_D, ui6, rj, rd, 6);
+}
+
+// Bit twiddling.
+void Assembler::ext_w_b(Register rd, Register rj) {
+ GenRegister(EXT_W_B, rj, rd);
+}
+
+void Assembler::ext_w_h(Register rd, Register rj) {
+ GenRegister(EXT_W_H, rj, rd);
+}
+
+void Assembler::clo_w(Register rd, Register rj) { GenRegister(CLO_W, rj, rd); }
+
+void Assembler::clz_w(Register rd, Register rj) { GenRegister(CLZ_W, rj, rd); }
+
+void Assembler::cto_w(Register rd, Register rj) { GenRegister(CTO_W, rj, rd); }
+
+void Assembler::ctz_w(Register rd, Register rj) { GenRegister(CTZ_W, rj, rd); }
+
+void Assembler::clo_d(Register rd, Register rj) { GenRegister(CLO_D, rj, rd); }
+
+void Assembler::clz_d(Register rd, Register rj) { GenRegister(CLZ_D, rj, rd); }
+
+void Assembler::cto_d(Register rd, Register rj) { GenRegister(CTO_D, rj, rd); }
+
+void Assembler::ctz_d(Register rd, Register rj) { GenRegister(CTZ_D, rj, rd); }
+
+void Assembler::bytepick_w(Register rd, Register rj, Register rk, int32_t sa2) {
+ DCHECK(is_uint2(sa2));
+ GenImm(BYTEPICK_W, sa2, rk, rj, rd);
+}
+
+void Assembler::bytepick_d(Register rd, Register rj, Register rk, int32_t sa3) {
+ GenImm(BYTEPICK_D, sa3, rk, rj, rd);
+}
+
+void Assembler::revb_2h(Register rd, Register rj) {
+ GenRegister(REVB_2H, rj, rd);
+}
+
+void Assembler::revb_4h(Register rd, Register rj) {
+ GenRegister(REVB_4H, rj, rd);
+}
+
+void Assembler::revb_2w(Register rd, Register rj) {
+ GenRegister(REVB_2W, rj, rd);
+}
+
+void Assembler::revb_d(Register rd, Register rj) {
+ GenRegister(REVB_D, rj, rd);
+}
+
+void Assembler::revh_2w(Register rd, Register rj) {
+ GenRegister(REVH_2W, rj, rd);
+}
+
+void Assembler::revh_d(Register rd, Register rj) {
+ GenRegister(REVH_D, rj, rd);
+}
+
+void Assembler::bitrev_4b(Register rd, Register rj) {
+ GenRegister(BITREV_4B, rj, rd);
+}
+
+void Assembler::bitrev_8b(Register rd, Register rj) {
+ GenRegister(BITREV_8B, rj, rd);
+}
+
+void Assembler::bitrev_w(Register rd, Register rj) {
+ GenRegister(BITREV_W, rj, rd);
+}
+
+void Assembler::bitrev_d(Register rd, Register rj) {
+ GenRegister(BITREV_D, rj, rd);
+}
+
+void Assembler::bstrins_w(Register rd, Register rj, int32_t msbw,
+ int32_t lsbw) {
+ DCHECK(is_uint5(msbw) && is_uint5(lsbw));
+ GenImm(BSTR_W, msbw + 0x20, lsbw, rj, rd);
+}
+
+void Assembler::bstrins_d(Register rd, Register rj, int32_t msbd,
+ int32_t lsbd) {
+ GenImm(BSTRINS_D, msbd, lsbd, rj, rd);
+}
+
+void Assembler::bstrpick_w(Register rd, Register rj, int32_t msbw,
+ int32_t lsbw) {
+ DCHECK(is_uint5(msbw) && is_uint5(lsbw));
+ GenImm(BSTR_W, msbw + 0x20, lsbw + 0x20, rj, rd);
+}
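+
+// bstrins_w and bstrpick_w share the BSTR_W opcode: msbw + 0x20 sets the
+// fixed bit above the 5-bit msb field in both, while the extra 0x20 on
+// lsbw selects the pick form over the ins form.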
+
+void Assembler::bstrpick_d(Register rd, Register rj, int32_t msbd,
+ int32_t lsbd) {
+ GenImm(BSTRPICK_D, msbd, lsbd, rj, rd);
+}
+
+void Assembler::maskeqz(Register rd, Register rj, Register rk) {
+ GenRegister(MASKEQZ, rk, rj, rd);
+}
+
+void Assembler::masknez(Register rd, Register rj, Register rk) {
+ GenRegister(MASKNEZ, rk, rj, rd);
+}
+
+// Memory-instructions
+void Assembler::ld_b(Register rd, Register rj, int32_t si12) {
+ GenImm(LD_B, si12, rj, rd, 12);
+}
+
+void Assembler::ld_h(Register rd, Register rj, int32_t si12) {
+ GenImm(LD_H, si12, rj, rd, 12);
+}
+
+void Assembler::ld_w(Register rd, Register rj, int32_t si12) {
+ GenImm(LD_W, si12, rj, rd, 12);
+}
+
+void Assembler::ld_d(Register rd, Register rj, int32_t si12) {
+ GenImm(LD_D, si12, rj, rd, 12);
+}
+
+void Assembler::ld_bu(Register rd, Register rj, int32_t si12) {
+ GenImm(LD_BU, si12, rj, rd, 12);
+}
+
+void Assembler::ld_hu(Register rd, Register rj, int32_t si12) {
+ GenImm(LD_HU, si12, rj, rd, 12);
+}
+
+void Assembler::ld_wu(Register rd, Register rj, int32_t si12) {
+ GenImm(LD_WU, si12, rj, rd, 12);
+}
+
+void Assembler::st_b(Register rd, Register rj, int32_t si12) {
+ GenImm(ST_B, si12, rj, rd, 12);
+}
+
+void Assembler::st_h(Register rd, Register rj, int32_t si12) {
+ GenImm(ST_H, si12, rj, rd, 12);
+}
+
+void Assembler::st_w(Register rd, Register rj, int32_t si12) {
+ GenImm(ST_W, si12, rj, rd, 12);
+}
+
+void Assembler::st_d(Register rd, Register rj, int32_t si12) {
+ GenImm(ST_D, si12, rj, rd, 12);
+}
+
+void Assembler::ldx_b(Register rd, Register rj, Register rk) {
+ GenRegister(LDX_B, rk, rj, rd);
+}
+
+void Assembler::ldx_h(Register rd, Register rj, Register rk) {
+ GenRegister(LDX_H, rk, rj, rd);
+}
+
+void Assembler::ldx_w(Register rd, Register rj, Register rk) {
+ GenRegister(LDX_W, rk, rj, rd);
+}
+
+void Assembler::ldx_d(Register rd, Register rj, Register rk) {
+ GenRegister(LDX_D, rk, rj, rd);
+}
+
+void Assembler::ldx_bu(Register rd, Register rj, Register rk) {
+ GenRegister(LDX_BU, rk, rj, rd);
+}
+
+void Assembler::ldx_hu(Register rd, Register rj, Register rk) {
+ GenRegister(LDX_HU, rk, rj, rd);
+}
+
+void Assembler::ldx_wu(Register rd, Register rj, Register rk) {
+ GenRegister(LDX_WU, rk, rj, rd);
+}
+
+void Assembler::stx_b(Register rd, Register rj, Register rk) {
+ GenRegister(STX_B, rk, rj, rd);
+}
+
+void Assembler::stx_h(Register rd, Register rj, Register rk) {
+ GenRegister(STX_H, rk, rj, rd);
+}
+
+void Assembler::stx_w(Register rd, Register rj, Register rk) {
+ GenRegister(STX_W, rk, rj, rd);
+}
+
+void Assembler::stx_d(Register rd, Register rj, Register rk) {
+ GenRegister(STX_D, rk, rj, rd);
+}
+
+void Assembler::ldptr_w(Register rd, Register rj, int32_t si14) {
+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
+ GenImm(LDPTR_W, si14 >> 2, rj, rd, 14);
+}
+
+void Assembler::ldptr_d(Register rd, Register rj, int32_t si14) {
+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
+ GenImm(LDPTR_D, si14 >> 2, rj, rd, 14);
+}
+
+void Assembler::stptr_w(Register rd, Register rj, int32_t si14) {
+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
+ GenImm(STPTR_W, si14 >> 2, rj, rd, 14);
+}
+
+void Assembler::stptr_d(Register rd, Register rj, int32_t si14) {
+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
+ GenImm(STPTR_D, si14 >> 2, rj, rd, 14);
+}
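+
+// The ldptr/stptr family takes a byte offset that must be 4-byte aligned
+// and fit in 16 signed bits (checked above); si14 >> 2 converts it to the
+// 14-bit word offset that is actually encoded, giving a +-32KB range.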
+
+void Assembler::amswap_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMSWAP_W, rk, rj, rd);
+}
+
+void Assembler::amswap_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMSWAP_D, rk, rj, rd);
+}
+
+void Assembler::amadd_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMADD_W, rk, rj, rd);
+}
+
+void Assembler::amadd_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMADD_D, rk, rj, rd);
+}
+
+void Assembler::amand_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMAND_W, rk, rj, rd);
+}
+
+void Assembler::amand_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMAND_D, rk, rj, rd);
+}
+
+void Assembler::amor_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMOR_W, rk, rj, rd);
+}
+
+void Assembler::amor_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMOR_D, rk, rj, rd);
+}
+
+void Assembler::amxor_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMXOR_W, rk, rj, rd);
+}
+
+void Assembler::amxor_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMXOR_D, rk, rj, rd);
+}
+
+void Assembler::ammax_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMMAX_W, rk, rj, rd);
+}
+
+void Assembler::ammax_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMMAX_D, rk, rj, rd);
+}
+
+void Assembler::ammin_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMMIN_W, rk, rj, rd);
+}
+
+void Assembler::ammin_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMMIN_D, rk, rj, rd);
+}
+
+void Assembler::ammax_wu(Register rd, Register rk, Register rj) {
+ GenRegister(AMMAX_WU, rk, rj, rd);
+}
+
+void Assembler::ammax_du(Register rd, Register rk, Register rj) {
+ GenRegister(AMMAX_DU, rk, rj, rd);
+}
+
+void Assembler::ammin_wu(Register rd, Register rk, Register rj) {
+ GenRegister(AMMIN_WU, rk, rj, rd);
+}
+
+void Assembler::ammin_du(Register rd, Register rk, Register rj) {
+ GenRegister(AMMIN_DU, rk, rj, rd);
+}
+
+void Assembler::amswap_db_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMSWAP_DB_W, rk, rj, rd);
+}
+
+void Assembler::amswap_db_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMSWAP_DB_D, rk, rj, rd);
+}
+
+void Assembler::amadd_db_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMADD_DB_W, rk, rj, rd);
+}
+
+void Assembler::amadd_db_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMADD_DB_D, rk, rj, rd);
+}
+
+void Assembler::amand_db_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMAND_DB_W, rk, rj, rd);
+}
+
+void Assembler::amand_db_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMAND_DB_D, rk, rj, rd);
+}
+
+void Assembler::amor_db_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMOR_DB_W, rk, rj, rd);
+}
+
+void Assembler::amor_db_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMOR_DB_D, rk, rj, rd);
+}
+
+void Assembler::amxor_db_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMXOR_DB_W, rk, rj, rd);
+}
+
+void Assembler::amxor_db_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMXOR_DB_D, rk, rj, rd);
+}
+
+void Assembler::ammax_db_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMMAX_DB_W, rk, rj, rd);
+}
+
+void Assembler::ammax_db_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMMAX_DB_D, rk, rj, rd);
+}
+
+void Assembler::ammin_db_w(Register rd, Register rk, Register rj) {
+ GenRegister(AMMIN_DB_W, rk, rj, rd);
+}
+
+void Assembler::ammin_db_d(Register rd, Register rk, Register rj) {
+ GenRegister(AMMIN_DB_D, rk, rj, rd);
+}
+
+void Assembler::ammax_db_wu(Register rd, Register rk, Register rj) {
+ GenRegister(AMMAX_DB_WU, rk, rj, rd);
+}
+
+void Assembler::ammax_db_du(Register rd, Register rk, Register rj) {
+ GenRegister(AMMAX_DB_DU, rk, rj, rd);
+}
+
+void Assembler::ammin_db_wu(Register rd, Register rk, Register rj) {
+ GenRegister(AMMIN_DB_WU, rk, rj, rd);
+}
+
+void Assembler::ammin_db_du(Register rd, Register rk, Register rj) {
+ GenRegister(AMMIN_DB_DU, rk, rj, rd);
+}
+
+void Assembler::ll_w(Register rd, Register rj, int32_t si14) {
+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
+ GenImm(LL_W, si14 >> 2, rj, rd, 14);
+}
+
+void Assembler::ll_d(Register rd, Register rj, int32_t si14) {
+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
+ GenImm(LL_D, si14 >> 2, rj, rd, 14);
+}
+
+void Assembler::sc_w(Register rd, Register rj, int32_t si14) {
+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
+ GenImm(SC_W, si14 >> 2, rj, rd, 14);
+}
+
+void Assembler::sc_d(Register rd, Register rj, int32_t si14) {
+ DCHECK(is_int16(si14) && ((si14 & 0x3) == 0));
+ GenImm(SC_D, si14 >> 2, rj, rd, 14);
+}
+
+void Assembler::dbar(int32_t hint) { GenImm(DBAR, hint); }
+
+void Assembler::ibar(int32_t hint) { GenImm(IBAR, hint); }
+
+// Break instruction.
+void Assembler::break_(uint32_t code, bool break_as_stop) {
+ DCHECK(
+ (break_as_stop && code <= kMaxStopCode && code > kMaxWatchpointCode) ||
+ (!break_as_stop && (code > kMaxStopCode || code <= kMaxWatchpointCode)));
+ GenImm(BREAK, code);
+}
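+
+// Stop codes live in the range (kMaxWatchpointCode, kMaxStopCode], while
+// plain break/watchpoint codes must fall outside it; the DCHECK above
+// enforces whichever case break_as_stop selects.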
+
+void Assembler::stop(uint32_t code) {
+ DCHECK_GT(code, kMaxWatchpointCode);
+ DCHECK_LE(code, kMaxStopCode);
+#if defined(V8_HOST_ARCH_LOONG64)
+ break_(0x4321);
+#else // V8_HOST_ARCH_LOONG64
+ break_(code, true);
+#endif
+}
+
+void Assembler::fadd_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FADD_S, fk, fj, fd);
+}
+
+void Assembler::fadd_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FADD_D, fk, fj, fd);
+}
+
+void Assembler::fsub_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FSUB_S, fk, fj, fd);
+}
+
+void Assembler::fsub_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FSUB_D, fk, fj, fd);
+}
+
+void Assembler::fmul_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMUL_S, fk, fj, fd);
+}
+
+void Assembler::fmul_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMUL_D, fk, fj, fd);
+}
+
+void Assembler::fdiv_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FDIV_S, fk, fj, fd);
+}
+
+void Assembler::fdiv_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FDIV_D, fk, fj, fd);
+}
+
+void Assembler::fmadd_s(FPURegister fd, FPURegister fj, FPURegister fk,
+ FPURegister fa) {
+ GenRegister(FMADD_S, fa, fk, fj, fd);
+}
+
+void Assembler::fmadd_d(FPURegister fd, FPURegister fj, FPURegister fk,
+ FPURegister fa) {
+ GenRegister(FMADD_D, fa, fk, fj, fd);
+}
+
+void Assembler::fmsub_s(FPURegister fd, FPURegister fj, FPURegister fk,
+ FPURegister fa) {
+ GenRegister(FMSUB_S, fa, fk, fj, fd);
+}
+
+void Assembler::fmsub_d(FPURegister fd, FPURegister fj, FPURegister fk,
+ FPURegister fa) {
+ GenRegister(FMSUB_D, fa, fk, fj, fd);
+}
+
+void Assembler::fnmadd_s(FPURegister fd, FPURegister fj, FPURegister fk,
+ FPURegister fa) {
+ GenRegister(FNMADD_S, fa, fk, fj, fd);
+}
+
+void Assembler::fnmadd_d(FPURegister fd, FPURegister fj, FPURegister fk,
+ FPURegister fa) {
+ GenRegister(FNMADD_D, fa, fk, fj, fd);
+}
+
+void Assembler::fnmsub_s(FPURegister fd, FPURegister fj, FPURegister fk,
+ FPURegister fa) {
+ GenRegister(FNMSUB_S, fa, fk, fj, fd);
+}
+
+void Assembler::fnmsub_d(FPURegister fd, FPURegister fj, FPURegister fk,
+ FPURegister fa) {
+ GenRegister(FNMSUB_D, fa, fk, fj, fd);
+}
+
+void Assembler::fmax_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMAX_S, fk, fj, fd);
+}
+
+void Assembler::fmax_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMAX_D, fk, fj, fd);
+}
+
+void Assembler::fmin_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMIN_S, fk, fj, fd);
+}
+
+void Assembler::fmin_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMIN_D, fk, fj, fd);
+}
+
+void Assembler::fmaxa_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMAXA_S, fk, fj, fd);
+}
+
+void Assembler::fmaxa_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMAXA_D, fk, fj, fd);
+}
+
+void Assembler::fmina_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMINA_S, fk, fj, fd);
+}
+
+void Assembler::fmina_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FMINA_D, fk, fj, fd);
+}
+
+void Assembler::fabs_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FABS_S, fj, fd);
+}
+
+void Assembler::fabs_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FABS_D, fj, fd);
+}
+
+void Assembler::fneg_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FNEG_S, fj, fd);
+}
+
+void Assembler::fneg_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FNEG_D, fj, fd);
+}
+
+void Assembler::fsqrt_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FSQRT_S, fj, fd);
+}
+
+void Assembler::fsqrt_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FSQRT_D, fj, fd);
+}
+
+void Assembler::frecip_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FRECIP_S, fj, fd);
+}
+
+void Assembler::frecip_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FRECIP_D, fj, fd);
+}
+
+void Assembler::frsqrt_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FRSQRT_S, fj, fd);
+}
+
+void Assembler::frsqrt_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FRSQRT_D, fj, fd);
+}
+
+void Assembler::fscaleb_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FSCALEB_S, fk, fj, fd);
+}
+
+void Assembler::fscaleb_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FSCALEB_D, fk, fj, fd);
+}
+
+void Assembler::flogb_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FLOGB_S, fj, fd);
+}
+
+void Assembler::flogb_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FLOGB_D, fj, fd);
+}
+
+void Assembler::fcopysign_s(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FCOPYSIGN_S, fk, fj, fd);
+}
+
+void Assembler::fcopysign_d(FPURegister fd, FPURegister fj, FPURegister fk) {
+ GenRegister(FCOPYSIGN_D, fk, fj, fd);
+}
+
+void Assembler::fclass_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FCLASS_S, fj, fd);
+}
+
+void Assembler::fclass_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FCLASS_D, fj, fd);
+}
+
+void Assembler::fcmp_cond_s(FPUCondition cc, FPURegister fj, FPURegister fk,
+ CFRegister cd) {
+ GenCmp(FCMP_COND_S, cc, fk, fj, cd);
+}
+
+void Assembler::fcmp_cond_d(FPUCondition cc, FPURegister fj, FPURegister fk,
+ CFRegister cd) {
+ GenCmp(FCMP_COND_D, cc, fk, fj, cd);
+}
+
+void Assembler::fcvt_s_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FCVT_S_D, fj, fd);
+}
+
+void Assembler::fcvt_d_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FCVT_D_S, fj, fd);
+}
+
+void Assembler::ffint_s_w(FPURegister fd, FPURegister fj) {
+ GenRegister(FFINT_S_W, fj, fd);
+}
+
+void Assembler::ffint_s_l(FPURegister fd, FPURegister fj) {
+ GenRegister(FFINT_S_L, fj, fd);
+}
+
+void Assembler::ffint_d_w(FPURegister fd, FPURegister fj) {
+ GenRegister(FFINT_D_W, fj, fd);
+}
+
+void Assembler::ffint_d_l(FPURegister fd, FPURegister fj) {
+ GenRegister(FFINT_D_L, fj, fd);
+}
+
+void Assembler::ftint_w_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINT_W_S, fj, fd);
+}
+
+void Assembler::ftint_w_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINT_W_D, fj, fd);
+}
+
+void Assembler::ftint_l_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINT_L_S, fj, fd);
+}
+
+void Assembler::ftint_l_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINT_L_D, fj, fd);
+}
+
+void Assembler::ftintrm_w_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRM_W_S, fj, fd);
+}
+
+void Assembler::ftintrm_w_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRM_W_D, fj, fd);
+}
+
+void Assembler::ftintrm_l_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRM_L_S, fj, fd);
+}
+
+void Assembler::ftintrm_l_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRM_L_D, fj, fd);
+}
+
+void Assembler::ftintrp_w_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRP_W_S, fj, fd);
+}
+
+void Assembler::ftintrp_w_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRP_W_D, fj, fd);
+}
+
+void Assembler::ftintrp_l_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRP_L_S, fj, fd);
+}
+
+void Assembler::ftintrp_l_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRP_L_D, fj, fd);
+}
+
+void Assembler::ftintrz_w_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRZ_W_S, fj, fd);
+}
+
+void Assembler::ftintrz_w_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRZ_W_D, fj, fd);
+}
+
+void Assembler::ftintrz_l_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRZ_L_S, fj, fd);
+}
+
+void Assembler::ftintrz_l_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRZ_L_D, fj, fd);
+}
+
+void Assembler::ftintrne_w_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRNE_W_S, fj, fd);
+}
+
+void Assembler::ftintrne_w_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRNE_W_D, fj, fd);
+}
+
+void Assembler::ftintrne_l_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRNE_L_S, fj, fd);
+}
+
+void Assembler::ftintrne_l_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FTINTRNE_L_D, fj, fd);
+}
+
+void Assembler::frint_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FRINT_S, fj, fd);
+}
+
+void Assembler::frint_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FRINT_D, fj, fd);
+}
+
+void Assembler::fmov_s(FPURegister fd, FPURegister fj) {
+ GenRegister(FMOV_S, fj, fd);
+}
+
+void Assembler::fmov_d(FPURegister fd, FPURegister fj) {
+ GenRegister(FMOV_D, fj, fd);
+}
+
+void Assembler::fsel(CFRegister ca, FPURegister fd, FPURegister fj,
+ FPURegister fk) {
+ GenSel(FSEL, ca, fk, fj, fd);
+}
+
+void Assembler::movgr2fr_w(FPURegister fd, Register rj) {
+ GenRegister(MOVGR2FR_W, rj, fd);
+}
+
+void Assembler::movgr2fr_d(FPURegister fd, Register rj) {
+ GenRegister(MOVGR2FR_D, rj, fd);
+}
+
+void Assembler::movgr2frh_w(FPURegister fd, Register rj) {
+ GenRegister(MOVGR2FRH_W, rj, fd);
+}
+
+void Assembler::movfr2gr_s(Register rd, FPURegister fj) {
+ GenRegister(MOVFR2GR_S, fj, rd);
+}
+
+void Assembler::movfr2gr_d(Register rd, FPURegister fj) {
+ GenRegister(MOVFR2GR_D, fj, rd);
+}
+
+void Assembler::movfrh2gr_s(Register rd, FPURegister fj) {
+ GenRegister(MOVFRH2GR_S, fj, rd);
+}
+
+void Assembler::movgr2fcsr(Register rj, FPUControlRegister fcsr) {
+ GenRegister(MOVGR2FCSR, rj, fcsr);
+}
+
+void Assembler::movfcsr2gr(Register rd, FPUControlRegister fcsr) {
+ GenRegister(MOVFCSR2GR, fcsr, rd);
+}
+
+void Assembler::movfr2cf(CFRegister cd, FPURegister fj) {
+ GenRegister(MOVFR2CF, fj, cd);
+}
+
+void Assembler::movcf2fr(FPURegister fd, CFRegister cj) {
+ GenRegister(MOVCF2FR, cj, fd);
+}
+
+void Assembler::movgr2cf(CFRegister cd, Register rj) {
+ GenRegister(MOVGR2CF, rj, cd);
+}
+
+void Assembler::movcf2gr(Register rd, CFRegister cj) {
+ GenRegister(MOVCF2GR, cj, rd);
+}
+
+void Assembler::fld_s(FPURegister fd, Register rj, int32_t si12) {
+ GenImm(FLD_S, si12, rj, fd);
+}
+
+void Assembler::fld_d(FPURegister fd, Register rj, int32_t si12) {
+ GenImm(FLD_D, si12, rj, fd);
+}
+
+void Assembler::fst_s(FPURegister fd, Register rj, int32_t si12) {
+ GenImm(FST_S, si12, rj, fd);
+}
+
+void Assembler::fst_d(FPURegister fd, Register rj, int32_t si12) {
+ GenImm(FST_D, si12, rj, fd);
+}
+
+void Assembler::fldx_s(FPURegister fd, Register rj, Register rk) {
+ GenRegister(FLDX_S, rk, rj, fd);
+}
+
+void Assembler::fldx_d(FPURegister fd, Register rj, Register rk) {
+ GenRegister(FLDX_D, rk, rj, fd);
+}
+
+void Assembler::fstx_s(FPURegister fd, Register rj, Register rk) {
+ GenRegister(FSTX_S, rk, rj, fd);
+}
+
+void Assembler::fstx_d(FPURegister fd, Register rj, Register rk) {
+ GenRegister(FSTX_D, rk, rj, fd);
+}
+
+void Assembler::AdjustBaseAndOffset(MemOperand* src) {
+ // is_int12 must be passed a signed value, hence the static cast below.
+ if ((!src->hasIndexReg() && is_int12(src->offset())) || src->hasIndexReg()) {
+ return;
+ }
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ if (is_uint12(static_cast<int32_t>(src->offset()))) {
+ ori(scratch, zero_reg, src->offset() & kImm12Mask);
+ } else {
+ lu12i_w(scratch, src->offset() >> 12 & 0xfffff);
+ if (src->offset() & kImm12Mask) {
+ ori(scratch, scratch, src->offset() & kImm12Mask);
+ }
+ }
+ src->index_ = scratch;
+ src->offset_ = 0;
+}
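+
+// Illustrative rewrite (offset value assumed): MemOperand(a0, 0x12345) has
+// an offset that fits neither si12 nor an index register, so the code above
+// materializes it in a scratch register,
+//   lu12i_w  scratch, 0x12          // upper 20 bits of the offset
+//   ori      scratch, scratch, 0x345
+// and turns the operand into { base = a0, index = scratch, offset = 0 }.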
+
+int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
+ intptr_t pc_delta) {
+ DCHECK(RelocInfo::IsInternalReference(rmode));
+ int64_t* p = reinterpret_cast<int64_t*>(pc);
+ if (*p == kEndOfJumpChain) {
+ return 0; // Number of instructions patched.
+ }
+ *p += pc_delta;
+ return 2; // Number of instructions patched.
+}
+
+void Assembler::RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
+ intptr_t pc_delta) {
+ DCHECK(RelocInfo::IsRelativeCodeTarget(rmode));
+ Instr instr = instr_at(pc);
+ int32_t offset = instr & kImm26Mask;
+ offset = (((offset & 0x3ff) << 22 >> 6) | ((offset >> 10) & kImm16Mask)) << 2;
+ offset -= pc_delta;
+ uint32_t* p = reinterpret_cast<uint32_t*>(pc);
+ offset >>= 2;
+ offset = ((offset & kImm16Mask) << kRkShift) | ((offset & kImm26Mask) >> 16);
+ *p = (instr & ~kImm26Mask) | offset;
+ return;
+}
+
+void Assembler::FixOnHeapReferences(bool update_embedded_objects) {
+ if (!update_embedded_objects) return;
+ for (auto p : saved_handles_for_raw_object_ptr_) {
+ Address address = reinterpret_cast<Address>(buffer_->start() + p.first);
+ Handle<HeapObject> object(reinterpret_cast<Address*>(p.second));
+ set_target_value_at(address, object->ptr());
+ }
+}
+
+void Assembler::FixOnHeapReferencesToHandles() {
+ for (auto p : saved_handles_for_raw_object_ptr_) {
+ Address address = reinterpret_cast<Address>(buffer_->start() + p.first);
+ set_target_value_at(address, p.second);
+ }
+ saved_handles_for_raw_object_ptr_.clear();
+}
+
+void Assembler::GrowBuffer() {
+ bool previously_on_heap = buffer_->IsOnHeap();
+ int previous_on_heap_gc_count = OnHeapGCCount();
+
+ // Compute new buffer size.
+ int old_size = buffer_->size();
+ int new_size = std::min(2 * old_size, old_size + 1 * MB);
+
+  // Some internal data structures overflow for very large buffers, so
+  // callers must ensure that kMaximalBufferSize is not too large.
+ if (new_size > kMaximalBufferSize) {
+ V8::FatalProcessOutOfMemory(nullptr, "Assembler::GrowBuffer");
+ }
+
+ // Set up new buffer.
+ std::unique_ptr<AssemblerBuffer> new_buffer = buffer_->Grow(new_size);
+ DCHECK_EQ(new_size, new_buffer->size());
+ byte* new_start = new_buffer->start();
+
+ // Copy the data.
+ intptr_t pc_delta = new_start - buffer_start_;
+ intptr_t rc_delta = (new_start + new_size) - (buffer_start_ + old_size);
+ size_t reloc_size = (buffer_start_ + old_size) - reloc_info_writer.pos();
+ MemMove(new_start, buffer_start_, pc_offset());
+ MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
+ reloc_size);
+
+ // Switch buffers.
+ buffer_ = std::move(new_buffer);
+ buffer_start_ = new_start;
+ pc_ += pc_delta;
+ last_call_pc_ += pc_delta;
+ reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.last_pc() + pc_delta);
+
+ // None of our relocation types are pc relative pointing outside the code
+ // buffer nor pc absolute pointing inside the code buffer, so there is no need
+ // to relocate any emitted relocation entries.
+
+ // Relocate internal references.
+ for (auto pos : internal_reference_positions_) {
+ Address address = reinterpret_cast<intptr_t>(buffer_start_) + pos;
+ intptr_t internal_ref = ReadUnalignedValue<intptr_t>(address);
+ if (internal_ref != kEndOfJumpChain) {
+ internal_ref += pc_delta;
+ WriteUnalignedValue<intptr_t>(address, internal_ref);
+ }
+ }
+
+ // Fix on-heap references.
+ if (previously_on_heap) {
+ if (buffer_->IsOnHeap()) {
+ FixOnHeapReferences(previous_on_heap_gc_count != OnHeapGCCount());
+ } else {
+ FixOnHeapReferencesToHandles();
+ }
+ }
+}
+
+void Assembler::db(uint8_t data) {
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
+ *reinterpret_cast<uint8_t*>(pc_) = data;
+ pc_ += sizeof(uint8_t);
+}
+
+void Assembler::dd(uint32_t data, RelocInfo::Mode rmode) {
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
+ if (!RelocInfo::IsNone(rmode)) {
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
+ RecordRelocInfo(rmode);
+ }
+ *reinterpret_cast<uint32_t*>(pc_) = data;
+ pc_ += sizeof(uint32_t);
+}
+
+void Assembler::dq(uint64_t data, RelocInfo::Mode rmode) {
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
+ if (!RelocInfo::IsNone(rmode)) {
+ DCHECK(RelocInfo::IsDataEmbeddedObject(rmode) ||
+ RelocInfo::IsLiteralConstant(rmode));
+ RecordRelocInfo(rmode);
+ }
+ *reinterpret_cast<uint64_t*>(pc_) = data;
+ pc_ += sizeof(uint64_t);
+}
+
+void Assembler::dd(Label* label) {
+ if (!is_buffer_growth_blocked()) {
+ CheckBuffer();
+ }
+ uint64_t data;
+ if (label->is_bound()) {
+ data = reinterpret_cast<uint64_t>(buffer_start_ + label->pos());
+ } else {
+ data = jump_address(label);
+ unbound_labels_count_++;
+ internal_reference_positions_.insert(label->pos());
+ }
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+ EmitHelper(data);
+}
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+ if (!ShouldRecordRelocInfo(rmode)) return;
+ // We do not try to reuse pool constants.
+ RelocInfo rinfo(reinterpret_cast<Address>(pc_), rmode, data, Code());
+ DCHECK_GE(buffer_space(), kMaxRelocSize); // Too late to grow buffer here.
+ reloc_info_writer.Write(&rinfo);
+}
+
+void Assembler::BlockTrampolinePoolFor(int instructions) {
+ CheckTrampolinePoolQuick(instructions);
+ BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
+}
+
+void Assembler::CheckTrampolinePool() {
+ // Some small sequences of instructions must not be broken up by the
+ // insertion of a trampoline pool; such sequences are protected by setting
+ // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
+ // which are both checked here. Also, recursive calls to CheckTrampolinePool
+ // are blocked by trampoline_pool_blocked_nesting_.
+ if ((trampoline_pool_blocked_nesting_ > 0) ||
+ (pc_offset() < no_trampoline_pool_before_)) {
+ // Emission is currently blocked; make sure we try again as soon as
+ // possible.
+ if (trampoline_pool_blocked_nesting_ > 0) {
+ next_buffer_check_ = pc_offset() + kInstrSize;
+ } else {
+ next_buffer_check_ = no_trampoline_pool_before_;
+ }
+ return;
+ }
+
+ DCHECK(!trampoline_emitted_);
+ DCHECK_GE(unbound_labels_count_, 0);
+ if (unbound_labels_count_ > 0) {
+ // First we emit jump (2 instructions), then we emit trampoline pool.
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label after_pool;
+ b(&after_pool);
+ nop(); // TODO(LOONG_dev): remove this
+
+ int pool_start = pc_offset();
+ for (int i = 0; i < unbound_labels_count_; i++) {
+ {
+ b(&after_pool);
+ nop(); // TODO(LOONG_dev): remove this
+ }
+ }
+ nop();
+ trampoline_ = Trampoline(pool_start, unbound_labels_count_);
+ bind(&after_pool);
+
+ trampoline_emitted_ = true;
+      // As we are only going to emit the trampoline once, we need to prevent
+      // any further emission.
+ next_buffer_check_ = kMaxInt;
+ }
+ } else {
+ // Number of branches to unbound label at this point is zero, so we can
+ // move next buffer check to maximum.
+ next_buffer_check_ =
+ pc_offset() + kMax16BranchOffset - kTrampolineSlotsSize * 16;
+ }
+ return;
+}
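+
+// The pool emitted above has the shape
+//   b after_pool          ; fall-through path skips the pool
+//   nop
+//   slot 0: b after_pool  ; one 2-instruction slot per unbound label,
+//           nop           ; retargeted when a far branch is routed through it
+//   ...
+//   nop
+// after_pool: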
+
+Address Assembler::target_address_at(Address pc) {
+ Instr instr0 = instr_at(pc);
+ if (IsB(instr0)) {
+ int32_t offset = instr0 & kImm26Mask;
+ offset = (((offset & 0x3ff) << 22 >> 6) | ((offset >> 10) & kImm16Mask))
+ << 2;
+ return pc + offset;
+ }
+ Instr instr1 = instr_at(pc + 1 * kInstrSize);
+ Instr instr2 = instr_at(pc + 2 * kInstrSize);
+
+  // Interpret the 3 instructions of an address generated by li: see the
+  // listing in Assembler::set_target_value_at() just below.
+ DCHECK((IsLu12i_w(instr0) && (IsOri(instr1)) && (IsLu32i_d(instr2))));
+
+ // Assemble the 48 bit value.
+ uint64_t hi20 = ((uint64_t)(instr2 >> 5) & 0xfffff) << 32;
+ uint64_t mid20 = ((uint64_t)(instr0 >> 5) & 0xfffff) << 12;
+ uint64_t low12 = ((uint64_t)(instr1 >> 10) & 0xfff);
+ int64_t addr = static_cast<int64_t>(hi20 | mid20 | low12);
+
+ // Sign extend to get canonical address.
+ addr = (addr << 16) >> 16;
+ return static_cast<Address>(addr);
+}
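+
+// Illustrative decoding (target value assumed): for the sequence
+//   lu12i_w rd, 0x34567 ; ori rd, rd, 0xff8 ; lu32i_d rd, 0x7f12
+// the fields above give hi20 << 32 | mid20 << 12 | low12 =
+// 0x00007f12'34567ff8, and the (addr << 16) >> 16 arithmetic shift
+// propagates bit 47 through the upper 16 bits to canonicalize the address.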
+
+// On loong64, a target address is stored in a 3-instruction sequence:
+// 0: lu12i_w(rd, (j.imm64_ >> 12) & kImm20Mask);
+// 1: ori(rd, rd, j.imm64_ & kImm12Mask);
+// 2: lu32i_d(rd, (j.imm64_ >> 32) & kImm20Mask);
+//
+// Patching the address must replace all three instructions, and flush the
+// i-cache.
+//
+//
+void Assembler::set_target_value_at(Address pc, uint64_t target,
+ ICacheFlushMode icache_flush_mode) {
+  // There is an optimization where only 3 instructions are used to load an
+  // address in code on LOONG64, because only 48 bits of the address are
+  // effectively used. It relies on the fact that the upper [63:48] bits are
+  // not used for virtual address translation and have to be set according to
+  // the value of bit 47 in order to get a canonical address.
+#ifdef DEBUG
+ // Check we have the result from a li macro-instruction.
+ Instr instr0 = instr_at(pc);
+ Instr instr1 = instr_at(pc + kInstrSize);
+ Instr instr2 = instr_at(pc + kInstrSize * 2);
+  DCHECK((IsLu12i_w(instr0) && IsOri(instr1) && IsLu32i_d(instr2)) ||
+         IsB(instr0));
+#endif
+
+ Instr instr = instr_at(pc);
+ uint32_t* p = reinterpret_cast<uint32_t*>(pc);
+ if (IsB(instr)) {
+ int32_t offset = (target - pc) >> 2;
+ CHECK(is_int26(offset));
+ offset =
+ ((offset & kImm16Mask) << kRkShift) | ((offset & kImm26Mask) >> 16);
+ *p = (instr & ~kImm26Mask) | offset;
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ FlushInstructionCache(pc, kInstrSize);
+ }
+ return;
+ }
+ uint32_t rd_code = GetRd(instr);
+
+  // Must use 3 instructions to ensure patchable code.
+ // lu12i_w rd, middle-20.
+ // ori rd, rd, low-12.
+ // lu32i_d rd, high-20.
+ *p = LU12I_W | (((target >> 12) & 0xfffff) << kRjShift) | rd_code;
+ *(p + 1) =
+ ORI | (target & 0xfff) << kRkShift | (rd_code << kRjShift) | rd_code;
+ *(p + 2) = LU32I_D | (((target >> 32) & 0xfffff) << kRjShift) | rd_code;
+
+ if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+ FlushInstructionCache(pc, 3 * kInstrSize);
+ }
+}
+
+UseScratchRegisterScope::UseScratchRegisterScope(Assembler* assembler)
+ : available_(assembler->GetScratchRegisterList()),
+ old_available_(*available_) {}
+
+UseScratchRegisterScope::~UseScratchRegisterScope() {
+ *available_ = old_available_;
+}
+
+Register UseScratchRegisterScope::Acquire() {
+ DCHECK_NOT_NULL(available_);
+ DCHECK_NE(*available_, 0);
+ int index = static_cast<int>(base::bits::CountTrailingZeros32(*available_));
+ *available_ &= ~(1UL << index);
+
+ return Register::from_code(index);
+}
+
+bool UseScratchRegisterScope::hasAvailable() const { return *available_ != 0; }
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_LOONG64
diff --git a/deps/v8/src/codegen/loong64/assembler-loong64.h b/deps/v8/src/codegen/loong64/assembler-loong64.h
new file mode 100644
index 0000000000..b886b2ef43
--- /dev/null
+++ b/deps/v8/src/codegen/loong64/assembler-loong64.h
@@ -0,0 +1,1129 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_H_
+#define V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_H_
+
+#include <stdio.h>
+
+#include <memory>
+#include <set>
+
+#include "src/codegen/assembler.h"
+#include "src/codegen/external-reference.h"
+#include "src/codegen/label.h"
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/codegen/loong64/register-loong64.h"
+#include "src/codegen/machine-type.h"
+#include "src/objects/contexts.h"
+#include "src/objects/smi.h"
+
+namespace v8 {
+namespace internal {
+
+class SafepointTableBuilder;
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands.
+constexpr int kSmiShift = kSmiTagSize + kSmiShiftSize;
+constexpr uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
+// Class Operand represents a shifter operand in data processing instructions.
+class Operand {
+ public:
+ // Immediate.
+ V8_INLINE explicit Operand(int64_t immediate,
+ RelocInfo::Mode rmode = RelocInfo::NONE)
+ : rm_(no_reg), rmode_(rmode) {
+ value_.immediate = immediate;
+ }
+ V8_INLINE explicit Operand(const ExternalReference& f)
+ : rm_(no_reg), rmode_(RelocInfo::EXTERNAL_REFERENCE) {
+ value_.immediate = static_cast<int64_t>(f.address());
+ }
+ V8_INLINE explicit Operand(const char* s);
+ explicit Operand(Handle<HeapObject> handle);
+ V8_INLINE explicit Operand(Smi value) : rm_(no_reg), rmode_(RelocInfo::NONE) {
+ value_.immediate = static_cast<intptr_t>(value.ptr());
+ }
+
+ static Operand EmbeddedNumber(double number); // Smi or HeapNumber.
+ static Operand EmbeddedStringConstant(const StringConstantBase* str);
+
+ // Register.
+ V8_INLINE explicit Operand(Register rm) : rm_(rm) {}
+
+ // Return true if this is a register operand.
+ V8_INLINE bool is_reg() const;
+
+ inline int64_t immediate() const;
+
+ bool IsImmediate() const { return !rm_.is_valid(); }
+
+ HeapObjectRequest heap_object_request() const {
+ DCHECK(IsHeapObjectRequest());
+ return value_.heap_object_request;
+ }
+
+ bool IsHeapObjectRequest() const {
+ DCHECK_IMPLIES(is_heap_object_request_, IsImmediate());
+ DCHECK_IMPLIES(is_heap_object_request_,
+ rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT ||
+ rmode_ == RelocInfo::CODE_TARGET);
+ return is_heap_object_request_;
+ }
+
+ Register rm() const { return rm_; }
+
+ RelocInfo::Mode rmode() const { return rmode_; }
+
+ private:
+ Register rm_;
+ union Value {
+ Value() {}
+ HeapObjectRequest heap_object_request; // if is_heap_object_request_
+ int64_t immediate; // otherwise
+ } value_; // valid if rm_ == no_reg
+ bool is_heap_object_request_ = false;
+ RelocInfo::Mode rmode_;
+
+ friend class Assembler;
+ friend class MacroAssembler;
+};
+
+// Class MemOperand represents a memory operand in load and store instructions.
+// 1: base_reg + off_imm (si12 | si14 << 2)
+// 2: base_reg + offset_reg
+class V8_EXPORT_PRIVATE MemOperand {
+ public:
+ explicit MemOperand(Register rj, int32_t offset = 0);
+ explicit MemOperand(Register rj, Register offset = no_reg);
+ Register base() const { return base_; }
+ Register index() const { return index_; }
+ int32_t offset() const { return offset_; }
+
+ bool hasIndexReg() const { return index_ != no_reg; }
+
+ private:
+ Register base_; // base
+ Register index_; // index
+ int32_t offset_; // offset
+
+ friend class Assembler;
+};
+
+class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
+ public:
+ // Create an assembler. Instructions and relocation information are emitted
+ // into a buffer, with the instructions starting from the beginning and the
+ // relocation information starting from the end of the buffer. See CodeDesc
+ // for a detailed comment on the layout (globals.h).
+ //
+ // If the provided buffer is nullptr, the assembler allocates and grows its
+ // own buffer. Otherwise it takes ownership of the provided buffer.
+ explicit Assembler(const AssemblerOptions&,
+ std::unique_ptr<AssemblerBuffer> = {});
+
+ virtual ~Assembler() {}
+
+ // GetCode emits any pending (non-emitted) code and fills the descriptor desc.
+ static constexpr int kNoHandlerTable = 0;
+ static constexpr SafepointTableBuilder* kNoSafepointTable = nullptr;
+ void GetCode(Isolate* isolate, CodeDesc* desc,
+ SafepointTableBuilder* safepoint_table_builder,
+ int handler_table_offset);
+
+ // Convenience wrapper for code without safepoint or handler tables.
+ void GetCode(Isolate* isolate, CodeDesc* desc) {
+ GetCode(isolate, desc, kNoSafepointTable, kNoHandlerTable);
+ }
+
+ // This function is called when on-heap-compilation invariants are
+ // invalidated. For instance, when the assembler buffer grows or a GC happens
+ // between Code object allocation and Code object finalization.
+ void FixOnHeapReferences(bool update_embedded_objects = true);
+
+ // This function is called when we fallback from on-heap to off-heap
+ // compilation and patch on-heap references to handles.
+ void FixOnHeapReferencesToHandles();
+
+ // Unused on this architecture.
+ void MaybeEmitOutOfLineConstantPool() {}
+
+  // Loong64 uses BlockTrampolinePool to prevent generating a trampoline
+  // inside a continuous instruction block. The destructor of
+  // BlockTrampolinePool must check whether it needs to generate a trampoline
+  // immediately; if it did not, the branch range would go beyond the max
+  // branch offset, meaning the pc_offset after calling CheckTrampolinePool
+  // may not be the Call instruction's location. So we use last_call_pc here
+  // for the safepoint record.
+ int pc_offset_for_safepoint() {
+ return static_cast<int>(last_call_pc_ - buffer_start_);
+ }
+
+ // TODO(LOONG_dev): LOONG64 Check this comment
+ // Label operations & relative jumps (PPUM Appendix D).
+ //
+ // Takes a branch opcode (cc) and a label (L) and generates
+ // either a backward branch or a forward branch and links it
+ // to the label fixup chain. Usage:
+ //
+ // Label L; // unbound label
+ // j(cc, &L); // forward branch to unbound label
+ // bind(&L); // bind label to the current pc
+ // j(cc, &L); // backward branch to bound label
+ // bind(&L); // illegal: a label may be bound only once
+ //
+ // Note: The same Label can be used for forward and backward branches
+ // but it may be bound only once.
+ void bind(Label* L); // Binds an unbound label L to current code position.
+
+ enum OffsetSize : int { kOffset26 = 26, kOffset21 = 21, kOffset16 = 16 };
+
+  // Determines whether the Label is bound and near enough that a branch
+  // instruction can be used to reach it, instead of a jump instruction.
+  // The c suffix means conditional branch, the a suffix means always branch.
+ bool is_near_c(Label* L);
+ bool is_near(Label* L, OffsetSize bits);
+ bool is_near_a(Label* L);
+
+ int BranchOffset(Instr instr);
+
+ // Returns the branch offset to the given label from the current code
+ // position. Links the label to the current position if it is still unbound.
+  // The OffsetSize parameter selects which branch format (16-, 21- or 26-bit
+  // offset field) the offset must fit.
+ int32_t branch_offset_helper(Label* L, OffsetSize bits);
+ inline int32_t branch_offset(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset16);
+ }
+ inline int32_t branch_offset21(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset21);
+ }
+ inline int32_t branch_offset26(Label* L) {
+ return branch_offset_helper(L, OffsetSize::kOffset26);
+ }
+ inline int32_t shifted_branch_offset(Label* L) {
+ return branch_offset(L) >> 2;
+ }
+ inline int32_t shifted_branch_offset21(Label* L) {
+ return branch_offset21(L) >> 2;
+ }
+ inline int32_t shifted_branch_offset26(Label* L) {
+ return branch_offset26(L) >> 2;
+ }
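+
+  // Branch instructions encode word offsets, hence the >> 2 in the
+  // shifted_* helpers above; the underlying byte offset is always
+  // 4-byte aligned (DCHECKed in branch_offset_helper).
+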
+ uint64_t jump_address(Label* L);
+ uint64_t jump_offset(Label* L);
+ uint64_t branch_long_offset(Label* L);
+
+  // Puts a label's target address at the given position.
+ // The high 8 bits are set to zero.
+ void label_at_put(Label* L, int at_offset);
+
+ // Read/Modify the code target address in the branch/call instruction at pc.
+ static Address target_address_at(Address pc);
+ V8_INLINE static void set_target_address_at(
+ Address pc, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
+ set_target_value_at(pc, target, icache_flush_mode);
+ }
+ // On LOONG64 there is no Constant Pool so we skip that parameter.
+ V8_INLINE static Address target_address_at(Address pc,
+ Address constant_pool) {
+ return target_address_at(pc);
+ }
+ V8_INLINE static void set_target_address_at(
+ Address pc, Address constant_pool, Address target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED) {
+ set_target_address_at(pc, target, icache_flush_mode);
+ }
+
+ static void set_target_value_at(
+ Address pc, uint64_t target,
+ ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+
+ static void JumpLabelToJumpRegister(Address pc);
+
+ // This sets the branch destination (which gets loaded at the call address).
+ // This is for calls and branches within generated code. The serializer
+  // has already deserialized the address-load (lu12i_w/ori) instructions etc.
+ inline static void deserialization_set_special_target_at(
+ Address instruction_payload, Code code, Address target);
+
+ // Get the size of the special target encoded at 'instruction_payload'.
+ inline static int deserialization_special_target_size(
+ Address instruction_payload);
+
+ // This sets the internal reference at the pc.
+ inline static void deserialization_set_target_internal_reference_at(
+ Address pc, Address target,
+ RelocInfo::Mode mode = RelocInfo::INTERNAL_REFERENCE);
+
+  // Here we are patching the address in the lu12i_w/ori/lu32i_d sequence.
+  // These values are used in the serialization process and must be zero for
+  // the LOONG platform, as Code, Embedded Object or External-reference
+  // pointers are split across consecutive instructions and don't exist
+  // separately in the code, so the serializer should not step forwards in
+  // memory after a target is resolved and written.
+ static constexpr int kSpecialTargetSize = 0;
+
+  // Number of consecutive instructions used to store a 32bit/64bit constant.
+  // This constant is used by the RelocInfo::target_address_address() function
+  // to tell the serializer the address of the instruction that follows the
+  // lu12i_w/ori/lu32i_d sequence.
+ // TODO(LOONG_dev): check this
+ static constexpr int kInstructionsFor64BitConstant = 4;
+
+ // Max offset for instructions with 16-bit offset field
+ static constexpr int kMax16BranchOffset = (1 << (18 - 1)) - 1;
+
+ // Max offset for instructions with 21-bit offset field
+ static constexpr int kMax21BranchOffset = (1 << (23 - 1)) - 1;
+
+ // Max offset for compact branch instructions with 26-bit offset field
+ static constexpr int kMax26BranchOffset = (1 << (28 - 1)) - 1;
+
+ static constexpr int kTrampolineSlotsSize = 2 * kInstrSize;
+
+ RegList* GetScratchRegisterList() { return &scratch_register_list_; }
+
+ // ---------------------------------------------------------------------------
+ // Code generation.
+
+ // Insert the smallest number of nop instructions
+ // possible to align the pc offset to a multiple
+ // of m. m must be a power of 2 (>= 4).
+ void Align(int m);
+ // Insert the smallest number of zero bytes possible to align the pc offset
+  // to a multiple of m. m must be a power of 2 (>= 2).
+ void DataAlign(int m);
+ // Aligns code to something that's optimal for a jump target for the platform.
+ void CodeTargetAlign();
+ void LoopHeaderAlign() { CodeTargetAlign(); }
+
+ // Different nop operations are used by the code generator to detect certain
+ // states of the generated code.
+ enum NopMarkerTypes {
+ NON_MARKING_NOP = 0,
+ DEBUG_BREAK_NOP,
+ // IC markers.
+ PROPERTY_ACCESS_INLINED,
+ PROPERTY_ACCESS_INLINED_CONTEXT,
+ PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
+ // Helper values.
+ LAST_CODE_MARKER,
+ FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
+ };
+
+ // Type == 0 is the default non-marking nop. For LoongArch this is an
+ // andi(zero_reg, zero_reg, 0).
+ void nop(unsigned int type = 0) {
+ DCHECK_LT(type, 32);
+ andi(zero_reg, zero_reg, type);
+ }
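+
+ // Usage sketch (illustrative, assuming an Assembler 'assm'): emitting a
+ // marking nop that IsNop() can later recognize:
+ //   assm.nop(Assembler::DEBUG_BREAK_NOP);  // andi(zero_reg, zero_reg, 1)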
+
+ // --------Branch-and-jump-instructions----------
+ // We don't use likely variant of instructions.
+ void b(int32_t offset);
+ inline void b(Label* L) { b(shifted_branch_offset26(L)); }
+ void bl(int32_t offset);
+ inline void bl(Label* L) { bl(shifted_branch_offset26(L)); }
+
+ void beq(Register rj, Register rd, int32_t offset);
+ inline void beq(Register rj, Register rd, Label* L) {
+ beq(rj, rd, shifted_branch_offset(L));
+ }
+ void bne(Register rj, Register rd, int32_t offset);
+ inline void bne(Register rj, Register rd, Label* L) {
+ bne(rj, rd, shifted_branch_offset(L));
+ }
+ void blt(Register rj, Register rd, int32_t offset);
+ inline void blt(Register rj, Register rd, Label* L) {
+ blt(rj, rd, shifted_branch_offset(L));
+ }
+ void bge(Register rj, Register rd, int32_t offset);
+ inline void bge(Register rj, Register rd, Label* L) {
+ bge(rj, rd, shifted_branch_offset(L));
+ }
+ void bltu(Register rj, Register rd, int32_t offset);
+ inline void bltu(Register rj, Register rd, Label* L) {
+ bltu(rj, rd, shifted_branch_offset(L));
+ }
+ void bgeu(Register rj, Register rd, int32_t offset);
+ inline void bgeu(Register rj, Register rd, Label* L) {
+ bgeu(rj, rd, shifted_branch_offset(L));
+ }
+ void beqz(Register rj, int32_t offset);
+ inline void beqz(Register rj, Label* L) {
+ beqz(rj, shifted_branch_offset21(L));
+ }
+ void bnez(Register rj, int32_t offset);
+ inline void bnez(Register rj, Label* L) {
+ bnez(rj, shifted_branch_offset21(L));
+ }
+
+ void jirl(Register rd, Register rj, int32_t offset);
+
+ void bceqz(CFRegister cj, int32_t si21);
+ inline void bceqz(CFRegister cj, Label* L) {
+ bceqz(cj, shifted_branch_offset21(L));
+ }
+ void bcnez(CFRegister cj, int32_t si21);
+ inline void bcnez(CFRegister cj, Label* L) {
+ bcnez(cj, shifted_branch_offset21(L));
+ }
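+
+ // Minimal usage sketch (assuming an Assembler 'assm' with the usual bind()
+ // method): a counted loop built from a bound label and the 21-bit
+ // compare-with-zero branch:
+ //   Label loop;
+ //   assm.bind(&loop);
+ //   assm.addi_d(a0, a0, -1);  // decrement counter
+ //   assm.bnez(a0, &loop);     // loop while a0 != 0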
+
+ // -------Data-processing-instructions---------
+
+ // Arithmetic.
+ void add_w(Register rd, Register rj, Register rk);
+ void add_d(Register rd, Register rj, Register rk);
+ void sub_w(Register rd, Register rj, Register rk);
+ void sub_d(Register rd, Register rj, Register rk);
+
+ void addi_w(Register rd, Register rj, int32_t si12);
+ void addi_d(Register rd, Register rj, int32_t si12);
+
+ void addu16i_d(Register rd, Register rj, int32_t si16);
+
+ void alsl_w(Register rd, Register rj, Register rk, int32_t sa2);
+ void alsl_wu(Register rd, Register rj, Register rk, int32_t sa2);
+ void alsl_d(Register rd, Register rj, Register rk, int32_t sa2);
+
+ void lu12i_w(Register rd, int32_t si20);
+ void lu32i_d(Register rd, int32_t si20);
+ void lu52i_d(Register rd, Register rj, int32_t si12);
+
+ void slt(Register rd, Register rj, Register rk);
+ void sltu(Register rd, Register rj, Register rk);
+ void slti(Register rd, Register rj, int32_t si12);
+ void sltui(Register rd, Register rj, int32_t si12);
+
+ void pcaddi(Register rd, int32_t si20);
+ void pcaddu12i(Register rd, int32_t si20);
+ void pcaddu18i(Register rd, int32_t si20);
+ void pcalau12i(Register rd, int32_t si20);
+
+ void and_(Register rd, Register rj, Register rk);
+ void or_(Register rd, Register rj, Register rk);
+ void xor_(Register rd, Register rj, Register rk);
+ void nor(Register rd, Register rj, Register rk);
+ void andn(Register rd, Register rj, Register rk);
+ void orn(Register rd, Register rj, Register rk);
+
+ void andi(Register rd, Register rj, int32_t ui12);
+ void ori(Register rd, Register rj, int32_t ui12);
+ void xori(Register rd, Register rj, int32_t ui12);
+
+ void mul_w(Register rd, Register rj, Register rk);
+ void mulh_w(Register rd, Register rj, Register rk);
+ void mulh_wu(Register rd, Register rj, Register rk);
+ void mul_d(Register rd, Register rj, Register rk);
+ void mulh_d(Register rd, Register rj, Register rk);
+ void mulh_du(Register rd, Register rj, Register rk);
+
+ void mulw_d_w(Register rd, Register rj, Register rk);
+ void mulw_d_wu(Register rd, Register rj, Register rk);
+
+ void div_w(Register rd, Register rj, Register rk);
+ void mod_w(Register rd, Register rj, Register rk);
+ void div_wu(Register rd, Register rj, Register rk);
+ void mod_wu(Register rd, Register rj, Register rk);
+ void div_d(Register rd, Register rj, Register rk);
+ void mod_d(Register rd, Register rj, Register rk);
+ void div_du(Register rd, Register rj, Register rk);
+ void mod_du(Register rd, Register rj, Register rk);
+
+ // Shifts.
+ void sll_w(Register rd, Register rj, Register rk);
+ void srl_w(Register rd, Register rj, Register rk);
+ void sra_w(Register rd, Register rj, Register rk);
+ void rotr_w(Register rd, Register rj, Register rk);
+
+ void slli_w(Register rd, Register rj, int32_t ui5);
+ void srli_w(Register rd, Register rj, int32_t ui5);
+ void srai_w(Register rd, Register rj, int32_t ui5);
+ void rotri_w(Register rd, Register rj, int32_t ui5);
+
+ void sll_d(Register rd, Register rj, Register rk);
+ void srl_d(Register rd, Register rj, Register rk);
+ void sra_d(Register rd, Register rj, Register rk);
+ void rotr_d(Register rd, Register rj, Register rk);
+
+ void slli_d(Register rd, Register rj, int32_t ui6);
+ void srli_d(Register rd, Register rj, int32_t ui6);
+ void srai_d(Register rd, Register rj, int32_t ui6);
+ void rotri_d(Register rd, Register rj, int32_t ui6);
+
+ // Bit twiddling.
+ void ext_w_b(Register rd, Register rj);
+ void ext_w_h(Register rd, Register rj);
+
+ void clo_w(Register rd, Register rj);
+ void clz_w(Register rd, Register rj);
+ void cto_w(Register rd, Register rj);
+ void ctz_w(Register rd, Register rj);
+ void clo_d(Register rd, Register rj);
+ void clz_d(Register rd, Register rj);
+ void cto_d(Register rd, Register rj);
+ void ctz_d(Register rd, Register rj);
+
+ void bytepick_w(Register rd, Register rj, Register rk, int32_t sa2);
+ void bytepick_d(Register rd, Register rj, Register rk, int32_t sa3);
+
+ void revb_2h(Register rd, Register rj);
+ void revb_4h(Register rd, Register rj);
+ void revb_2w(Register rd, Register rj);
+ void revb_d(Register rd, Register rj);
+
+ void revh_2w(Register rd, Register rj);
+ void revh_d(Register rd, Register rj);
+
+ void bitrev_4b(Register rd, Register rj);
+ void bitrev_8b(Register rd, Register rj);
+
+ void bitrev_w(Register rd, Register rj);
+ void bitrev_d(Register rd, Register rj);
+
+ void bstrins_w(Register rd, Register rj, int32_t msbw, int32_t lsbw);
+ void bstrins_d(Register rd, Register rj, int32_t msbd, int32_t lsbd);
+
+ void bstrpick_w(Register rd, Register rj, int32_t msbw, int32_t lsbw);
+ void bstrpick_d(Register rd, Register rj, int32_t msbd, int32_t lsbd);
+
+ void maskeqz(Register rd, Register rj, Register rk);
+ void masknez(Register rd, Register rj, Register rk);
+
+ // Memory-instructions
+ void ld_b(Register rd, Register rj, int32_t si12);
+ void ld_h(Register rd, Register rj, int32_t si12);
+ void ld_w(Register rd, Register rj, int32_t si12);
+ void ld_d(Register rd, Register rj, int32_t si12);
+ void ld_bu(Register rd, Register rj, int32_t si12);
+ void ld_hu(Register rd, Register rj, int32_t si12);
+ void ld_wu(Register rd, Register rj, int32_t si12);
+ void st_b(Register rd, Register rj, int32_t si12);
+ void st_h(Register rd, Register rj, int32_t si12);
+ void st_w(Register rd, Register rj, int32_t si12);
+ void st_d(Register rd, Register rj, int32_t si12);
+
+ void ldx_b(Register rd, Register rj, Register rk);
+ void ldx_h(Register rd, Register rj, Register rk);
+ void ldx_w(Register rd, Register rj, Register rk);
+ void ldx_d(Register rd, Register rj, Register rk);
+ void ldx_bu(Register rd, Register rj, Register rk);
+ void ldx_hu(Register rd, Register rj, Register rk);
+ void ldx_wu(Register rd, Register rj, Register rk);
+ void stx_b(Register rd, Register rj, Register rk);
+ void stx_h(Register rd, Register rj, Register rk);
+ void stx_w(Register rd, Register rj, Register rk);
+ void stx_d(Register rd, Register rj, Register rk);
+
+ void ldptr_w(Register rd, Register rj, int32_t si14);
+ void ldptr_d(Register rd, Register rj, int32_t si14);
+ void stptr_w(Register rd, Register rj, int32_t si14);
+ void stptr_d(Register rd, Register rj, int32_t si14);
+
+ void amswap_w(Register rd, Register rk, Register rj);
+ void amswap_d(Register rd, Register rk, Register rj);
+ void amadd_w(Register rd, Register rk, Register rj);
+ void amadd_d(Register rd, Register rk, Register rj);
+ void amand_w(Register rd, Register rk, Register rj);
+ void amand_d(Register rd, Register rk, Register rj);
+ void amor_w(Register rd, Register rk, Register rj);
+ void amor_d(Register rd, Register rk, Register rj);
+ void amxor_w(Register rd, Register rk, Register rj);
+ void amxor_d(Register rd, Register rk, Register rj);
+ void ammax_w(Register rd, Register rk, Register rj);
+ void ammax_d(Register rd, Register rk, Register rj);
+ void ammin_w(Register rd, Register rk, Register rj);
+ void ammin_d(Register rd, Register rk, Register rj);
+ void ammax_wu(Register rd, Register rk, Register rj);
+ void ammax_du(Register rd, Register rk, Register rj);
+ void ammin_wu(Register rd, Register rk, Register rj);
+ void ammin_du(Register rd, Register rk, Register rj);
+
+ void amswap_db_w(Register rd, Register rk, Register rj);
+ void amswap_db_d(Register rd, Register rk, Register rj);
+ void amadd_db_w(Register rd, Register rk, Register rj);
+ void amadd_db_d(Register rd, Register rk, Register rj);
+ void amand_db_w(Register rd, Register rk, Register rj);
+ void amand_db_d(Register rd, Register rk, Register rj);
+ void amor_db_w(Register rd, Register rk, Register rj);
+ void amor_db_d(Register rd, Register rk, Register rj);
+ void amxor_db_w(Register rd, Register rk, Register rj);
+ void amxor_db_d(Register rd, Register rk, Register rj);
+ void ammax_db_w(Register rd, Register rk, Register rj);
+ void ammax_db_d(Register rd, Register rk, Register rj);
+ void ammin_db_w(Register rd, Register rk, Register rj);
+ void ammin_db_d(Register rd, Register rk, Register rj);
+ void ammax_db_wu(Register rd, Register rk, Register rj);
+ void ammax_db_du(Register rd, Register rk, Register rj);
+ void ammin_db_wu(Register rd, Register rk, Register rj);
+ void ammin_db_du(Register rd, Register rk, Register rj);
+
+ void ll_w(Register rd, Register rj, int32_t si14);
+ void ll_d(Register rd, Register rj, int32_t si14);
+ void sc_w(Register rd, Register rj, int32_t si14);
+ void sc_d(Register rd, Register rj, int32_t si14);
+
+ void dbar(int32_t hint);
+ void ibar(int32_t hint);
+
+ // Break instruction
+ void break_(uint32_t code, bool break_as_stop = false);
+ void stop(uint32_t code = kMaxStopCode);
+
+ // Arithmetic.
+ void fadd_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fadd_d(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fsub_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fsub_d(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fmul_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fmul_d(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fdiv_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fdiv_d(FPURegister fd, FPURegister fj, FPURegister fk);
+
+ void fmadd_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa);
+ void fmadd_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa);
+ void fmsub_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa);
+ void fmsub_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa);
+ void fnmadd_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa);
+ void fnmadd_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa);
+ void fnmsub_s(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa);
+ void fnmsub_d(FPURegister fd, FPURegister fj, FPURegister fk, FPURegister fa);
+
+ void fmax_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fmax_d(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fmin_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fmin_d(FPURegister fd, FPURegister fj, FPURegister fk);
+
+ void fmaxa_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fmaxa_d(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fmina_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fmina_d(FPURegister fd, FPURegister fj, FPURegister fk);
+
+ void fabs_s(FPURegister fd, FPURegister fj);
+ void fabs_d(FPURegister fd, FPURegister fj);
+ void fneg_s(FPURegister fd, FPURegister fj);
+ void fneg_d(FPURegister fd, FPURegister fj);
+
+ void fsqrt_s(FPURegister fd, FPURegister fj);
+ void fsqrt_d(FPURegister fd, FPURegister fj);
+ void frecip_s(FPURegister fd, FPURegister fj);
+ void frecip_d(FPURegister fd, FPURegister fj);
+ void frsqrt_s(FPURegister fd, FPURegister fj);
+ void frsqrt_d(FPURegister fd, FPURegister fj);
+
+ void fscaleb_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fscaleb_d(FPURegister fd, FPURegister fj, FPURegister fk);
+ void flogb_s(FPURegister fd, FPURegister fj);
+ void flogb_d(FPURegister fd, FPURegister fj);
+ void fcopysign_s(FPURegister fd, FPURegister fj, FPURegister fk);
+ void fcopysign_d(FPURegister fd, FPURegister fj, FPURegister fk);
+
+ void fclass_s(FPURegister fd, FPURegister fj);
+ void fclass_d(FPURegister fd, FPURegister fj);
+
+ void fcmp_cond_s(FPUCondition cc, FPURegister fj, FPURegister fk,
+ CFRegister cd);
+ void fcmp_cond_d(FPUCondition cc, FPURegister fj, FPURegister fk,
+ CFRegister cd);
+
+ void fcvt_s_d(FPURegister fd, FPURegister fj);
+ void fcvt_d_s(FPURegister fd, FPURegister fj);
+
+ void ffint_s_w(FPURegister fd, FPURegister fj);
+ void ffint_s_l(FPURegister fd, FPURegister fj);
+ void ffint_d_w(FPURegister fd, FPURegister fj);
+ void ffint_d_l(FPURegister fd, FPURegister fj);
+ void ftint_w_s(FPURegister fd, FPURegister fj);
+ void ftint_w_d(FPURegister fd, FPURegister fj);
+ void ftint_l_s(FPURegister fd, FPURegister fj);
+ void ftint_l_d(FPURegister fd, FPURegister fj);
+
+ void ftintrm_w_s(FPURegister fd, FPURegister fj);
+ void ftintrm_w_d(FPURegister fd, FPURegister fj);
+ void ftintrm_l_s(FPURegister fd, FPURegister fj);
+ void ftintrm_l_d(FPURegister fd, FPURegister fj);
+ void ftintrp_w_s(FPURegister fd, FPURegister fj);
+ void ftintrp_w_d(FPURegister fd, FPURegister fj);
+ void ftintrp_l_s(FPURegister fd, FPURegister fj);
+ void ftintrp_l_d(FPURegister fd, FPURegister fj);
+ void ftintrz_w_s(FPURegister fd, FPURegister fj);
+ void ftintrz_w_d(FPURegister fd, FPURegister fj);
+ void ftintrz_l_s(FPURegister fd, FPURegister fj);
+ void ftintrz_l_d(FPURegister fd, FPURegister fj);
+ void ftintrne_w_s(FPURegister fd, FPURegister fj);
+ void ftintrne_w_d(FPURegister fd, FPURegister fj);
+ void ftintrne_l_s(FPURegister fd, FPURegister fj);
+ void ftintrne_l_d(FPURegister fd, FPURegister fj);
+
+ void frint_s(FPURegister fd, FPURegister fj);
+ void frint_d(FPURegister fd, FPURegister fj);
+
+ void fmov_s(FPURegister fd, FPURegister fj);
+ void fmov_d(FPURegister fd, FPURegister fj);
+
+ void fsel(CFRegister ca, FPURegister fd, FPURegister fj, FPURegister fk);
+
+ void movgr2fr_w(FPURegister fd, Register rj);
+ void movgr2fr_d(FPURegister fd, Register rj);
+ void movgr2frh_w(FPURegister fd, Register rj);
+
+ void movfr2gr_s(Register rd, FPURegister fj);
+ void movfr2gr_d(Register rd, FPURegister fj);
+ void movfrh2gr_s(Register rd, FPURegister fj);
+
+ void movgr2fcsr(Register rj, FPUControlRegister fcsr = FCSR0);
+ void movfcsr2gr(Register rd, FPUControlRegister fcsr = FCSR0);
+
+ void movfr2cf(CFRegister cd, FPURegister fj);
+ void movcf2fr(FPURegister fd, CFRegister cj);
+
+ void movgr2cf(CFRegister cd, Register rj);
+ void movcf2gr(Register rd, CFRegister cj);
+
+ void fld_s(FPURegister fd, Register rj, int32_t si12);
+ void fld_d(FPURegister fd, Register rj, int32_t si12);
+ void fst_s(FPURegister fd, Register rj, int32_t si12);
+ void fst_d(FPURegister fd, Register rj, int32_t si12);
+
+ void fldx_s(FPURegister fd, Register rj, Register rk);
+ void fldx_d(FPURegister fd, Register rj, Register rk);
+ void fstx_s(FPURegister fd, Register rj, Register rk);
+ void fstx_d(FPURegister fd, Register rj, Register rk);
+
+ // Check the code size generated from label to here.
+ int SizeOfCodeGeneratedSince(Label* label) {
+ return pc_offset() - label->pos();
+ }
+
+ // Check the number of instructions generated from label to here.
+ int InstructionsGeneratedSince(Label* label) {
+ return SizeOfCodeGeneratedSince(label) / kInstrSize;
+ }
+
+ // Scope class for postponing trampoline pool generation.
+ class V8_NODISCARD BlockTrampolinePoolScope {
+ public:
+ explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockTrampolinePool();
+ }
+ ~BlockTrampolinePoolScope() { assem_->EndBlockTrampolinePool(); }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
+ };
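+
+ // Usage sketch: keeping a multi-instruction sequence contiguous by blocking
+ // trampoline pool emission while it is generated ('hi20'/'lo12' are
+ // hypothetical immediate halves):
+ //   {
+ //     BlockTrampolinePoolScope block_pools(&assm);
+ //     assm.lu12i_w(t0, hi20);
+ //     assm.ori(t0, t0, lo12);
+ //   }  // the pool check runs again once the scope closes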
+
+ // Class for postponing the assembly buffer growth. Typically used for
+ // sequences of instructions that must be emitted as a unit, before
+ // buffer growth (and relocation) can occur.
+ // This blocking scope is not nestable.
+ class V8_NODISCARD BlockGrowBufferScope {
+ public:
+ explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
+ assem_->StartBlockGrowBuffer();
+ }
+ ~BlockGrowBufferScope() { assem_->EndBlockGrowBuffer(); }
+
+ private:
+ Assembler* assem_;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
+ };
+
+ // Record a deoptimization reason that can be used by a log or cpu profiler.
+ // Use --trace-deopt to enable.
+ void RecordDeoptReason(DeoptimizeReason reason, uint32_t node_id,
+ SourcePosition position, int id);
+
+ static int RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
+ intptr_t pc_delta);
+ static void RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
+ intptr_t pc_delta);
+
+ // Writes a single byte or word of data in the code stream. Used for
+ // inline tables, e.g., jump-tables.
+ void db(uint8_t data);
+ void dd(uint32_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
+ void dq(uint64_t data, RelocInfo::Mode rmode = RelocInfo::NONE);
+ void dp(uintptr_t data, RelocInfo::Mode rmode = RelocInfo::NONE) {
+ dq(data, rmode);
+ }
+ void dd(Label* label);
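+
+ // Sketch: emitting an inline jump table; dd(Label*) records a label's
+ // position as 32-bit data in the code stream ('case0'/'case1' are
+ // hypothetical bound labels):
+ //   assm.dd(&case0);
+ //   assm.dd(&case1);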
+
+ // Postpone the generation of the trampoline pool for the specified number of
+ // instructions.
+ void BlockTrampolinePoolFor(int instructions);
+
+ // Check whether there are fewer than kGap bytes available in the buffer.
+ // If this is the case, we need to grow the buffer before emitting
+ // an instruction or relocation information.
+ inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
+
+ // Get the number of bytes available in the buffer.
+ inline intptr_t available_space() const {
+ return reloc_info_writer.pos() - pc_;
+ }
+
+ // Read/patch instructions.
+ static Instr instr_at(Address pc) { return *reinterpret_cast<Instr*>(pc); }
+ static void instr_at_put(Address pc, Instr instr) {
+ *reinterpret_cast<Instr*>(pc) = instr;
+ }
+ Instr instr_at(int pos) {
+ return *reinterpret_cast<Instr*>(buffer_start_ + pos);
+ }
+ void instr_at_put(int pos, Instr instr) {
+ *reinterpret_cast<Instr*>(buffer_start_ + pos) = instr;
+ }
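+
+ // Patching sketch: read back an emitted instruction, test it, and rewrite
+ // it in place ('pos' is a previously recorded pc offset and 'new_instr' a
+ // hypothetical replacement encoding):
+ //   Instr instr = assm.instr_at(pos);
+ //   if (Assembler::IsBranch(instr)) assm.instr_at_put(pos, new_instr);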
+
+ // Check if an instruction is a branch of some kind.
+ static bool IsBranch(Instr instr);
+ static bool IsB(Instr instr);
+ static bool IsBz(Instr instr);
+ static bool IsNal(Instr instr);
+
+ static bool IsBeq(Instr instr);
+ static bool IsBne(Instr instr);
+
+ static bool IsJump(Instr instr);
+ static bool IsMov(Instr instr, Register rd, Register rs);
+ static bool IsPcAddi(Instr instr, Register rd, int32_t si20);
+
+ static bool IsJ(Instr instr);
+ static bool IsLu12i_w(Instr instr);
+ static bool IsOri(Instr instr);
+ static bool IsLu32i_d(Instr instr);
+ static bool IsLu52i_d(Instr instr);
+
+ static bool IsNop(Instr instr, unsigned int type);
+
+ static Register GetRjReg(Instr instr);
+ static Register GetRkReg(Instr instr);
+ static Register GetRdReg(Instr instr);
+
+ static uint32_t GetRj(Instr instr);
+ static uint32_t GetRjField(Instr instr);
+ static uint32_t GetRk(Instr instr);
+ static uint32_t GetRkField(Instr instr);
+ static uint32_t GetRd(Instr instr);
+ static uint32_t GetRdField(Instr instr);
+ static uint32_t GetSa2(Instr instr);
+ static uint32_t GetSa3(Instr instr);
+ static uint32_t GetSa2Field(Instr instr);
+ static uint32_t GetSa3Field(Instr instr);
+ static uint32_t GetOpcodeField(Instr instr);
+ static uint32_t GetFunction(Instr instr);
+ static uint32_t GetFunctionField(Instr instr);
+ static uint32_t GetImmediate16(Instr instr);
+ static uint32_t GetLabelConst(Instr instr);
+
+ static bool IsAddImmediate(Instr instr);
+ static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
+
+ static bool IsAndImmediate(Instr instr);
+ static bool IsEmittedConstant(Instr instr);
+
+ void CheckTrampolinePool();
+
+ // Get the code target object for a pc-relative call or jump.
+ V8_INLINE Handle<Code> relative_code_target_object_handle_at(
+ Address pc_) const;
+
+ inline int UnboundLabelsCount() { return unbound_labels_count_; }
+
+ protected:
+ // Helper function for memory load/store.
+ void AdjustBaseAndOffset(MemOperand* src);
+
+ inline static void set_target_internal_reference_encoded_at(Address pc,
+ Address target);
+
+ int64_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
+
+ // Decode branch instruction at pos and return branch target pos.
+ int target_at(int pos, bool is_internal);
+
+ // Patch branch instruction at pos to branch to given branch target pos.
+ void target_at_put(int pos, int target_pos, bool is_internal);
+
+ // Say if we need to relocate with this mode.
+ bool MustUseReg(RelocInfo::Mode rmode);
+
+ // Record reloc info for current pc_.
+ void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+ // Block the emission of the trampoline pool before pc_offset.
+ void BlockTrampolinePoolBefore(int pc_offset) {
+ if (no_trampoline_pool_before_ < pc_offset)
+ no_trampoline_pool_before_ = pc_offset;
+ }
+
+ void StartBlockTrampolinePool() { trampoline_pool_blocked_nesting_++; }
+
+ void EndBlockTrampolinePool() {
+ trampoline_pool_blocked_nesting_--;
+ if (trampoline_pool_blocked_nesting_ == 0) {
+ CheckTrampolinePoolQuick(1);
+ }
+ }
+
+ bool is_trampoline_pool_blocked() const {
+ return trampoline_pool_blocked_nesting_ > 0;
+ }
+
+ bool has_exception() const { return internal_trampoline_exception_; }
+
+ bool is_trampoline_emitted() const { return trampoline_emitted_; }
+
+ // Temporarily block automatic assembly buffer growth.
+ void StartBlockGrowBuffer() {
+ DCHECK(!block_buffer_growth_);
+ block_buffer_growth_ = true;
+ }
+
+ void EndBlockGrowBuffer() {
+ DCHECK(block_buffer_growth_);
+ block_buffer_growth_ = false;
+ }
+
+ bool is_buffer_growth_blocked() const { return block_buffer_growth_; }
+
+ void CheckTrampolinePoolQuick(int extra_instructions = 0) {
+ if (pc_offset() >= next_buffer_check_ - extra_instructions * kInstrSize) {
+ CheckTrampolinePool();
+ }
+ }
+
+ void set_last_call_pc_(byte* pc) { last_call_pc_ = pc; }
+
+#ifdef DEBUG
+ bool EmbeddedObjectMatches(int pc_offset, Handle<Object> object) {
+ return target_address_at(
+ reinterpret_cast<Address>(buffer_->start() + pc_offset)) ==
+ (IsOnHeap() ? object->ptr() : object.address());
+ }
+#endif
+
+ private:
+ // Avoid overflows for displacements etc.
+ static const int kMaximalBufferSize = 512 * MB;
+
+ // Buffer size and constant pool distance are checked together at regular
+ // intervals of kBufferCheckInterval emitted bytes.
+ static constexpr int kBufferCheckInterval = 1 * KB / 2;
+
+ // Code generation.
+ // The relocation writer's position is at least kGap bytes below the end of
+ // the generated instructions. This is so that multi-instruction sequences do
+ // not have to check for overflow. The same is true for writes of large
+ // relocation info entries.
+ static constexpr int kGap = 64;
+ STATIC_ASSERT(AssemblerBase::kMinimalBufferSize >= 2 * kGap);
+
+ // Repeated checking whether the trampoline pool should be emitted is rather
+ // expensive. By default we only check again once a number of instructions
+ // has been generated.
+ static constexpr int kCheckConstIntervalInst = 32;
+ static constexpr int kCheckConstInterval =
+ kCheckConstIntervalInst * kInstrSize;
+
+ int next_buffer_check_; // pc offset of next buffer check.
+
+ // Emission of the trampoline pool may be blocked in some code sequences.
+ int trampoline_pool_blocked_nesting_; // Block emission if this is not zero.
+ int no_trampoline_pool_before_; // Block emission before this pc offset.
+
+ // Keep track of the last emitted pool to guarantee a maximal distance.
+ int last_trampoline_pool_end_; // pc offset of the end of the last pool.
+
+ // Automatic growth of the assembly buffer may be blocked for some sequences.
+ bool block_buffer_growth_; // Block growth when true.
+
+ // Relocation information generation.
+ // Each relocation is encoded as a variable size value.
+ static constexpr int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+ RelocInfoWriter reloc_info_writer;
+
+ // The bound position; before this we cannot do instruction elimination.
+ int last_bound_pos_;
+
+ // Code emission.
+ inline void CheckBuffer();
+ void GrowBuffer();
+ inline void emit(Instr x);
+ inline void emit(uint64_t x);
+ template <typename T>
+ inline void EmitHelper(T x);
+ inline void EmitHelper(Instr x);
+
+ void GenB(Opcode opcode, Register rj, int32_t si21); // opcode:6
+ void GenB(Opcode opcode, CFRegister cj, int32_t si21, bool isEq);
+ void GenB(Opcode opcode, int32_t si26);
+ void GenBJ(Opcode opcode, Register rj, Register rd, int32_t si16);
+ void GenCmp(Opcode opcode, FPUCondition cond, FPURegister fk, FPURegister fj,
+ CFRegister cd);
+ void GenSel(Opcode opcode, CFRegister ca, FPURegister fk, FPURegister fj,
+ FPURegister rd);
+
+ void GenRegister(Opcode opcode, Register rj, Register rd, bool rjrd = true);
+ void GenRegister(Opcode opcode, FPURegister fj, FPURegister fd);
+ void GenRegister(Opcode opcode, Register rj, FPURegister fd);
+ void GenRegister(Opcode opcode, FPURegister fj, Register rd);
+ void GenRegister(Opcode opcode, Register rj, FPUControlRegister fd);
+ void GenRegister(Opcode opcode, FPUControlRegister fj, Register rd);
+ void GenRegister(Opcode opcode, FPURegister fj, CFRegister cd);
+ void GenRegister(Opcode opcode, CFRegister cj, FPURegister fd);
+ void GenRegister(Opcode opcode, Register rj, CFRegister cd);
+ void GenRegister(Opcode opcode, CFRegister cj, Register rd);
+
+ void GenRegister(Opcode opcode, Register rk, Register rj, Register rd);
+ void GenRegister(Opcode opcode, FPURegister fk, FPURegister fj,
+ FPURegister fd);
+
+ void GenRegister(Opcode opcode, FPURegister fa, FPURegister fk,
+ FPURegister fj, FPURegister fd);
+ void GenRegister(Opcode opcode, Register rk, Register rj, FPURegister fd);
+
+ void GenImm(Opcode opcode, int32_t bit3, Register rk, Register rj,
+ Register rd);
+ void GenImm(Opcode opcode, int32_t bit6m, int32_t bit6l, Register rj,
+ Register rd);
+ void GenImm(Opcode opcode, int32_t bit20, Register rd);
+ void GenImm(Opcode opcode, int32_t bit15);
+ void GenImm(Opcode opcode, int32_t value, Register rj, Register rd,
+ int32_t value_bits); // 6 | 12 | 14 | 16
+ void GenImm(Opcode opcode, int32_t bit12, Register rj, FPURegister fd);
+
+ // Labels.
+ void print(const Label* L);
+ void bind_to(Label* L, int pos);
+ void next(Label* L, bool is_internal);
+
+ // One trampoline consists of:
+ // - space for trampoline slots,
+ // - space for labels.
+ //
+ // Space for trampoline slots is equal to slot_count * 2 * kInstrSize.
+ // Space for trampoline slots precedes space for labels. Each label is of one
+ // instruction size, so total amount for labels is equal to
+ // label_count * kInstrSize.
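+ //
+ // Worked example (illustrative): with slot_count = 16 and kInstrSize = 4,
+ // the slot space is 16 * kTrampolineSlotsSize = 16 * 2 * 4 = 128 bytes, so
+ // end_ = start_ + 128 in the two-argument constructor below.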
+ class Trampoline {
+ public:
+ Trampoline() {
+ start_ = 0;
+ next_slot_ = 0;
+ free_slot_count_ = 0;
+ end_ = 0;
+ }
+ Trampoline(int start, int slot_count) {
+ start_ = start;
+ next_slot_ = start;
+ free_slot_count_ = slot_count;
+ end_ = start + slot_count * kTrampolineSlotsSize;
+ }
+ int start() { return start_; }
+ int end() { return end_; }
+ int take_slot() {
+ int trampoline_slot = kInvalidSlotPos;
+ if (free_slot_count_ <= 0) {
+ // We have run out of space on trampolines.
+ // Make sure we fail in debug mode, so we become aware of each case
+ // when this happens.
+ DCHECK(0);
+ // Internal exception will be caught.
+ } else {
+ trampoline_slot = next_slot_;
+ free_slot_count_--;
+ next_slot_ += kTrampolineSlotsSize;
+ }
+ return trampoline_slot;
+ }
+
+ private:
+ int start_;
+ int end_;
+ int next_slot_;
+ int free_slot_count_;
+ };
+
+ int32_t get_trampoline_entry(int32_t pos);
+ int unbound_labels_count_;
+ // After the trampoline is emitted, long branches are used in generated code
+ // for forward branches whose target offsets could be beyond the reach of a
+ // branch instruction. We use this information to trigger a different mode
+ // of branch instruction generation, where jump instructions are used rather
+ // than regular branch instructions.
+ bool trampoline_emitted_;
+ static constexpr int kInvalidSlotPos = -1;
+
+ // Internal reference positions, required for unbound internal reference
+ // labels.
+ std::set<int64_t> internal_reference_positions_;
+ bool is_internal_reference(Label* L) {
+ return internal_reference_positions_.find(L->pos()) !=
+ internal_reference_positions_.end();
+ }
+
+ void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; }
+ void ClearCompactBranchState() { prev_instr_compact_branch_ = false; }
+ bool prev_instr_compact_branch_ = false;
+
+ Trampoline trampoline_;
+ bool internal_trampoline_exception_;
+
+ // Keep track of the last Call's position to ensure that the safepoint can
+ // get the correct information even if there is a trampoline immediately
+ // after the Call.
+ byte* last_call_pc_;
+
+ RegList scratch_register_list_;
+
+ private:
+ void AllocateAndInstallRequestedHeapObjects(Isolate* isolate);
+
+ int WriteCodeComments();
+
+ friend class RegExpMacroAssemblerLOONG64;
+ friend class RelocInfo;
+ friend class BlockTrampolinePoolScope;
+ friend class EnsureSpace;
+};
+
+class EnsureSpace {
+ public:
+ explicit inline EnsureSpace(Assembler* assembler);
+};
+
+class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
+ public:
+ explicit UseScratchRegisterScope(Assembler* assembler);
+ ~UseScratchRegisterScope();
+
+ Register Acquire();
+ bool hasAvailable() const;
+
+ void Include(const RegList& list) { *available_ |= list; }
+ void Exclude(const RegList& list) { *available_ &= ~list; }
+ void Include(const Register& reg1, const Register& reg2 = no_reg) {
+ RegList list(reg1.bit() | reg2.bit());
+ Include(list);
+ }
+ void Exclude(const Register& reg1, const Register& reg2 = no_reg) {
+ RegList list(reg1.bit() | reg2.bit());
+ Exclude(list);
+ }
+
+ private:
+ RegList* available_;
+ RegList old_available_;
+};
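+
+// Usage sketch (assuming an Assembler 'assm'):
+//   {
+//     UseScratchRegisterScope temps(&assm);
+//     Register scratch = temps.Acquire();  // borrow a scratch register
+//     // ... use 'scratch' ...
+//   }  // the previous scratch register list is restored on destruction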
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_H_
diff --git a/deps/v8/src/codegen/loong64/constants-loong64.cc b/deps/v8/src/codegen/loong64/constants-loong64.cc
new file mode 100644
index 0000000000..3f887a50fe
--- /dev/null
+++ b/deps/v8/src/codegen/loong64/constants-loong64.cc
@@ -0,0 +1,100 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/codegen/loong64/constants-loong64.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Registers.
+
+// These register names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+const char* Registers::names_[kNumSimuRegisters] = {
+ "zero_reg", "ra", "tp", "sp", "a0", "a1", "a2", "a3", "a4", "a5", "a6",
+ "a7", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "x_reg",
+ "fp", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "pc"};
+
+// List of alias names which can be used when referring to registers.
+const Registers::RegisterAlias Registers::aliases_[] = {
+ {0, "zero"}, {30, "cp"}, {kInvalidRegister, nullptr}};
+
+const char* Registers::Name(int reg) {
+ const char* result;
+ if ((0 <= reg) && (reg < kNumSimuRegisters)) {
+ result = names_[reg];
+ } else {
+ result = "noreg";
+ }
+ return result;
+}
+
+int Registers::Number(const char* name) {
+ // Look through the canonical names.
+ for (int i = 0; i < kNumSimuRegisters; i++) {
+ if (strcmp(names_[i], name) == 0) {
+ return i;
+ }
+ }
+
+ // Look through the alias names.
+ int i = 0;
+ while (aliases_[i].reg != kInvalidRegister) {
+ if (strcmp(aliases_[i].name, name) == 0) {
+ return aliases_[i].reg;
+ }
+ i++;
+ }
+
+ // No register with the requested name found.
+ return kInvalidRegister;
+}
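+
+// For example, Registers::Number("a0") returns 4, Registers::Number("zero")
+// resolves through the alias table to 0, and an unknown name yields
+// kInvalidRegister.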
+
+const char* FPURegisters::names_[kNumFPURegisters] = {
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10",
+ "f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
+ "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"};
+
+// List of alias names which can be used when referring to LoongArch registers.
+const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
+ {kInvalidRegister, nullptr}};
+
+const char* FPURegisters::Name(int creg) {
+ const char* result;
+ if ((0 <= creg) && (creg < kNumFPURegisters)) {
+ result = names_[creg];
+ } else {
+ result = "nocreg";
+ }
+ return result;
+}
+
+int FPURegisters::Number(const char* name) {
+ // Look through the canonical names.
+ for (int i = 0; i < kNumFPURegisters; i++) {
+ if (strcmp(names_[i], name) == 0) {
+ return i;
+ }
+ }
+
+ // Look through the alias names.
+ int i = 0;
+ while (aliases_[i].creg != kInvalidRegister) {
+ if (strcmp(aliases_[i].name, name) == 0) {
+ return aliases_[i].creg;
+ }
+ i++;
+ }
+
+ // No FPU register with the requested name found.
+ return kInvalidFPURegister;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_LOONG64
diff --git a/deps/v8/src/codegen/loong64/constants-loong64.h b/deps/v8/src/codegen/loong64/constants-loong64.h
new file mode 100644
index 0000000000..394c5dc6ab
--- /dev/null
+++ b/deps/v8/src/codegen/loong64/constants-loong64.h
@@ -0,0 +1,1291 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_LOONG64_CONSTANTS_LOONG64_H_
+#define V8_CODEGEN_LOONG64_CONSTANTS_LOONG64_H_
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#include "src/common/globals.h"
+
+// Get the standard printf format macros for C99 stdint types.
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+#include <inttypes.h>
+
+// Defines constants and accessor classes to assemble, disassemble and
+// simulate LOONG64 instructions.
+
+namespace v8 {
+namespace internal {
+
+constexpr size_t kMaxPCRelativeCodeRangeInMB = 128;
+
+// -----------------------------------------------------------------------------
+// Registers and FPURegisters.
+
+// Number of general purpose registers.
+const int kNumRegisters = 32;
+const int kInvalidRegister = -1;
+
+// Number of registers with pc.
+const int kNumSimuRegisters = 33;
+
+// In the simulator, the PC register is simulated as the 33rd register.
+const int kPCRegister = 32;
+
+// Number of floating point registers.
+const int kNumFPURegisters = 32;
+const int kInvalidFPURegister = -1;
+
+// FPU control registers.
+const int kFCSRRegister = 0;
+const int kInvalidFPUControlRegister = -1;
+const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1u << 31) - 1;
+const int32_t kFPUInvalidResultNegative = static_cast<int32_t>(1u << 31);
+const uint64_t kFPU64InvalidResult =
+ static_cast<uint64_t>(static_cast<uint64_t>(1) << 63) - 1;
+const int64_t kFPU64InvalidResultNegative =
+ static_cast<int64_t>(static_cast<uint64_t>(1) << 63);
+
+// FCSR constants.
+const uint32_t kFCSRInexactCauseBit = 24;
+const uint32_t kFCSRUnderflowCauseBit = 25;
+const uint32_t kFCSROverflowCauseBit = 26;
+const uint32_t kFCSRDivideByZeroCauseBit = 27;
+const uint32_t kFCSRInvalidOpCauseBit = 28;
+
+const uint32_t kFCSRInexactCauseMask = 1 << kFCSRInexactCauseBit;
+const uint32_t kFCSRUnderflowCauseMask = 1 << kFCSRUnderflowCauseBit;
+const uint32_t kFCSROverflowCauseMask = 1 << kFCSROverflowCauseBit;
+const uint32_t kFCSRDivideByZeroCauseMask = 1 << kFCSRDivideByZeroCauseBit;
+const uint32_t kFCSRInvalidOpCauseMask = 1 << kFCSRInvalidOpCauseBit;
+
+const uint32_t kFCSRCauseMask =
+ kFCSRInexactCauseMask | kFCSRUnderflowCauseMask | kFCSROverflowCauseMask |
+ kFCSRDivideByZeroCauseMask | kFCSRInvalidOpCauseMask;
+
+const uint32_t kFCSRExceptionCauseMask = kFCSRCauseMask ^ kFCSRInexactCauseMask;
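+//
+// Usage sketch (illustrative, assuming 'fcsr' holds the FCSR value read via
+// movfcsr2gr): any raised cause other than inexact can be tested in one mask:
+//   bool conversion_failed = (fcsr & kFCSRExceptionCauseMask) != 0;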
+
+// The actual value of the root register is offset from the root array's
+// start so as to take advantage of negative displacement values.
+// TODO(sigurds): Choose best value.
+constexpr int kRootRegisterBias = 256;
+
+// Helper functions for converting between register numbers and names.
+class Registers {
+ public:
+ // Return the name of the register.
+ static const char* Name(int reg);
+
+ // Lookup the register number for the name provided.
+ static int Number(const char* name);
+
+ struct RegisterAlias {
+ int reg;
+ const char* name;
+ };
+
+ static const int64_t kMaxValue = 0x7fffffffffffffffl;
+ static const int64_t kMinValue = 0x8000000000000000l;
+
+ private:
+ static const char* names_[kNumSimuRegisters];
+ static const RegisterAlias aliases_[];
+};
+
+// Helper functions for converting between register numbers and names.
+class FPURegisters {
+ public:
+ // Return the name of the register.
+ static const char* Name(int reg);
+
+ // Lookup the register number for the name provided.
+ static int Number(const char* name);
+
+ struct RegisterAlias {
+ int creg;
+ const char* name;
+ };
+
+ private:
+ static const char* names_[kNumFPURegisters];
+ static const RegisterAlias aliases_[];
+};
+
+// -----------------------------------------------------------------------------
+// Instructions encoding constants.
+
+// On LoongArch all instructions are 32 bits.
+using Instr = int32_t;
+
+// Special Software Interrupt codes when used in the presence of the LOONG64
+// simulator.
+enum SoftwareInterruptCodes {
+ // Transition to C code.
+ call_rt_redirected = 0x7fff
+};
+
+// On LOONG64 Simulator breakpoints can have different codes:
+// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints,
+// the simulator will run through them and print the registers.
+// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop()
+// instructions (see Assembler::stop()).
+// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
+// debugger.
+const uint32_t kMaxWatchpointCode = 31;
+const uint32_t kMaxStopCode = 127;
+STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode);
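+
+// Example: a break code of 10 is treated as a simple watchpoint, a code of
+// 100 is treated as a stop() instruction, and a code of 200 drops into the
+// debugger.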
+
+// ----- Fields offset and length.
+const int kRjShift = 5;
+const int kRjBits = 5;
+const int kRkShift = 10;
+const int kRkBits = 5;
+const int kRdShift = 0;
+const int kRdBits = 5;
+const int kSaShift = 15;
+const int kSa2Bits = 2;
+const int kSa3Bits = 3;
+const int kCdShift = 0;
+const int kCdBits = 3;
+const int kCjShift = 5;
+const int kCjBits = 3;
+const int kCodeShift = 0;
+const int kCodeBits = 15;
+const int kCondShift = 15;
+const int kCondBits = 5;
+const int kUi5Shift = 10;
+const int kUi5Bits = 5;
+const int kUi6Shift = 10;
+const int kUi6Bits = 6;
+const int kUi12Shift = 10;
+const int kUi12Bits = 12;
+const int kSi12Shift = 10;
+const int kSi12Bits = 12;
+const int kSi14Shift = 10;
+const int kSi14Bits = 14;
+const int kSi16Shift = 10;
+const int kSi16Bits = 16;
+const int kSi20Shift = 5;
+const int kSi20Bits = 20;
+const int kMsbwShift = 16;
+const int kMsbwBits = 5;
+const int kLsbwShift = 10;
+const int kLsbwBits = 5;
+const int kMsbdShift = 16;
+const int kMsbdBits = 6;
+const int kLsbdShift = 10;
+const int kLsbdBits = 6;
+const int kFdShift = 0;
+const int kFdBits = 5;
+const int kFjShift = 5;
+const int kFjBits = 5;
+const int kFkShift = 10;
+const int kFkBits = 5;
+const int kFaShift = 15;
+const int kFaBits = 5;
+const int kCaShift = 15;
+const int kCaBits = 3;
+const int kHint15Shift = 0;
+const int kHint15Bits = 15;
+const int kHint5Shift = 0;
+const int kHint5Bits = 5;
+const int kOffsLowShift = 10;
+const int kOffsLowBits = 16;
+const int kOffs26HighShift = 0;
+const int kOffs26HighBits = 10;
+const int kOffs21HighShift = 0;
+const int kOffs21HighBits = 5;
+const int kImm12Shift = 0;
+const int kImm12Bits = 12;
+const int kImm16Shift = 0;
+const int kImm16Bits = 16;
+const int kImm26Shift = 0;
+const int kImm26Bits = 26;
+const int kImm28Shift = 0;
+const int kImm28Bits = 28;
+const int kImm32Shift = 0;
+const int kImm32Bits = 32;
+
+// ----- Miscellaneous useful masks.
+// Instruction bit masks.
+const int kRjFieldMask = ((1 << kRjBits) - 1) << kRjShift;
+const int kRkFieldMask = ((1 << kRkBits) - 1) << kRkShift;
+const int kRdFieldMask = ((1 << kRdBits) - 1) << kRdShift;
+const int kSa2FieldMask = ((1 << kSa2Bits) - 1) << kSaShift;
+const int kSa3FieldMask = ((1 << kSa3Bits) - 1) << kSaShift;
+// Misc masks.
+const int kHiMaskOf32 = 0xffff << 16; // Only to be used with 32-bit values
+const int kLoMaskOf32 = 0xffff;
+const int kSignMaskOf32 = 0x80000000; // Only to be used with 32-bit values
+const int64_t kTop16MaskOf64 = (int64_t)0xffff << 48;
+const int64_t kHigher16MaskOf64 = (int64_t)0xffff << 32;
+const int64_t kUpper16MaskOf64 = (int64_t)0xffff << 16;
+
+const int kImm12Mask = ((1 << kImm12Bits) - 1) << kImm12Shift;
+const int kImm16Mask = ((1 << kImm16Bits) - 1) << kImm16Shift;
+const int kImm26Mask = ((1 << kImm26Bits) - 1) << kImm26Shift;
+const int kImm28Mask = ((1 << kImm28Bits) - 1) << kImm28Shift;
+
+// ----- LOONG64 Opcodes and Function Fields.
+enum Opcode : uint32_t {
+ BEQZ = 0x10U << 26,
+ BNEZ = 0x11U << 26,
+ BCZ = 0x12U << 26, // BCEQZ & BCNEZ
+ JIRL = 0x13U << 26,
+ B = 0x14U << 26,
+ BL = 0x15U << 26,
+ BEQ = 0x16U << 26,
+ BNE = 0x17U << 26,
+ BLT = 0x18U << 26,
+ BGE = 0x19U << 26,
+ BLTU = 0x1aU << 26,
+ BGEU = 0x1bU << 26,
+
+ ADDU16I_D = 0x4U << 26,
+
+ LU12I_W = 0xaU << 25,
+ LU32I_D = 0xbU << 25,
+ PCADDI = 0xcU << 25,
+ PCALAU12I = 0xdU << 25,
+ PCADDU12I = 0xeU << 25,
+ PCADDU18I = 0xfU << 25,
+
+ LL_W = 0x20U << 24,
+ SC_W = 0x21U << 24,
+ LL_D = 0x22U << 24,
+ SC_D = 0x23U << 24,
+ LDPTR_W = 0x24U << 24,
+ STPTR_W = 0x25U << 24,
+ LDPTR_D = 0x26U << 24,
+ STPTR_D = 0x27U << 24,
+
+ BSTR_W = 0x1U << 22, // BSTRINS_W & BSTRPICK_W
+ BSTRINS_W = BSTR_W,
+ BSTRPICK_W = BSTR_W,
+ BSTRINS_D = 0x2U << 22,
+ BSTRPICK_D = 0x3U << 22,
+
+ SLTI = 0x8U << 22,
+ SLTUI = 0x9U << 22,
+ ADDI_W = 0xaU << 22,
+ ADDI_D = 0xbU << 22,
+ LU52I_D = 0xcU << 22,
+ ANDI = 0xdU << 22,
+ ORI = 0xeU << 22,
+ XORI = 0xfU << 22,
+
+ LD_B = 0xa0U << 22,
+ LD_H = 0xa1U << 22,
+ LD_W = 0xa2U << 22,
+ LD_D = 0xa3U << 22,
+ ST_B = 0xa4U << 22,
+ ST_H = 0xa5U << 22,
+ ST_W = 0xa6U << 22,
+ ST_D = 0xa7U << 22,
+ LD_BU = 0xa8U << 22,
+ LD_HU = 0xa9U << 22,
+ LD_WU = 0xaaU << 22,
+ FLD_S = 0xacU << 22,
+ FST_S = 0xadU << 22,
+ FLD_D = 0xaeU << 22,
+ FST_D = 0xafU << 22,
+
+ FMADD_S = 0x81U << 20,
+ FMADD_D = 0x82U << 20,
+ FMSUB_S = 0x85U << 20,
+ FMSUB_D = 0x86U << 20,
+ FNMADD_S = 0x89U << 20,
+ FNMADD_D = 0x8aU << 20,
+ FNMSUB_S = 0x8dU << 20,
+ FNMSUB_D = 0x8eU << 20,
+ FCMP_COND_S = 0xc1U << 20,
+ FCMP_COND_D = 0xc2U << 20,
+
+ BYTEPICK_D = 0x3U << 18,
+ BYTEPICK_W = 0x2U << 18,
+
+ FSEL = 0x340U << 18,
+
+ ALSL = 0x1U << 18,
+ ALSL_W = ALSL,
+ ALSL_WU = ALSL,
+
+ ALSL_D = 0xbU << 18,
+
+ SLLI_W = 0x40U << 16,
+ SRLI_W = 0x44U << 16,
+ SRAI_W = 0x48U << 16,
+ ROTRI_W = 0x4cU << 16,
+
+ SLLI_D = 0x41U << 16,
+ SRLI_D = 0x45U << 16,
+ SRAI_D = 0x49U << 16,
+ ROTRI_D = 0x4dU << 16,
+
+ SLLI = 0x10U << 18,
+ SRLI = 0x11U << 18,
+ SRAI = 0x12U << 18,
+ ROTRI = 0x13U << 18,
+
+ ADD_W = 0x20U << 15,
+ ADD_D = 0x21U << 15,
+ SUB_W = 0x22U << 15,
+ SUB_D = 0x23U << 15,
+ SLT = 0x24U << 15,
+ SLTU = 0x25U << 15,
+ MASKNEZ = 0x26U << 15,
+ MASKEQZ = 0x27U << 15,
+ NOR = 0x28U << 15,
+ AND = 0x29U << 15,
+ OR = 0x2aU << 15,
+ XOR = 0x2bU << 15,
+ ORN = 0x2cU << 15,
+ ANDN = 0x2dU << 15,
+ SLL_W = 0x2eU << 15,
+ SRL_W = 0x2fU << 15,
+ SRA_W = 0x30U << 15,
+ SLL_D = 0x31U << 15,
+ SRL_D = 0x32U << 15,
+ SRA_D = 0x33U << 15,
+ ROTR_W = 0x36U << 15,
+ ROTR_D = 0x37U << 15,
+ MUL_W = 0x38U << 15,
+ MULH_W = 0x39U << 15,
+ MULH_WU = 0x3aU << 15,
+ MUL_D = 0x3bU << 15,
+ MULH_D = 0x3cU << 15,
+ MULH_DU = 0x3dU << 15,
+ MULW_D_W = 0x3eU << 15,
+ MULW_D_WU = 0x3fU << 15,
+
+ DIV_W = 0x40U << 15,
+ MOD_W = 0x41U << 15,
+ DIV_WU = 0x42U << 15,
+ MOD_WU = 0x43U << 15,
+ DIV_D = 0x44U << 15,
+ MOD_D = 0x45U << 15,
+ DIV_DU = 0x46U << 15,
+ MOD_DU = 0x47U << 15,
+
+ BREAK = 0x54U << 15,
+
+ FADD_S = 0x201U << 15,
+ FADD_D = 0x202U << 15,
+ FSUB_S = 0x205U << 15,
+ FSUB_D = 0x206U << 15,
+ FMUL_S = 0x209U << 15,
+ FMUL_D = 0x20aU << 15,
+ FDIV_S = 0x20dU << 15,
+ FDIV_D = 0x20eU << 15,
+ FMAX_S = 0x211U << 15,
+ FMAX_D = 0x212U << 15,
+ FMIN_S = 0x215U << 15,
+ FMIN_D = 0x216U << 15,
+ FMAXA_S = 0x219U << 15,
+ FMAXA_D = 0x21aU << 15,
+ FMINA_S = 0x21dU << 15,
+ FMINA_D = 0x21eU << 15,
+ FSCALEB_S = 0x221U << 15,
+ FSCALEB_D = 0x222U << 15,
+ FCOPYSIGN_S = 0x225U << 15,
+ FCOPYSIGN_D = 0x226U << 15,
+
+ LDX_B = 0x7000U << 15,
+ LDX_H = 0x7008U << 15,
+ LDX_W = 0x7010U << 15,
+ LDX_D = 0x7018U << 15,
+ STX_B = 0x7020U << 15,
+ STX_H = 0x7028U << 15,
+ STX_W = 0x7030U << 15,
+ STX_D = 0x7038U << 15,
+ LDX_BU = 0x7040U << 15,
+ LDX_HU = 0x7048U << 15,
+ LDX_WU = 0x7050U << 15,
+ FLDX_S = 0x7060U << 15,
+ FLDX_D = 0x7068U << 15,
+ FSTX_S = 0x7070U << 15,
+ FSTX_D = 0x7078U << 15,
+
+ AMSWAP_W = 0x70c0U << 15,
+ AMSWAP_D = 0x70c1U << 15,
+ AMADD_W = 0x70c2U << 15,
+ AMADD_D = 0x70c3U << 15,
+ AMAND_W = 0x70c4U << 15,
+ AMAND_D = 0x70c5U << 15,
+ AMOR_W = 0x70c6U << 15,
+ AMOR_D = 0x70c7U << 15,
+ AMXOR_W = 0x70c8U << 15,
+ AMXOR_D = 0x70c9U << 15,
+ AMMAX_W = 0x70caU << 15,
+ AMMAX_D = 0x70cbU << 15,
+ AMMIN_W = 0x70ccU << 15,
+ AMMIN_D = 0x70cdU << 15,
+ AMMAX_WU = 0x70ceU << 15,
+ AMMAX_DU = 0x70cfU << 15,
+ AMMIN_WU = 0x70d0U << 15,
+ AMMIN_DU = 0x70d1U << 15,
+ AMSWAP_DB_W = 0x70d2U << 15,
+ AMSWAP_DB_D = 0x70d3U << 15,
+ AMADD_DB_W = 0x70d4U << 15,
+ AMADD_DB_D = 0x70d5U << 15,
+ AMAND_DB_W = 0x70d6U << 15,
+ AMAND_DB_D = 0x70d7U << 15,
+ AMOR_DB_W = 0x70d8U << 15,
+ AMOR_DB_D = 0x70d9U << 15,
+ AMXOR_DB_W = 0x70daU << 15,
+ AMXOR_DB_D = 0x70dbU << 15,
+ AMMAX_DB_W = 0x70dcU << 15,
+ AMMAX_DB_D = 0x70ddU << 15,
+ AMMIN_DB_W = 0x70deU << 15,
+ AMMIN_DB_D = 0x70dfU << 15,
+ AMMAX_DB_WU = 0x70e0U << 15,
+ AMMAX_DB_DU = 0x70e1U << 15,
+ AMMIN_DB_WU = 0x70e2U << 15,
+ AMMIN_DB_DU = 0x70e3U << 15,
+
+ DBAR = 0x70e4U << 15,
+ IBAR = 0x70e5U << 15,
+
+ CLO_W = 0x4U << 10,
+ CLZ_W = 0x5U << 10,
+ CTO_W = 0x6U << 10,
+ CTZ_W = 0x7U << 10,
+ CLO_D = 0x8U << 10,
+ CLZ_D = 0x9U << 10,
+ CTO_D = 0xaU << 10,
+ CTZ_D = 0xbU << 10,
+ REVB_2H = 0xcU << 10,
+ REVB_4H = 0xdU << 10,
+ REVB_2W = 0xeU << 10,
+ REVB_D = 0xfU << 10,
+ REVH_2W = 0x10U << 10,
+ REVH_D = 0x11U << 10,
+ BITREV_4B = 0x12U << 10,
+ BITREV_8B = 0x13U << 10,
+ BITREV_W = 0x14U << 10,
+ BITREV_D = 0x15U << 10,
+ EXT_W_H = 0x16U << 10,
+ EXT_W_B = 0x17U << 10,
+
+ FABS_S = 0x4501U << 10,
+ FABS_D = 0x4502U << 10,
+ FNEG_S = 0x4505U << 10,
+ FNEG_D = 0x4506U << 10,
+ FLOGB_S = 0x4509U << 10,
+ FLOGB_D = 0x450aU << 10,
+ FCLASS_S = 0x450dU << 10,
+ FCLASS_D = 0x450eU << 10,
+ FSQRT_S = 0x4511U << 10,
+ FSQRT_D = 0x4512U << 10,
+ FRECIP_S = 0x4515U << 10,
+ FRECIP_D = 0x4516U << 10,
+ FRSQRT_S = 0x4519U << 10,
+ FRSQRT_D = 0x451aU << 10,
+ FMOV_S = 0x4525U << 10,
+ FMOV_D = 0x4526U << 10,
+ MOVGR2FR_W = 0x4529U << 10,
+ MOVGR2FR_D = 0x452aU << 10,
+ MOVGR2FRH_W = 0x452bU << 10,
+ MOVFR2GR_S = 0x452dU << 10,
+ MOVFR2GR_D = 0x452eU << 10,
+ MOVFRH2GR_S = 0x452fU << 10,
+ MOVGR2FCSR = 0x4530U << 10,
+ MOVFCSR2GR = 0x4532U << 10,
+ MOVFR2CF = 0x4534U << 10,
+ MOVGR2CF = 0x4536U << 10,
+
+ FCVT_S_D = 0x4646U << 10,
+ FCVT_D_S = 0x4649U << 10,
+ FTINTRM_W_S = 0x4681U << 10,
+ FTINTRM_W_D = 0x4682U << 10,
+ FTINTRM_L_S = 0x4689U << 10,
+ FTINTRM_L_D = 0x468aU << 10,
+ FTINTRP_W_S = 0x4691U << 10,
+ FTINTRP_W_D = 0x4692U << 10,
+ FTINTRP_L_S = 0x4699U << 10,
+ FTINTRP_L_D = 0x469aU << 10,
+ FTINTRZ_W_S = 0x46a1U << 10,
+ FTINTRZ_W_D = 0x46a2U << 10,
+ FTINTRZ_L_S = 0x46a9U << 10,
+ FTINTRZ_L_D = 0x46aaU << 10,
+ FTINTRNE_W_S = 0x46b1U << 10,
+ FTINTRNE_W_D = 0x46b2U << 10,
+ FTINTRNE_L_S = 0x46b9U << 10,
+ FTINTRNE_L_D = 0x46baU << 10,
+ FTINT_W_S = 0x46c1U << 10,
+ FTINT_W_D = 0x46c2U << 10,
+ FTINT_L_S = 0x46c9U << 10,
+ FTINT_L_D = 0x46caU << 10,
+ FFINT_S_W = 0x4744U << 10,
+ FFINT_S_L = 0x4746U << 10,
+ FFINT_D_W = 0x4748U << 10,
+ FFINT_D_L = 0x474aU << 10,
+ FRINT_S = 0x4791U << 10,
+ FRINT_D = 0x4792U << 10,
+
+ MOVCF2FR = 0x4535U << 10,
+ MOVCF2GR = 0x4537U << 10
+};
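+
+// Illustrative note: opcodes occupy the topmost bits of the 32-bit
+// instruction word. For a 6-bit opcode such as BEQ (0x16U << 26), masking an
+// instruction with Bits(31, 26) << 26 recovers the enum value; this is how
+// InstructionType() further below classifies instructions.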
+
+// ----- Emulated conditions.
+// On LOONG64 we use this enum to abstract from conditional branch instructions.
+// The 'U' prefix is used to specify unsigned comparisons.
+enum Condition {
+ // Any value < 0 is considered no_condition.
+ kNoCondition = -1,
+ overflow = 0,
+ no_overflow = 1,
+ Uless = 2,
+ Ugreater_equal = 3,
+ Uless_equal = 4,
+ Ugreater = 5,
+ equal = 6,
+ not_equal = 7, // Unordered or Not Equal.
+ negative = 8,
+ positive = 9,
+ parity_even = 10,
+ parity_odd = 11,
+ less = 12,
+ greater_equal = 13,
+ less_equal = 14,
+ greater = 15,
+ ueq = 16, // Unordered or Equal.
+ ogl = 17, // Ordered and Not Equal.
+ cc_always = 18,
+
+ // Aliases.
+ carry = Uless,
+ not_carry = Ugreater_equal,
+ zero = equal,
+ eq = equal,
+ not_zero = not_equal,
+ ne = not_equal,
+ nz = not_equal,
+ sign = negative,
+ not_sign = positive,
+ mi = negative,
+ pl = positive,
+ hi = Ugreater,
+ ls = Uless_equal,
+ ge = greater_equal,
+ lt = less,
+ gt = greater,
+ le = less_equal,
+ hs = Ugreater_equal,
+ lo = Uless,
+ al = cc_always,
+ ult = Uless,
+ uge = Ugreater_equal,
+ ule = Uless_equal,
+ ugt = Ugreater,
+ cc_default = kNoCondition
+};
+
+// Returns the equivalent of !cc.
+// Negation of the default kNoCondition (-1) results in a non-default
+// no_condition value (-2). As long as tests for no_condition check
+// for condition < 0, this will work as expected.
+inline Condition NegateCondition(Condition cc) {
+ DCHECK(cc != cc_always);
+ return static_cast<Condition>(cc ^ 1);
+}
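+
+// For instance, NegateCondition(equal /* 6 */) == not_equal /* 7 */ and
+// NegateCondition(Uless /* 2 */) == Ugreater_equal /* 3 */: the pairs above
+// are laid out so that flipping the low bit negates the condition.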
+
+inline Condition NegateFpuCondition(Condition cc) {
+ DCHECK(cc != cc_always);
+ switch (cc) {
+ case ult:
+ return ge;
+ case ugt:
+ return le;
+ case uge:
+ return lt;
+ case ule:
+ return gt;
+ case lt:
+ return uge;
+ case gt:
+ return ule;
+ case ge:
+ return ult;
+ case le:
+ return ugt;
+ case eq:
+ return ne;
+ case ne:
+ return eq;
+ case ueq:
+ return ogl;
+ case ogl:
+ return ueq;
+ default:
+ return cc;
+ }
+}
+
+// ----- Coprocessor conditions.
+enum FPUCondition {
+ kNoFPUCondition = -1,
+
+ CAF = 0x00, // False (quiet).
+ SAF = 0x01, // False (signaling).
+ CLT = 0x02, // Less Than (quiet).
+ // SLT = 0x03, // Less Than (signaling).
+ CEQ = 0x04,
+ SEQ = 0x05,
+ CLE = 0x06,
+ SLE = 0x07,
+ CUN = 0x08,
+ SUN = 0x09,
+ CULT = 0x0a,
+ SULT = 0x0b,
+ CUEQ = 0x0c,
+ SUEQ = 0x0d,
+ CULE = 0x0e,
+ SULE = 0x0f,
+ CNE = 0x10,
+ SNE = 0x11,
+ COR = 0x14,
+ SOR = 0x15,
+ CUNE = 0x18,
+ SUNE = 0x19,
+};
+
+const uint32_t kFPURoundingModeShift = 8;
+const uint32_t kFPURoundingModeMask = 0b11 << kFPURoundingModeShift;
+
+// FPU rounding modes.
+enum FPURoundingMode {
+ RN = 0b00 << kFPURoundingModeShift, // Round to Nearest.
+ RZ = 0b01 << kFPURoundingModeShift, // Round towards zero.
+ RP = 0b10 << kFPURoundingModeShift, // Round towards Plus Infinity.
+ RM = 0b11 << kFPURoundingModeShift, // Round towards Minus Infinity.
+
+ // Aliases.
+ kRoundToNearest = RN,
+ kRoundToZero = RZ,
+ kRoundToPlusInf = RP,
+ kRoundToMinusInf = RM,
+
+ mode_round = RN,
+ mode_ceil = RP,
+ mode_floor = RM,
+ mode_trunc = RZ
+};
+
+enum CheckForInexactConversion {
+ kCheckForInexactConversion,
+ kDontCheckForInexactConversion
+};
+
+enum class MaxMinKind : int { kMin = 0, kMax = 1 };
+
+// -----------------------------------------------------------------------------
+// Hints.
+
+// Branch hints are not used on the LOONG64. They are defined so that they can
+// appear in shared function signatures, but will be ignored in LOONG64
+// implementations.
+enum Hint { no_hint = 0 };
+
+inline Hint NegateHint(Hint hint) { return no_hint; }
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+// These constants are declared in assembler-loong64.cc, as they use named
+// registers and other constants.
+
+// Break 0x7fff, reserved for the redirected real time call.
+const Instr rtCallRedirInstr = BREAK | call_rt_redirected;
+// A nop instruction. (Encoding of addi_w 0 0 0).
+const Instr nopInstr = ADDI_W;
+
+constexpr uint8_t kInstrSize = 4;
+constexpr uint8_t kInstrSizeLog2 = 2;
+
+class InstructionBase {
+ public:
+ enum Type {
+ kOp6Type,
+ kOp7Type,
+ kOp8Type,
+ kOp10Type,
+ kOp12Type,
+ kOp14Type,
+ kOp17Type,
+ kOp22Type,
+ kUnsupported = -1
+ };
+
+ // Get the raw instruction bits.
+ inline Instr InstructionBits() const {
+ return *reinterpret_cast<const Instr*>(this);
+ }
+
+ // Set the raw instruction bits to value.
+ inline void SetInstructionBits(Instr value) {
+ *reinterpret_cast<Instr*>(this) = value;
+ }
+
+ // Read one particular bit out of the instruction bits.
+ inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; }
+
+ // Read a bit field out of the instruction bits.
+ inline int Bits(int hi, int lo) const {
+ return (InstructionBits() >> lo) & ((2U << (hi - lo)) - 1);
+ }
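+
+ // Example: Bits(kRjShift + kRjBits - 1, kRjShift), i.e. Bits(9, 5),
+ // extracts the 5-bit rj register field (cf. RjValue() in
+ // InstructionGetters below).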
+
+ // Safe to call within InstructionType().
+ inline int RjFieldRawNoAssert() const {
+ return InstructionBits() & kRjFieldMask;
+ }
+
+ // Get the encoding type of the instruction.
+ inline Type InstructionType() const;
+
+ protected:
+ InstructionBase() {}
+};
+
+template <class T>
+class InstructionGetters : public T {
+ public:
+ inline int RjValue() const {
+ return this->Bits(kRjShift + kRjBits - 1, kRjShift);
+ }
+
+ inline int RkValue() const {
+ return this->Bits(kRkShift + kRkBits - 1, kRkShift);
+ }
+
+ inline int RdValue() const {
+ return this->Bits(kRdShift + kRdBits - 1, kRdShift);
+ }
+
+ inline int Sa2Value() const {
+ return this->Bits(kSaShift + kSa2Bits - 1, kSaShift);
+ }
+
+ inline int Sa3Value() const {
+ return this->Bits(kSaShift + kSa3Bits - 1, kSaShift);
+ }
+
+ inline int Ui5Value() const {
+ return this->Bits(kUi5Shift + kUi5Bits - 1, kUi5Shift);
+ }
+
+ inline int Ui6Value() const {
+ return this->Bits(kUi6Shift + kUi6Bits - 1, kUi6Shift);
+ }
+
+ inline int Ui12Value() const {
+ return this->Bits(kUi12Shift + kUi12Bits - 1, kUi12Shift);
+ }
+
+ inline int LsbwValue() const {
+ return this->Bits(kLsbwShift + kLsbwBits - 1, kLsbwShift);
+ }
+
+ inline int MsbwValue() const {
+ return this->Bits(kMsbwShift + kMsbwBits - 1, kMsbwShift);
+ }
+
+ inline int LsbdValue() const {
+ return this->Bits(kLsbdShift + kLsbdBits - 1, kLsbdShift);
+ }
+
+ inline int MsbdValue() const {
+ return this->Bits(kMsbdShift + kMsbdBits - 1, kMsbdShift);
+ }
+
+ inline int CondValue() const {
+ return this->Bits(kCondShift + kCondBits - 1, kCondShift);
+ }
+
+ inline int Si12Value() const {
+ return this->Bits(kSi12Shift + kSi12Bits - 1, kSi12Shift);
+ }
+
+ inline int Si14Value() const {
+ return this->Bits(kSi14Shift + kSi14Bits - 1, kSi14Shift);
+ }
+
+ inline int Si16Value() const {
+ return this->Bits(kSi16Shift + kSi16Bits - 1, kSi16Shift);
+ }
+
+ inline int Si20Value() const {
+ return this->Bits(kSi20Shift + kSi20Bits - 1, kSi20Shift);
+ }
+
+ inline int FdValue() const {
+ return this->Bits(kFdShift + kFdBits - 1, kFdShift);
+ }
+
+ inline int FaValue() const {
+ return this->Bits(kFaShift + kFaBits - 1, kFaShift);
+ }
+
+ inline int FjValue() const {
+ return this->Bits(kFjShift + kFjBits - 1, kFjShift);
+ }
+
+ inline int FkValue() const {
+ return this->Bits(kFkShift + kFkBits - 1, kFkShift);
+ }
+
+ inline int CjValue() const {
+ return this->Bits(kCjShift + kCjBits - 1, kCjShift);
+ }
+
+ inline int CdValue() const {
+ return this->Bits(kCdShift + kCdBits - 1, kCdShift);
+ }
+
+ inline int CaValue() const {
+ return this->Bits(kCaShift + kCaBits - 1, kCaShift);
+ }
+
+ inline int CodeValue() const {
+ return this->Bits(kCodeShift + kCodeBits - 1, kCodeShift);
+ }
+
+ inline int Hint5Value() const {
+ return this->Bits(kHint5Shift + kHint5Bits - 1, kHint5Shift);
+ }
+
+ inline int Hint15Value() const {
+ return this->Bits(kHint15Shift + kHint15Bits - 1, kHint15Shift);
+ }
+
+ inline int Offs16Value() const {
+ return this->Bits(kOffsLowShift + kOffsLowBits - 1, kOffsLowShift);
+ }
+
+ inline int Offs21Value() const {
+ int low = this->Bits(kOffsLowShift + kOffsLowBits - 1, kOffsLowShift);
+ int high =
+ this->Bits(kOffs21HighShift + kOffs21HighBits - 1, kOffs21HighShift);
+ return ((high << kOffsLowBits) + low);
+ }
+
+ inline int Offs26Value() const {
+ int low = this->Bits(kOffsLowShift + kOffsLowBits - 1, kOffsLowShift);
+ int high =
+ this->Bits(kOffs26HighShift + kOffs26HighBits - 1, kOffs26HighShift);
+ return ((high << kOffsLowBits) + low);
+ }
+
+ inline int RjFieldRaw() const {
+ return this->InstructionBits() & kRjFieldMask;
+ }
+
+ inline int RkFieldRaw() const {
+ return this->InstructionBits() & kRkFieldMask;
+ }
+
+ inline int RdFieldRaw() const {
+ return this->InstructionBits() & kRdFieldMask;
+ }
+
+ inline int32_t ImmValue(int bits) const { return this->Bits(bits - 1, 0); }
+
+  // TODO: Implement Imm12Value, Imm14Value and Imm16Value.
+ inline int32_t Imm12Value() const { abort(); }
+
+ inline int32_t Imm14Value() const { abort(); }
+
+ inline int32_t Imm16Value() const { abort(); }
+
+ // Say if the instruction is a break.
+ bool IsTrap() const;
+};
+
+class Instruction : public InstructionGetters<InstructionBase> {
+ public:
+ // Instructions are read of out a code stream. The only way to get a
+ // reference to an instruction is to convert a pointer. There is no way
+ // to allocate or create instances of class Instruction.
+ // Use the At(pc) function to create references to Instruction.
+ static Instruction* At(byte* pc) {
+ return reinterpret_cast<Instruction*>(pc);
+ }
+
+ private:
+ // We need to prevent the creation of instances of class Instruction.
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
+};
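+
+// Usage sketch (pc is a hypothetical pointer into a code buffer); this
+// is the intended way for decoders to consume instructions:
+//   Instruction* instr = Instruction::At(pc);
+//   if (instr->InstructionType() != InstructionBase::kUnsupported) {
+//     int rd = instr->RdValue();
+//   }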
+
+// -----------------------------------------------------------------------------
+// LOONG64 assembly various constants.
+
+const int kInvalidStackOffset = -1;
+
+static const int kNegOffset = 0x00008000;
+
+InstructionBase::Type InstructionBase::InstructionType() const {
+ InstructionBase::Type kType = kUnsupported;
+
+ // Check for kOp6Type
+ switch (Bits(31, 26) << 26) {
+ case ADDU16I_D:
+ case BEQZ:
+ case BNEZ:
+ case BCZ:
+ case JIRL:
+ case B:
+ case BL:
+ case BEQ:
+ case BNE:
+ case BLT:
+ case BGE:
+ case BLTU:
+ case BGEU:
+ kType = kOp6Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp7Type
+ switch (Bits(31, 25) << 25) {
+ case LU12I_W:
+ case LU32I_D:
+ case PCADDI:
+ case PCALAU12I:
+ case PCADDU12I:
+ case PCADDU18I:
+ kType = kOp7Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp8Type
+ switch (Bits(31, 24) << 24) {
+ case LDPTR_W:
+ case STPTR_W:
+ case LDPTR_D:
+ case STPTR_D:
+ case LL_W:
+ case SC_W:
+ case LL_D:
+ case SC_D:
+ kType = kOp8Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp10Type
+ switch (Bits(31, 22) << 22) {
+ case BSTR_W: {
+      // If Bit(21) == 0, the opcode is not BSTR_W.
+ if (Bit(21) == 0)
+ kType = kUnsupported;
+ else
+ kType = kOp10Type;
+ break;
+ }
+ case BSTRINS_D:
+ case BSTRPICK_D:
+ case SLTI:
+ case SLTUI:
+ case ADDI_W:
+ case ADDI_D:
+ case LU52I_D:
+ case ANDI:
+ case ORI:
+ case XORI:
+ case LD_B:
+ case LD_H:
+ case LD_W:
+ case LD_D:
+ case ST_B:
+ case ST_H:
+ case ST_W:
+ case ST_D:
+ case LD_BU:
+ case LD_HU:
+ case LD_WU:
+ case FLD_S:
+ case FST_S:
+ case FLD_D:
+ case FST_D:
+ kType = kOp10Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp12Type
+ switch (Bits(31, 20) << 20) {
+ case FMADD_S:
+ case FMADD_D:
+ case FMSUB_S:
+ case FMSUB_D:
+ case FNMADD_S:
+ case FNMADD_D:
+ case FNMSUB_S:
+ case FNMSUB_D:
+ case FCMP_COND_S:
+ case FCMP_COND_D:
+ case FSEL:
+ kType = kOp12Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp14Type
+ switch (Bits(31, 18) << 18) {
+ case ALSL:
+ case BYTEPICK_W:
+ case BYTEPICK_D:
+ case ALSL_D:
+ case SLLI:
+ case SRLI:
+ case SRAI:
+ case ROTRI:
+ kType = kOp14Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp17Type
+ switch (Bits(31, 15) << 15) {
+ case ADD_W:
+ case ADD_D:
+ case SUB_W:
+ case SUB_D:
+ case SLT:
+ case SLTU:
+ case MASKEQZ:
+ case MASKNEZ:
+ case NOR:
+ case AND:
+ case OR:
+ case XOR:
+ case ORN:
+ case ANDN:
+ case SLL_W:
+ case SRL_W:
+ case SRA_W:
+ case SLL_D:
+ case SRL_D:
+ case SRA_D:
+ case ROTR_D:
+ case ROTR_W:
+ case MUL_W:
+ case MULH_W:
+ case MULH_WU:
+ case MUL_D:
+ case MULH_D:
+ case MULH_DU:
+ case MULW_D_W:
+ case MULW_D_WU:
+ case DIV_W:
+ case MOD_W:
+ case DIV_WU:
+ case MOD_WU:
+ case DIV_D:
+ case MOD_D:
+ case DIV_DU:
+ case MOD_DU:
+ case BREAK:
+ case FADD_S:
+ case FADD_D:
+ case FSUB_S:
+ case FSUB_D:
+ case FMUL_S:
+ case FMUL_D:
+ case FDIV_S:
+ case FDIV_D:
+ case FMAX_S:
+ case FMAX_D:
+ case FMIN_S:
+ case FMIN_D:
+ case FMAXA_S:
+ case FMAXA_D:
+ case FMINA_S:
+ case FMINA_D:
+ case LDX_B:
+ case LDX_H:
+ case LDX_W:
+ case LDX_D:
+ case STX_B:
+ case STX_H:
+ case STX_W:
+ case STX_D:
+ case LDX_BU:
+ case LDX_HU:
+ case LDX_WU:
+ case FLDX_S:
+ case FLDX_D:
+ case FSTX_S:
+ case FSTX_D:
+ case AMSWAP_W:
+ case AMSWAP_D:
+ case AMADD_W:
+ case AMADD_D:
+ case AMAND_W:
+ case AMAND_D:
+ case AMOR_W:
+ case AMOR_D:
+ case AMXOR_W:
+ case AMXOR_D:
+ case AMMAX_W:
+ case AMMAX_D:
+ case AMMIN_W:
+ case AMMIN_D:
+ case AMMAX_WU:
+ case AMMAX_DU:
+ case AMMIN_WU:
+ case AMMIN_DU:
+ case AMSWAP_DB_W:
+ case AMSWAP_DB_D:
+ case AMADD_DB_W:
+ case AMADD_DB_D:
+ case AMAND_DB_W:
+ case AMAND_DB_D:
+ case AMOR_DB_W:
+ case AMOR_DB_D:
+ case AMXOR_DB_W:
+ case AMXOR_DB_D:
+ case AMMAX_DB_W:
+ case AMMAX_DB_D:
+ case AMMIN_DB_W:
+ case AMMIN_DB_D:
+ case AMMAX_DB_WU:
+ case AMMAX_DB_DU:
+ case AMMIN_DB_WU:
+ case AMMIN_DB_DU:
+ case DBAR:
+ case IBAR:
+ case FSCALEB_S:
+ case FSCALEB_D:
+ case FCOPYSIGN_S:
+ case FCOPYSIGN_D:
+ kType = kOp17Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ if (kType == kUnsupported) {
+ // Check for kOp22Type
+ switch (Bits(31, 10) << 10) {
+ case CLZ_W:
+ case CTZ_W:
+ case CLZ_D:
+ case CTZ_D:
+ case REVB_2H:
+ case REVB_4H:
+ case REVB_2W:
+ case REVB_D:
+ case REVH_2W:
+ case REVH_D:
+ case BITREV_4B:
+ case BITREV_8B:
+ case BITREV_W:
+ case BITREV_D:
+ case EXT_W_B:
+ case EXT_W_H:
+ case FABS_S:
+ case FABS_D:
+ case FNEG_S:
+ case FNEG_D:
+ case FSQRT_S:
+ case FSQRT_D:
+ case FMOV_S:
+ case FMOV_D:
+ case MOVGR2FR_W:
+ case MOVGR2FR_D:
+ case MOVGR2FRH_W:
+ case MOVFR2GR_S:
+ case MOVFR2GR_D:
+ case MOVFRH2GR_S:
+ case MOVGR2FCSR:
+ case MOVFCSR2GR:
+ case FCVT_S_D:
+ case FCVT_D_S:
+ case FTINTRM_W_S:
+ case FTINTRM_W_D:
+ case FTINTRM_L_S:
+ case FTINTRM_L_D:
+ case FTINTRP_W_S:
+ case FTINTRP_W_D:
+ case FTINTRP_L_S:
+ case FTINTRP_L_D:
+ case FTINTRZ_W_S:
+ case FTINTRZ_W_D:
+ case FTINTRZ_L_S:
+ case FTINTRZ_L_D:
+ case FTINTRNE_W_S:
+ case FTINTRNE_W_D:
+ case FTINTRNE_L_S:
+ case FTINTRNE_L_D:
+ case FTINT_W_S:
+ case FTINT_W_D:
+ case FTINT_L_S:
+ case FTINT_L_D:
+ case FFINT_S_W:
+ case FFINT_S_L:
+ case FFINT_D_W:
+ case FFINT_D_L:
+ case FRINT_S:
+ case FRINT_D:
+ case MOVFR2CF:
+ case MOVCF2FR:
+ case MOVGR2CF:
+ case MOVCF2GR:
+ case FRECIP_S:
+ case FRECIP_D:
+ case FRSQRT_S:
+ case FRSQRT_D:
+ case FCLASS_S:
+ case FCLASS_D:
+ case FLOGB_S:
+ case FLOGB_D:
+ case CLO_W:
+ case CTO_W:
+ case CLO_D:
+ case CTO_D:
+ kType = kOp22Type;
+ break;
+ default:
+ kType = kUnsupported;
+ }
+ }
+
+ return kType;
+}
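+
+// Decoding note: each switch above extracts a progressively longer
+// opcode prefix (6, 7, 8, ... bits down from bit 31) and shifts it back
+// to its in-word position, because the opcode constants are defined as
+// full 32-bit patterns. For example, for a word w whose top six bits
+// match BEQ, (w >> 26) << 26 == BEQ, so w decodes as kOp6Type.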
+
+// -----------------------------------------------------------------------------
+// Instructions.
+
+template <class P>
+bool InstructionGetters<P>::IsTrap() const {
+ return true;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_LOONG64_CONSTANTS_LOONG64_H_
diff --git a/deps/v8/src/codegen/loong64/cpu-loong64.cc b/deps/v8/src/codegen/loong64/cpu-loong64.cc
new file mode 100644
index 0000000000..6b4040676d
--- /dev/null
+++ b/deps/v8/src/codegen/loong64/cpu-loong64.cc
@@ -0,0 +1,38 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// CPU specific code for LoongArch independent of OS goes here.
+
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/codegen/cpu-features.h"
+
+namespace v8 {
+namespace internal {
+
+void CpuFeatures::FlushICache(void* start, size_t size) {
+#if defined(V8_HOST_ARCH_LOONG64)
+  // Nothing to do when there are no instructions to flush.
+ if (size == 0) {
+ return;
+ }
+
+#if defined(ANDROID) && !defined(__LP64__)
+  // Bionic cacheflush can typically run in userland, avoiding a kernel call.
+ char* end = reinterpret_cast<char*>(start) + size;
+ cacheflush(reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end),
+ 0);
+#else // ANDROID
+ asm("ibar 0\n");
+#endif // ANDROID
+#endif // V8_HOST_ARCH_LOONG64
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_LOONG64
diff --git a/deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h b/deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h
new file mode 100644
index 0000000000..7947c97dc3
--- /dev/null
+++ b/deps/v8/src/codegen/loong64/interface-descriptors-loong64-inl.h
@@ -0,0 +1,278 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_LOONG64_INTERFACE_DESCRIPTORS_LOONG64_INL_H_
+#define V8_CODEGEN_LOONG64_INTERFACE_DESCRIPTORS_LOONG64_INL_H_
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/codegen/interface-descriptors.h"
+#include "src/execution/frames.h"
+
+namespace v8 {
+namespace internal {
+
+constexpr auto CallInterfaceDescriptor::DefaultRegisterArray() {
+ auto registers = RegisterArray(a0, a1, a2, a3, a4);
+ STATIC_ASSERT(registers.size() == kMaxBuiltinRegisterParams);
+ return registers;
+}
+
+#if DEBUG
+template <typename DerivedDescriptor>
+void StaticCallInterfaceDescriptor<DerivedDescriptor>::
+ VerifyArgumentRegisterCount(CallInterfaceDescriptorData* data, int argc) {
+ RegList allocatable_regs = data->allocatable_registers();
+  if (argc >= 1) DCHECK(allocatable_regs & a0.bit());
+  if (argc >= 2) DCHECK(allocatable_regs & a1.bit());
+  if (argc >= 3) DCHECK(allocatable_regs & a2.bit());
+  if (argc >= 4) DCHECK(allocatable_regs & a3.bit());
+  if (argc >= 5) DCHECK(allocatable_regs & a4.bit());
+  if (argc >= 6) DCHECK(allocatable_regs & a5.bit());
+  if (argc >= 7) DCHECK(allocatable_regs & a6.bit());
+  if (argc >= 8) DCHECK(allocatable_regs & a7.bit());
+ // Additional arguments are passed on the stack.
+}
+#endif // DEBUG
+
+// static
+constexpr auto WriteBarrierDescriptor::registers() {
+ return RegisterArray(a1, a5, a4, a2, a0, a3);
+}
+
+// static
+constexpr auto DynamicCheckMapsDescriptor::registers() {
+ STATIC_ASSERT(kReturnRegister0 == a0);
+ return RegisterArray(a0, a1, a2, a3, cp);
+}
+
+// static
+constexpr auto DynamicCheckMapsWithFeedbackVectorDescriptor::registers() {
+ STATIC_ASSERT(kReturnRegister0 == a0);
+ return RegisterArray(a0, a1, a2, a3, cp);
+}
+
+// static
+constexpr Register LoadDescriptor::ReceiverRegister() { return a1; }
+// static
+constexpr Register LoadDescriptor::NameRegister() { return a2; }
+// static
+constexpr Register LoadDescriptor::SlotRegister() { return a0; }
+
+// static
+constexpr Register LoadWithVectorDescriptor::VectorRegister() { return a3; }
+
+// static
+constexpr Register
+LoadWithReceiverAndVectorDescriptor::LookupStartObjectRegister() {
+ return a4;
+}
+
+// static
+constexpr Register StoreDescriptor::ReceiverRegister() { return a1; }
+// static
+constexpr Register StoreDescriptor::NameRegister() { return a2; }
+// static
+constexpr Register StoreDescriptor::ValueRegister() { return a0; }
+// static
+constexpr Register StoreDescriptor::SlotRegister() { return a4; }
+
+// static
+constexpr Register StoreWithVectorDescriptor::VectorRegister() { return a3; }
+
+// static
+constexpr Register StoreTransitionDescriptor::MapRegister() { return a5; }
+
+// static
+constexpr Register ApiGetterDescriptor::HolderRegister() { return a0; }
+// static
+constexpr Register ApiGetterDescriptor::CallbackRegister() { return a3; }
+
+// static
+constexpr Register GrowArrayElementsDescriptor::ObjectRegister() { return a0; }
+// static
+constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
+ return a2;
+}
+
+// static
+constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() { return a3; }
+
+// static
+constexpr Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
+
+// static
+constexpr auto TypeofDescriptor::registers() { return RegisterArray(a3); }
+
+// static
+constexpr auto CallTrampolineDescriptor::registers() {
+ // a1: target
+ // a0: number of arguments
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto CallVarargsDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a4 : arguments list length (untagged)
+ // a2 : arguments list (FixedArray)
+ return RegisterArray(a1, a0, a4, a2);
+}
+
+// static
+constexpr auto CallForwardVarargsDescriptor::registers() {
+ // a1: the target to call
+ // a0: number of arguments
+ // a2: start index (to support rest parameters)
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto CallFunctionTemplateDescriptor::registers() {
+ // a1 : function template info
+ // a0 : number of arguments (on the stack, not including receiver)
+ return RegisterArray(a1, a0);
+}
+
+// static
+constexpr auto CallWithSpreadDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a2 : the object to spread
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto CallWithArrayLikeDescriptor::registers() {
+ // a1 : the target to call
+ // a2 : the arguments list
+ return RegisterArray(a1, a2);
+}
+
+// static
+constexpr auto ConstructVarargsDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // a4 : arguments list length (untagged)
+ // a2 : arguments list (FixedArray)
+ return RegisterArray(a1, a3, a0, a4, a2);
+}
+
+// static
+constexpr auto ConstructForwardVarargsDescriptor::registers() {
+ // a1: the target to call
+ // a3: new target
+ // a0: number of arguments
+ // a2: start index (to support rest parameters)
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto ConstructWithSpreadDescriptor::registers() {
+ // a0 : number of arguments (on the stack, not including receiver)
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the object to spread
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto ConstructWithArrayLikeDescriptor::registers() {
+ // a1 : the target to call
+ // a3 : the new target
+ // a2 : the arguments list
+ return RegisterArray(a1, a3, a2);
+}
+
+// static
+constexpr auto ConstructStubDescriptor::registers() {
+ // a1: target
+ // a3: new target
+ // a0: number of arguments
+ // a2: allocation site or undefined
+ return RegisterArray(a1, a3, a0, a2);
+}
+
+// static
+constexpr auto AbortDescriptor::registers() { return RegisterArray(a0); }
+
+// static
+constexpr auto CompareDescriptor::registers() { return RegisterArray(a1, a0); }
+
+// static
+constexpr auto Compare_BaselineDescriptor::registers() {
+ // a1: left operand
+ // a0: right operand
+ // a2: feedback slot
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto BinaryOpDescriptor::registers() { return RegisterArray(a1, a0); }
+
+// static
+constexpr auto BinaryOp_BaselineDescriptor::registers() {
+ // a1: left operand
+ // a0: right operand
+ // a2: feedback slot
+ return RegisterArray(a1, a0, a2);
+}
+
+// static
+constexpr auto ApiCallbackDescriptor::registers() {
+ // a1 : kApiFunctionAddress
+ // a2 : kArgc
+ // a3 : kCallData
+ // a0 : kHolder
+ return RegisterArray(a1, a2, a3, a0);
+}
+
+// static
+constexpr auto InterpreterDispatchDescriptor::registers() {
+ return RegisterArray(
+ kInterpreterAccumulatorRegister, kInterpreterBytecodeOffsetRegister,
+ kInterpreterBytecodeArrayRegister, kInterpreterDispatchTableRegister);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenCallDescriptor::registers() {
+ // a0 : argument count (not including receiver)
+ // a2 : address of first argument
+  // a1 : the target callable to be called
+ return RegisterArray(a0, a2, a1);
+}
+
+// static
+constexpr auto InterpreterPushArgsThenConstructDescriptor::registers() {
+ // a0 : argument count (not including receiver)
+ // a4 : address of the first argument
+ // a1 : constructor to call
+ // a3 : new target
+ // a2 : allocation site feedback if available, undefined otherwise
+ return RegisterArray(a0, a4, a1, a3, a2);
+}
+
+// static
+constexpr auto ResumeGeneratorDescriptor::registers() {
+  // a0 : the value to pass to the generator
+ // a1 : the JSGeneratorObject to resume
+ return RegisterArray(a0, a1);
+}
+
+// static
+constexpr auto RunMicrotasksEntryDescriptor::registers() {
+ return RegisterArray(a0, a1);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_LOONG64
+
+#endif // V8_CODEGEN_LOONG64_INTERFACE_DESCRIPTORS_LOONG64_INL_H_
diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
new file mode 100644
index 0000000000..b999c1166b
--- /dev/null
+++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.cc
@@ -0,0 +1,4107 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits.h> // For LONG_MIN, LONG_MAX.
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/code-factory.h"
+#include "src/codegen/external-reference-table.h"
+#include "src/codegen/interface-descriptors-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/register-configuration.h"
+#include "src/debug/debug.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/frames-inl.h"
+#include "src/heap/memory-chunk.h"
+#include "src/init/bootstrapper.h"
+#include "src/logging/counters.h"
+#include "src/objects/heap-number.h"
+#include "src/runtime/runtime.h"
+#include "src/snapshot/snapshot.h"
+
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-code-manager.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
+// Satisfy cpplint check, but don't include platform-specific header. It is
+// included recursively via macro-assembler.h.
+#if 0
+#include "src/codegen/loong64/macro-assembler-loong64.h"
+#endif
+
+namespace v8 {
+namespace internal {
+
+static inline bool IsZero(const Operand& rk) {
+ if (rk.is_reg()) {
+ return rk.rm() == zero_reg;
+ } else {
+ return rk.immediate() == 0;
+ }
+}
+
+int TurboAssembler::RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1,
+ Register exclusion2,
+ Register exclusion3) const {
+ int bytes = 0;
+ RegList exclusions = 0;
+ if (exclusion1 != no_reg) {
+ exclusions |= exclusion1.bit();
+ if (exclusion2 != no_reg) {
+ exclusions |= exclusion2.bit();
+ if (exclusion3 != no_reg) {
+ exclusions |= exclusion3.bit();
+ }
+ }
+ }
+
+ RegList list = kJSCallerSaved & ~exclusions;
+ bytes += NumRegs(list) * kPointerSize;
+
+ if (fp_mode == SaveFPRegsMode::kSave) {
+ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
+ }
+
+ return bytes;
+}
+
+int TurboAssembler::PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+ Register exclusion2, Register exclusion3) {
+ int bytes = 0;
+ RegList exclusions = 0;
+ if (exclusion1 != no_reg) {
+ exclusions |= exclusion1.bit();
+ if (exclusion2 != no_reg) {
+ exclusions |= exclusion2.bit();
+ if (exclusion3 != no_reg) {
+ exclusions |= exclusion3.bit();
+ }
+ }
+ }
+
+ RegList list = kJSCallerSaved & ~exclusions;
+ MultiPush(list);
+ bytes += NumRegs(list) * kPointerSize;
+
+ if (fp_mode == SaveFPRegsMode::kSave) {
+ MultiPushFPU(kCallerSavedFPU);
+ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
+ }
+
+ return bytes;
+}
+
+int TurboAssembler::PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1,
+ Register exclusion2, Register exclusion3) {
+ int bytes = 0;
+ if (fp_mode == SaveFPRegsMode::kSave) {
+ MultiPopFPU(kCallerSavedFPU);
+ bytes += NumRegs(kCallerSavedFPU) * kDoubleSize;
+ }
+
+ RegList exclusions = 0;
+ if (exclusion1 != no_reg) {
+ exclusions |= exclusion1.bit();
+ if (exclusion2 != no_reg) {
+ exclusions |= exclusion2.bit();
+ if (exclusion3 != no_reg) {
+ exclusions |= exclusion3.bit();
+ }
+ }
+ }
+
+ RegList list = kJSCallerSaved & ~exclusions;
+ MultiPop(list);
+ bytes += NumRegs(list) * kPointerSize;
+
+ return bytes;
+}
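+
+// Note the mirrored order: PushCallerSaved pushes the GP registers and
+// then the FPU registers, while PopCallerSaved pops FPU first and GP
+// second, so a matched Push/Pop pair round-trips the caller-saved state.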
+
+void TurboAssembler::LoadRoot(Register destination, RootIndex index) {
+ Ld_d(destination, MemOperand(s6, RootRegisterOffsetForRootIndex(index)));
+}
+
+void TurboAssembler::PushCommonFrame(Register marker_reg) {
+ if (marker_reg.is_valid()) {
+ Push(ra, fp, marker_reg);
+ Add_d(fp, sp, Operand(kPointerSize));
+ } else {
+ Push(ra, fp);
+ mov(fp, sp);
+ }
+}
+
+void TurboAssembler::PushStandardFrame(Register function_reg) {
+ int offset = -StandardFrameConstants::kContextOffset;
+ if (function_reg.is_valid()) {
+ Push(ra, fp, cp, function_reg, kJavaScriptCallArgCountRegister);
+ offset += 2 * kPointerSize;
+ } else {
+ Push(ra, fp, cp, kJavaScriptCallArgCountRegister);
+ offset += kPointerSize;
+ }
+ Add_d(fp, sp, Operand(offset));
+}
+
+// Clobbers object, dst, value, and ra, if (ra_status == kRAHasBeenSaved)
+// The register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWriteField(Register object, int offset,
+ Register value, RAStatus ra_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of Smis.
+ Label done;
+
+ // Skip barrier if writing a smi.
+ if (smi_check == SmiCheck::kInline) {
+ JumpIfSmi(value, &done);
+ }
+
+ // Although the object register is tagged, the offset is relative to the start
+  // of the object, so the offset must be a multiple of kPointerSize.
+ DCHECK(IsAligned(offset, kPointerSize));
+
+ if (FLAG_debug_code) {
+ Label ok;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Add_d(scratch, object, offset - kHeapObjectTag);
+ And(scratch, scratch, Operand(kPointerSize - 1));
+ Branch(&ok, eq, scratch, Operand(zero_reg));
+ Abort(AbortReason::kUnalignedCellInWriteBarrier);
+ bind(&ok);
+ }
+
+ RecordWrite(object, Operand(offset - kHeapObjectTag), value, ra_status,
+ save_fp, remembered_set_action, SmiCheck::kOmit);
+
+ bind(&done);
+}
+
+void TurboAssembler::MaybeSaveRegisters(RegList registers) {
+ if (registers == 0) return;
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
+ }
+ MultiPush(regs);
+}
+
+void TurboAssembler::MaybeRestoreRegisters(RegList registers) {
+ if (registers == 0) return;
+ RegList regs = 0;
+ for (int i = 0; i < Register::kNumRegisters; ++i) {
+ if ((registers >> i) & 1u) {
+ regs |= Register::from_code(i).bit();
+ }
+ }
+ MultiPop(regs);
+}
+
+void TurboAssembler::CallEphemeronKeyBarrier(Register object, Operand offset,
+ SaveFPRegsMode fp_mode) {
+ RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object);
+ MaybeSaveRegisters(registers);
+
+ Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_address_parameter =
+ WriteBarrierDescriptor::SlotAddressRegister();
+
+ MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset);
+
+ Call(isolate()->builtins()->code_handle(
+ Builtins::GetEphemeronKeyBarrierStub(fp_mode)),
+ RelocInfo::CODE_TARGET);
+ MaybeRestoreRegisters(registers);
+}
+
+void TurboAssembler::CallRecordWriteStubSaveRegisters(
+ Register object, Operand offset, RememberedSetAction remembered_set_action,
+ SaveFPRegsMode fp_mode, StubCallMode mode) {
+ ASM_CODE_COMMENT(this);
+ RegList registers = WriteBarrierDescriptor::ComputeSavedRegisters(object);
+ MaybeSaveRegisters(registers);
+
+ Register object_parameter = WriteBarrierDescriptor::ObjectRegister();
+ Register slot_address_parameter =
+ WriteBarrierDescriptor::SlotAddressRegister();
+
+ MoveObjectAndSlot(object_parameter, slot_address_parameter, object, offset);
+
+ CallRecordWriteStub(object_parameter, slot_address_parameter,
+ remembered_set_action, fp_mode, mode);
+
+ MaybeRestoreRegisters(registers);
+}
+
+void TurboAssembler::CallRecordWriteStub(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode) {
+ // Use CallRecordWriteStubSaveRegisters if the object and slot registers
+ // need to be caller saved.
+ DCHECK_EQ(WriteBarrierDescriptor::ObjectRegister(), object);
+ DCHECK_EQ(WriteBarrierDescriptor::SlotAddressRegister(), slot_address);
+#if V8_ENABLE_WEBASSEMBLY
+ if (mode == StubCallMode::kCallWasmRuntimeStub) {
+ auto wasm_target =
+ wasm::WasmCode::GetRecordWriteStub(remembered_set_action, fp_mode);
+ Call(wasm_target, RelocInfo::WASM_STUB_CALL);
+#else
+ if (false) {
+#endif
+ } else {
+ auto builtin = Builtins::GetRecordWriteStub(remembered_set_action, fp_mode);
+ if (options().inline_offheap_trampolines) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
+ Call(scratch);
+ } else {
+ Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
+ Call(code_target, RelocInfo::CODE_TARGET);
+ }
+ }
+}
+
+void TurboAssembler::MoveObjectAndSlot(Register dst_object, Register dst_slot,
+ Register object, Operand offset) {
+ DCHECK_NE(dst_object, dst_slot);
+ // If `offset` is a register, it cannot overlap with `object`.
+ DCHECK_IMPLIES(!offset.IsImmediate(), offset.rm() != object);
+
+ // If the slot register does not overlap with the object register, we can
+ // overwrite it.
+ if (dst_slot != object) {
+ Add_d(dst_slot, object, offset);
+ mov(dst_object, object);
+ return;
+ }
+
+ DCHECK_EQ(dst_slot, object);
+
+ // If the destination object register does not overlap with the offset
+ // register, we can overwrite it.
+ if (offset.IsImmediate() || (offset.rm() != dst_object)) {
+ mov(dst_object, dst_slot);
+ Add_d(dst_slot, dst_slot, offset);
+ return;
+ }
+
+ DCHECK_EQ(dst_object, offset.rm());
+
+ // We only have `dst_slot` and `dst_object` left as distinct registers so we
+  // have to swap them. We write this as an add+sub sequence to avoid using a
+ // scratch register.
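+  // Illustration with values (s = object pointer, o = offset register):
+  //   after Add_d: dst_slot   == s + o
+  //   after Sub_d: dst_object == (s + o) - o == s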
+ Add_d(dst_slot, dst_slot, dst_object);
+ Sub_d(dst_object, dst_slot, dst_object);
+}
+
+// Clobbers object, address, value, and ra, if (ra_status == kRAHasBeenSaved)
+// The register 'object' contains a heap object pointer. The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(Register object, Operand offset,
+ Register value, RAStatus ra_status,
+ SaveFPRegsMode fp_mode,
+ RememberedSetAction remembered_set_action,
+ SmiCheck smi_check) {
+ DCHECK(!AreAliased(object, value));
+
+ if (FLAG_debug_code) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Add_d(scratch, object, offset);
+ Ld_d(scratch, MemOperand(scratch, 0));
+ Assert(eq, AbortReason::kWrongAddressOrValuePassedToRecordWrite, scratch,
+ Operand(value));
+ }
+
+ if ((remembered_set_action == RememberedSetAction::kOmit &&
+ !FLAG_incremental_marking) ||
+ FLAG_disable_write_barriers) {
+ return;
+ }
+
+ // First, check if a write barrier is even needed. The tests below
+ // catch stores of smis and stores into the young generation.
+ Label done;
+
+ if (smi_check == SmiCheck::kInline) {
+ DCHECK_EQ(0, kSmiTag);
+ JumpIfSmi(value, &done);
+ }
+
+ CheckPageFlag(value, MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ &done);
+
+ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask, eq,
+ &done);
+
+ // Record the actual write.
+ if (ra_status == kRAHasNotBeenSaved) {
+ Push(ra);
+ }
+
+ Register slot_address = WriteBarrierDescriptor::SlotAddressRegister();
+ DCHECK(!AreAliased(object, slot_address, value));
+ DCHECK(offset.IsImmediate());
+ Add_d(slot_address, object, offset);
+ CallRecordWriteStub(object, slot_address, remembered_set_action, fp_mode);
+ if (ra_status == kRAHasNotBeenSaved) {
+ Pop(ra);
+ }
+
+ bind(&done);
+}
+
+// ---------------------------------------------------------------------------
+// Instruction macros.
+
+void TurboAssembler::Add_w(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ add_w(rd, rj, rk.rm());
+ } else {
+ if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) {
+ addi_w(rd, rj, static_cast<int32_t>(rk.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ add_w(rd, rj, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Add_d(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ add_d(rd, rj, rk.rm());
+ } else {
+ if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) {
+ addi_d(rd, rj, static_cast<int32_t>(rk.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ add_d(rd, rj, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Sub_w(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ sub_w(rd, rj, rk.rm());
+ } else {
+ DCHECK(is_int32(rk.immediate()));
+ if (is_int12(-rk.immediate()) && !MustUseReg(rk.rmode())) {
+ // No subi_w instr, use addi_w(x, y, -imm).
+ addi_w(rd, rj, static_cast<int32_t>(-rk.immediate()));
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ if (-rk.immediate() >> 12 == 0 && !MustUseReg(rk.rmode())) {
+        // Use load -imm and add_w when loading -imm generates one instruction.
+ li(scratch, -rk.immediate());
+ add_w(rd, rj, scratch);
+ } else {
+ // li handles the relocation.
+ li(scratch, rk);
+ sub_w(rd, rj, scratch);
+ }
+ }
+ }
+}
+
+void TurboAssembler::Sub_d(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ sub_d(rd, rj, rk.rm());
+ } else if (is_int12(-rk.immediate()) && !MustUseReg(rk.rmode())) {
+ // No subi_d instr, use addi_d(x, y, -imm).
+ addi_d(rd, rj, static_cast<int32_t>(-rk.immediate()));
+ } else {
+ DCHECK(rj != t7);
+ int li_count = InstrCountForLi64Bit(rk.immediate());
+ int li_neg_count = InstrCountForLi64Bit(-rk.immediate());
+ if (li_neg_count < li_count && !MustUseReg(rk.rmode())) {
+ // Use load -imm and add_d when loading -imm generates one instruction.
+ DCHECK(rk.immediate() != std::numeric_limits<int32_t>::min());
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(-rk.immediate()));
+ add_d(rd, rj, scratch);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, rk);
+ sub_d(rd, rj, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Mul_w(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mul_w(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mul_w(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Mulh_w(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mulh_w(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mulh_w(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Mulh_wu(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mulh_wu(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mulh_wu(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Mul_d(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mul_d(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mul_d(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Mulh_d(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mulh_d(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mulh_d(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Div_w(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ div_w(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ div_w(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Mod_w(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mod_w(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mod_w(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Mod_wu(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mod_wu(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mod_wu(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Div_d(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ div_d(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ div_d(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Div_wu(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ div_wu(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ div_wu(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Div_du(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ div_du(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ div_du(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Mod_d(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mod_d(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mod_d(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Mod_du(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ mod_du(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ mod_du(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::And(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ and_(rd, rj, rk.rm());
+ } else {
+ if (is_uint12(rk.immediate()) && !MustUseReg(rk.rmode())) {
+ andi(rd, rj, static_cast<int32_t>(rk.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ and_(rd, rj, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Or(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ or_(rd, rj, rk.rm());
+ } else {
+ if (is_uint12(rk.immediate()) && !MustUseReg(rk.rmode())) {
+ ori(rd, rj, static_cast<int32_t>(rk.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ or_(rd, rj, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Xor(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ xor_(rd, rj, rk.rm());
+ } else {
+ if (is_uint12(rk.immediate()) && !MustUseReg(rk.rmode())) {
+ xori(rd, rj, static_cast<int32_t>(rk.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ xor_(rd, rj, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Nor(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ nor(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ nor(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Andn(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ andn(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ andn(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Orn(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ orn(rd, rj, rk.rm());
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ orn(rd, rj, scratch);
+ }
+}
+
+void TurboAssembler::Neg(Register rj, const Operand& rk) {
+ DCHECK(rk.is_reg());
+ sub_d(rj, zero_reg, rk.rm());
+}
+
+void TurboAssembler::Slt(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ slt(rd, rj, rk.rm());
+ } else {
+ if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) {
+ slti(rd, rj, static_cast<int32_t>(rk.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ slt(rd, rj, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Sltu(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ sltu(rd, rj, rk.rm());
+ } else {
+ if (is_int12(rk.immediate()) && !MustUseReg(rk.rmode())) {
+ sltui(rd, rj, static_cast<int32_t>(rk.immediate()));
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ sltu(rd, rj, scratch);
+ }
+ }
+}
+
+void TurboAssembler::Sle(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ slt(rd, rk.rm(), rj);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ slt(rd, scratch, rj);
+ }
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sleu(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ sltu(rd, rk.rm(), rj);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ sltu(rd, scratch, rj);
+ }
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sge(Register rd, Register rj, const Operand& rk) {
+ Slt(rd, rj, rk);
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sgeu(Register rd, Register rj, const Operand& rk) {
+ Sltu(rd, rj, rk);
+ xori(rd, rd, 1);
+}
+
+void TurboAssembler::Sgt(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ slt(rd, rk.rm(), rj);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ slt(rd, scratch, rj);
+ }
+}
+
+void TurboAssembler::Sgtu(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ sltu(rd, rk.rm(), rj);
+ } else {
+ // li handles the relocation.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(rj != scratch);
+ li(scratch, rk);
+ sltu(rd, scratch, rj);
+ }
+}
+
+void TurboAssembler::Rotr_w(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ rotr_w(rd, rj, rk.rm());
+ } else {
+ int64_t ror_value = rk.immediate() % 32;
+ if (ror_value < 0) {
+ ror_value += 32;
+ }
+ rotri_w(rd, rj, ror_value);
+ }
+}
+
+void TurboAssembler::Rotr_d(Register rd, Register rj, const Operand& rk) {
+ if (rk.is_reg()) {
+ rotr_d(rd, rj, rk.rm());
+ } else {
+ int64_t dror_value = rk.immediate() % 64;
+ if (dror_value < 0) dror_value += 64;
+ rotri_d(rd, rj, dror_value);
+ }
+}
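+
+// Example: an immediate rotate amount of -1 normalizes to 31 in Rotr_w
+// (in C++, -1 % 32 == -1, then + 32 is applied), so
+// Rotr_w(rd, rj, Operand(-1)) emits rotri_w(rd, rj, 31); Rotr_d
+// normalizes modulo 64 the same way.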
+
+void TurboAssembler::Alsl_w(Register rd, Register rj, Register rk, uint8_t sa,
+ Register scratch) {
+ DCHECK(sa >= 1 && sa <= 31);
+ if (sa <= 4) {
+ alsl_w(rd, rj, rk, sa);
+ } else {
+ Register tmp = rd == rk ? scratch : rd;
+ DCHECK(tmp != rk);
+ slli_w(tmp, rj, sa);
+ add_w(rd, rk, tmp);
+ }
+}
+
+void TurboAssembler::Alsl_d(Register rd, Register rj, Register rk, uint8_t sa,
+ Register scratch) {
+ DCHECK(sa >= 1 && sa <= 31);
+ if (sa <= 4) {
+ alsl_d(rd, rj, rk, sa);
+ } else {
+ Register tmp = rd == rk ? scratch : rd;
+ DCHECK(tmp != rk);
+ slli_d(tmp, rj, sa);
+ add_d(rd, rk, tmp);
+ }
+}
+
+// ------------Pseudo-instructions-------------
+
+// Change endianness
+void TurboAssembler::ByteSwapSigned(Register dest, Register src,
+ int operand_size) {
+ DCHECK(operand_size == 2 || operand_size == 4 || operand_size == 8);
+ if (operand_size == 2) {
+ revb_2h(dest, src);
+ ext_w_h(dest, dest);
+ } else if (operand_size == 4) {
+ revb_2w(dest, src);
+ slli_w(dest, dest, 0);
+ } else {
+    revb_d(dest, src);
+ }
+}
+
+void TurboAssembler::ByteSwapUnsigned(Register dest, Register src,
+ int operand_size) {
+ DCHECK(operand_size == 2 || operand_size == 4);
+ if (operand_size == 2) {
+ revb_2h(dest, src);
+ bstrins_d(dest, zero_reg, 63, 16);
+ } else {
+ revb_2w(dest, src);
+ bstrins_d(dest, zero_reg, 63, 32);
+ }
+}
+
+void TurboAssembler::Ld_b(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ ldx_b(rd, source.base(), source.index());
+ } else {
+ ld_b(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::Ld_bu(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ ldx_bu(rd, source.base(), source.index());
+ } else {
+ ld_bu(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::St_b(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ stx_b(rd, source.base(), source.index());
+ } else {
+ st_b(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::Ld_h(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ ldx_h(rd, source.base(), source.index());
+ } else {
+ ld_h(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::Ld_hu(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ ldx_hu(rd, source.base(), source.index());
+ } else {
+ ld_hu(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::St_h(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ stx_h(rd, source.base(), source.index());
+ } else {
+ st_h(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::Ld_w(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+
+ if (!(source.hasIndexReg()) && is_int16(source.offset()) &&
+ (source.offset() & 0b11) == 0) {
+ ldptr_w(rd, source.base(), source.offset());
+ return;
+ }
+
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ ldx_w(rd, source.base(), source.index());
+ } else {
+ ld_w(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::Ld_wu(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ ldx_wu(rd, source.base(), source.index());
+ } else {
+ ld_wu(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::St_w(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+
+ if (!(source.hasIndexReg()) && is_int16(source.offset()) &&
+ (source.offset() & 0b11) == 0) {
+ stptr_w(rd, source.base(), source.offset());
+ return;
+ }
+
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ stx_w(rd, source.base(), source.index());
+ } else {
+ st_w(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::Ld_d(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+
+ if (!(source.hasIndexReg()) && is_int16(source.offset()) &&
+ (source.offset() & 0b11) == 0) {
+ ldptr_d(rd, source.base(), source.offset());
+ return;
+ }
+
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ ldx_d(rd, source.base(), source.index());
+ } else {
+ ld_d(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::St_d(Register rd, const MemOperand& rj) {
+ MemOperand source = rj;
+
+ if (!(source.hasIndexReg()) && is_int16(source.offset()) &&
+ (source.offset() & 0b11) == 0) {
+ stptr_d(rd, source.base(), source.offset());
+ return;
+ }
+
+ AdjustBaseAndOffset(&source);
+ if (source.hasIndexReg()) {
+ stx_d(rd, source.base(), source.index());
+ } else {
+ st_d(rd, source.base(), source.offset());
+ }
+}
+
+void TurboAssembler::Fld_s(FPURegister fd, const MemOperand& src) {
+ MemOperand tmp = src;
+ AdjustBaseAndOffset(&tmp);
+ if (tmp.hasIndexReg()) {
+ fldx_s(fd, tmp.base(), tmp.index());
+ } else {
+ fld_s(fd, tmp.base(), tmp.offset());
+ }
+}
+
+void TurboAssembler::Fst_s(FPURegister fs, const MemOperand& src) {
+ MemOperand tmp = src;
+ AdjustBaseAndOffset(&tmp);
+ if (tmp.hasIndexReg()) {
+ fstx_s(fs, tmp.base(), tmp.index());
+ } else {
+ fst_s(fs, tmp.base(), tmp.offset());
+ }
+}
+
+void TurboAssembler::Fld_d(FPURegister fd, const MemOperand& src) {
+ MemOperand tmp = src;
+ AdjustBaseAndOffset(&tmp);
+ if (tmp.hasIndexReg()) {
+ fldx_d(fd, tmp.base(), tmp.index());
+ } else {
+ fld_d(fd, tmp.base(), tmp.offset());
+ }
+}
+
+void TurboAssembler::Fst_d(FPURegister fs, const MemOperand& src) {
+ MemOperand tmp = src;
+ AdjustBaseAndOffset(&tmp);
+ if (tmp.hasIndexReg()) {
+ fstx_d(fs, tmp.base(), tmp.index());
+ } else {
+ fst_d(fs, tmp.base(), tmp.offset());
+ }
+}
+
+void TurboAssembler::Ll_w(Register rd, const MemOperand& rj) {
+ DCHECK(!rj.hasIndexReg());
+ bool is_one_instruction = is_int14(rj.offset());
+ if (is_one_instruction) {
+ ll_w(rd, rj.base(), rj.offset());
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, rj.offset());
+ add_d(scratch, scratch, rj.base());
+ ll_w(rd, scratch, 0);
+ }
+}
+
+void TurboAssembler::Ll_d(Register rd, const MemOperand& rj) {
+ DCHECK(!rj.hasIndexReg());
+ bool is_one_instruction = is_int14(rj.offset());
+ if (is_one_instruction) {
+ ll_d(rd, rj.base(), rj.offset());
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, rj.offset());
+ add_d(scratch, scratch, rj.base());
+ ll_d(rd, scratch, 0);
+ }
+}
+
+void TurboAssembler::Sc_w(Register rd, const MemOperand& rj) {
+ DCHECK(!rj.hasIndexReg());
+ bool is_one_instruction = is_int14(rj.offset());
+ if (is_one_instruction) {
+ sc_w(rd, rj.base(), rj.offset());
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, rj.offset());
+ add_d(scratch, scratch, rj.base());
+ sc_w(rd, scratch, 0);
+ }
+}
+
+void TurboAssembler::Sc_d(Register rd, const MemOperand& rj) {
+ DCHECK(!rj.hasIndexReg());
+ bool is_one_instruction = is_int14(rj.offset());
+ if (is_one_instruction) {
+ sc_d(rd, rj.base(), rj.offset());
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, rj.offset());
+ add_d(scratch, scratch, rj.base());
+ sc_d(rd, scratch, 0);
+ }
+}
+
+void TurboAssembler::li(Register dst, Handle<HeapObject> value, LiFlags mode) {
+ // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
+ // non-isolate-independent code. In many cases it might be cheaper than
+ // embedding the relocatable value.
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadConstant(dst, value);
+ return;
+ }
+ li(dst, Operand(value), mode);
+}
+
+void TurboAssembler::li(Register dst, ExternalReference value, LiFlags mode) {
+ // TODO(jgruber,v8:8887): Also consider a root-relative load when generating
+ // non-isolate-independent code. In many cases it might be cheaper than
+ // embedding the relocatable value.
+ if (root_array_available_ && options().isolate_independent_code) {
+ IndirectLoadExternalReference(dst, value);
+ return;
+ }
+ li(dst, Operand(value), mode);
+}
+
+void TurboAssembler::li(Register dst, const StringConstantBase* string,
+ LiFlags mode) {
+ li(dst, Operand::EmbeddedStringConstant(string), mode);
+}
+
+static inline int InstrCountForLiLower32Bit(int64_t value) {
+ if (is_int12(static_cast<int32_t>(value)) ||
+ is_uint12(static_cast<int32_t>(value)) || !(value & kImm12Mask)) {
+ return 1;
+ } else {
+ return 2;
+ }
+}
+
+void TurboAssembler::LiLower32BitHelper(Register rd, Operand j) {
+ if (is_int12(static_cast<int32_t>(j.immediate()))) {
+ addi_d(rd, zero_reg, j.immediate());
+ } else if (is_uint12(static_cast<int32_t>(j.immediate()))) {
+ ori(rd, zero_reg, j.immediate() & kImm12Mask);
+ } else {
+ lu12i_w(rd, j.immediate() >> 12 & 0xfffff);
+ if (j.immediate() & kImm12Mask) {
+ ori(rd, rd, j.immediate() & kImm12Mask);
+ }
+ }
+}
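+
+// Worked example (hypothetical immediate): 0x12345 fits neither int12
+// nor uint12, so LiLower32BitHelper emits the two-instruction form:
+//   lu12i_w(rd, 0x12);   // rd = 0x12 << 12 = 0x12000
+//   ori(rd, rd, 0x345);  // rd = 0x12000 | 0x345 = 0x12345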
+
+int TurboAssembler::InstrCountForLi64Bit(int64_t value) {
+ if (is_int32(value)) {
+ return InstrCountForLiLower32Bit(value);
+ } else if (is_int52(value)) {
+ return InstrCountForLiLower32Bit(value) + 1;
+ } else if ((value & 0xffffffffL) == 0) {
+ // 32 LSBs (Least Significant Bits) all set to zero.
+ uint8_t tzc = base::bits::CountTrailingZeros32(value >> 32);
+ uint8_t lzc = base::bits::CountLeadingZeros32(value >> 32);
+ if (tzc >= 20) {
+ return 1;
+ } else if (tzc + lzc > 12) {
+ return 2;
+ } else {
+ return 3;
+ }
+ } else {
+ int64_t imm21 = (value >> 31) & 0x1fffffL;
+ if (imm21 != 0x1fffffL && imm21 != 0) {
+ return InstrCountForLiLower32Bit(value) + 2;
+ } else {
+ return InstrCountForLiLower32Bit(value) + 1;
+ }
+ }
+ UNREACHABLE();
+ return INT_MAX;
+}
+
+// All changes to if...else conditions here must be added to
+// InstrCountForLi64Bit as well.
+void TurboAssembler::li_optimized(Register rd, Operand j, LiFlags mode) {
+ DCHECK(!j.is_reg());
+ DCHECK(!MustUseReg(j.rmode()));
+ DCHECK(mode == OPTIMIZE_SIZE);
+ int64_t imm = j.immediate();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ // Normal load of an immediate value which does not need Relocation Info.
+ if (is_int32(imm)) {
+ LiLower32BitHelper(rd, j);
+ } else if (is_int52(imm)) {
+ LiLower32BitHelper(rd, j);
+ lu32i_d(rd, imm >> 32 & 0xfffff);
+ } else if ((imm & 0xffffffffL) == 0) {
+ // 32 LSBs (Least Significant Bits) all set to zero.
+ uint8_t tzc = base::bits::CountTrailingZeros32(imm >> 32);
+ uint8_t lzc = base::bits::CountLeadingZeros32(imm >> 32);
+ if (tzc >= 20) {
+ lu52i_d(rd, zero_reg, imm >> 52 & kImm12Mask);
+ } else if (tzc + lzc > 12) {
+ int32_t mask = (1 << (32 - tzc)) - 1;
+ lu12i_w(rd, imm >> (tzc + 32) & mask);
+ slli_d(rd, rd, tzc + 20);
+ } else {
+ xor_(rd, rd, rd);
+ lu32i_d(rd, imm >> 32 & 0xfffff);
+ lu52i_d(rd, rd, imm >> 52 & kImm12Mask);
+ }
+ } else {
+ int64_t imm21 = (imm >> 31) & 0x1fffffL;
+ LiLower32BitHelper(rd, j);
+ if (imm21 != 0x1fffffL && imm21 != 0) lu32i_d(rd, imm >> 32 & 0xfffff);
+ lu52i_d(rd, rd, imm >> 52 & kImm12Mask);
+ }
+}
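+
+// Full-width materialization sketch: the CONSTANT_SIZE path of li()
+// below always emits this fixed four-instruction sequence, one per
+// immediate field of a 64-bit value:
+//   lu12i_w(rd, imm >> 12 & 0xfffff);         // bits [31:12]
+//   ori(rd, rd, imm & kImm12Mask);            // bits [11:0]
+//   lu32i_d(rd, imm >> 32 & 0xfffff);         // bits [51:32]
+//   lu52i_d(rd, rd, imm >> 52 & kImm12Mask);  // bits [63:52]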
+
+void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
+ DCHECK(!j.is_reg());
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (!MustUseReg(j.rmode()) && mode == OPTIMIZE_SIZE) {
+ li_optimized(rd, j, mode);
+ } else if (IsOnHeap() && RelocInfo::IsEmbeddedObjectMode(j.rmode())) {
+ BlockGrowBufferScope block_growbuffer(this);
+ int offset = pc_offset();
+ Address address = j.immediate();
+ saved_handles_for_raw_object_ptr_.push_back(
+ std::make_pair(offset, address));
+ Handle<HeapObject> object(reinterpret_cast<Address*>(address));
+ int64_t immediate = object->ptr();
+ RecordRelocInfo(j.rmode(), immediate);
+ lu12i_w(rd, immediate >> 12 & 0xfffff);
+ ori(rd, rd, immediate & kImm12Mask);
+ lu32i_d(rd, immediate >> 32 & 0xfffff);
+ } else if (MustUseReg(j.rmode())) {
+ int64_t immediate;
+ if (j.IsHeapObjectRequest()) {
+ RequestHeapObject(j.heap_object_request());
+ immediate = 0;
+ } else {
+ immediate = j.immediate();
+ }
+
+ RecordRelocInfo(j.rmode(), immediate);
+ lu12i_w(rd, immediate >> 12 & 0xfffff);
+ ori(rd, rd, immediate & kImm12Mask);
+ lu32i_d(rd, immediate >> 32 & 0xfffff);
+ } else if (mode == ADDRESS_LOAD) {
+ // We always need the same number of instructions as we may need to patch
+ // this code to load another value which may need all 3 instructions.
+ lu12i_w(rd, j.immediate() >> 12 & 0xfffff);
+ ori(rd, rd, j.immediate() & kImm12Mask);
+ lu32i_d(rd, j.immediate() >> 32 & 0xfffff);
+ } else { // mode == CONSTANT_SIZE - always emit the same instruction
+ // sequence.
+ lu12i_w(rd, j.immediate() >> 12 & 0xfffff);
+ ori(rd, rd, j.immediate() & kImm12Mask);
+ lu32i_d(rd, j.immediate() >> 32 & 0xfffff);
+ lu52i_d(rd, rd, j.immediate() >> 52 & kImm12Mask);
+ }
+}
+
+void TurboAssembler::MultiPush(RegList regs) {
+ int16_t stack_offset = 0;
+
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs & (1 << i)) != 0) {
+ stack_offset -= kPointerSize;
+ St_d(ToRegister(i), MemOperand(sp, stack_offset));
+ }
+ }
+ addi_d(sp, sp, stack_offset);
+}
+
+void TurboAssembler::MultiPush(RegList regs1, RegList regs2) {
+ DCHECK_EQ(regs1 & regs2, 0);
+ int16_t stack_offset = 0;
+
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs1 & (1 << i)) != 0) {
+ stack_offset -= kPointerSize;
+ St_d(ToRegister(i), MemOperand(sp, stack_offset));
+ }
+ }
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs2 & (1 << i)) != 0) {
+ stack_offset -= kPointerSize;
+ St_d(ToRegister(i), MemOperand(sp, stack_offset));
+ }
+ }
+ addi_d(sp, sp, stack_offset);
+}
+
+void TurboAssembler::MultiPush(RegList regs1, RegList regs2, RegList regs3) {
+ DCHECK_EQ(regs1 & regs2, 0);
+ DCHECK_EQ(regs1 & regs3, 0);
+ DCHECK_EQ(regs2 & regs3, 0);
+ int16_t stack_offset = 0;
+
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs1 & (1 << i)) != 0) {
+ stack_offset -= kPointerSize;
+ St_d(ToRegister(i), MemOperand(sp, stack_offset));
+ }
+ }
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs2 & (1 << i)) != 0) {
+ stack_offset -= kPointerSize;
+ St_d(ToRegister(i), MemOperand(sp, stack_offset));
+ }
+ }
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs3 & (1 << i)) != 0) {
+ stack_offset -= kPointerSize;
+ St_d(ToRegister(i), MemOperand(sp, stack_offset));
+ }
+ }
+ addi_d(sp, sp, stack_offset);
+}
+
+void TurboAssembler::MultiPop(RegList regs) {
+ int16_t stack_offset = 0;
+
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs & (1 << i)) != 0) {
+ Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
+ stack_offset += kPointerSize;
+ }
+ }
+ addi_d(sp, sp, stack_offset);
+}
+
+void TurboAssembler::MultiPop(RegList regs1, RegList regs2) {
+ DCHECK_EQ(regs1 & regs2, 0);
+ int16_t stack_offset = 0;
+
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs2 & (1 << i)) != 0) {
+ Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
+ stack_offset += kPointerSize;
+ }
+ }
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs1 & (1 << i)) != 0) {
+ Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
+ stack_offset += kPointerSize;
+ }
+ }
+ addi_d(sp, sp, stack_offset);
+}
+
+void TurboAssembler::MultiPop(RegList regs1, RegList regs2, RegList regs3) {
+ DCHECK_EQ(regs1 & regs2, 0);
+ DCHECK_EQ(regs1 & regs3, 0);
+ DCHECK_EQ(regs2 & regs3, 0);
+ int16_t stack_offset = 0;
+
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs3 & (1 << i)) != 0) {
+ Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
+ stack_offset += kPointerSize;
+ }
+ }
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs2 & (1 << i)) != 0) {
+ Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
+ stack_offset += kPointerSize;
+ }
+ }
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs1 & (1 << i)) != 0) {
+ Ld_d(ToRegister(i), MemOperand(sp, stack_offset));
+ stack_offset += kPointerSize;
+ }
+ }
+ addi_d(sp, sp, stack_offset);
+}
+
+void TurboAssembler::MultiPushFPU(RegList regs) {
+ int16_t num_to_push = base::bits::CountPopulation(regs);
+ int16_t stack_offset = num_to_push * kDoubleSize;
+
+ Sub_d(sp, sp, Operand(stack_offset));
+ for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+ if ((regs & (1 << i)) != 0) {
+ stack_offset -= kDoubleSize;
+ Fst_d(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+ }
+ }
+}
+
+void TurboAssembler::MultiPopFPU(RegList regs) {
+ int16_t stack_offset = 0;
+
+ for (int16_t i = 0; i < kNumRegisters; i++) {
+ if ((regs & (1 << i)) != 0) {
+ Fld_d(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+ stack_offset += kDoubleSize;
+ }
+ }
+ addi_d(sp, sp, stack_offset);
+}
+
+void TurboAssembler::Bstrpick_w(Register rk, Register rj, uint16_t msbw,
+ uint16_t lsbw) {
+ DCHECK_LT(lsbw, msbw);
+ DCHECK_LT(lsbw, 32);
+ DCHECK_LT(msbw, 32);
+ bstrpick_w(rk, rj, msbw, lsbw);
+}
+
+void TurboAssembler::Bstrpick_d(Register rk, Register rj, uint16_t msbw,
+ uint16_t lsbw) {
+ DCHECK_LT(lsbw, msbw);
+ DCHECK_LT(lsbw, 64);
+ DCHECK_LT(msbw, 64);
+ bstrpick_d(rk, rj, msbw, lsbw);
+}
+
+void TurboAssembler::Neg_s(FPURegister fd, FPURegister fj) { fneg_s(fd, fj); }
+
+void TurboAssembler::Neg_d(FPURegister fd, FPURegister fj) { fneg_d(fd, fj); }
+
+void TurboAssembler::Ffint_d_uw(FPURegister fd, FPURegister fj) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ movfr2gr_s(t8, fj);
+ Ffint_d_uw(fd, t8);
+}
+
+void TurboAssembler::Ffint_d_uw(FPURegister fd, Register rj) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(rj != t7);
+
+ Bstrpick_d(t7, rj, 31, 0);
+ movgr2fr_d(fd, t7);
+ ffint_d_l(fd, fd);
+}
+
+void TurboAssembler::Ffint_d_ul(FPURegister fd, FPURegister fj) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ movfr2gr_d(t8, fj);
+ Ffint_d_ul(fd, t8);
+}
+
+void TurboAssembler::Ffint_d_ul(FPURegister fd, Register rj) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(rj != t7);
+
+ Label msb_clear, conversion_done;
+
+ Branch(&msb_clear, ge, rj, Operand(zero_reg));
+
+ // rj >= 2^63.
+ andi(t7, rj, 1);
+ srli_d(rj, rj, 1);
+ or_(t7, t7, rj);
+ movgr2fr_d(fd, t7);
+ ffint_d_l(fd, fd);
+ fadd_d(fd, fd, fd);
+ Branch(&conversion_done);
+
+ bind(&msb_clear);
+ // rj < 2^63, we can do a simple conversion.
+ movgr2fr_d(fd, rj);
+ ffint_d_l(fd, fd);
+
+ bind(&conversion_done);
+}
+
+void TurboAssembler::Ffint_s_uw(FPURegister fd, FPURegister fj) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ movfr2gr_d(t8, fj);
+ Ffint_s_uw(fd, t8);
+}
+
+void TurboAssembler::Ffint_s_uw(FPURegister fd, Register rj) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(rj != t7);
+
+ bstrpick_d(t7, rj, 31, 0);
+ movgr2fr_d(fd, t7);
+ ffint_s_l(fd, fd);
+}
+
+void TurboAssembler::Ffint_s_ul(FPURegister fd, FPURegister fj) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ movfr2gr_d(t8, fj);
+ Ffint_s_ul(fd, t8);
+}
+
+void TurboAssembler::Ffint_s_ul(FPURegister fd, Register rj) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ DCHECK(rj != t7);
+
+ Label positive, conversion_done;
+
+ Branch(&positive, ge, rj, Operand(zero_reg));
+
+ // rj >= 2^63.
+ andi(t7, rj, 1);
+ srli_d(rj, rj, 1);
+ or_(t7, t7, rj);
+ movgr2fr_d(fd, t7);
+ ffint_s_l(fd, fd);
+ fadd_s(fd, fd, fd);
+ Branch(&conversion_done);
+
+ bind(&positive);
+ // rj < 2^63, we can do a simple conversion.
+ movgr2fr_d(fd, rj);
+ ffint_s_l(fd, fd);
+
+ bind(&conversion_done);
+}
+
+void MacroAssembler::Ftintrne_l_d(FPURegister fd, FPURegister fj) {
+ ftintrne_l_d(fd, fj);
+}
+
+void MacroAssembler::Ftintrm_l_d(FPURegister fd, FPURegister fj) {
+ ftintrm_l_d(fd, fj);
+}
+
+void MacroAssembler::Ftintrp_l_d(FPURegister fd, FPURegister fj) {
+ ftintrp_l_d(fd, fj);
+}
+
+void MacroAssembler::Ftintrz_l_d(FPURegister fd, FPURegister fj) {
+ ftintrz_l_d(fd, fj);
+}
+
+void MacroAssembler::Ftintrz_l_ud(FPURegister fd, FPURegister fj,
+ FPURegister scratch) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ // Load to GPR.
+ movfr2gr_d(t8, fj);
+ // Reset sign bit.
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch1 = temps.Acquire();
+ li(scratch1, 0x7FFFFFFFFFFFFFFFl);
+ and_(t8, t8, scratch1);
+ }
+ movgr2fr_d(scratch, t8);
+ Ftintrz_l_d(fd, scratch);
+}
+
+void TurboAssembler::Ftintrz_uw_d(FPURegister fd, FPURegister fj,
+ FPURegister scratch) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Ftintrz_uw_d(t8, fj, scratch);
+ movgr2fr_w(fd, t8);
+}
+
+void TurboAssembler::Ftintrz_uw_s(FPURegister fd, FPURegister fj,
+ FPURegister scratch) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Ftintrz_uw_s(t8, fj, scratch);
+ movgr2fr_w(fd, t8);
+}
+
+void TurboAssembler::Ftintrz_ul_d(FPURegister fd, FPURegister fj,
+ FPURegister scratch, Register result) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Ftintrz_ul_d(t8, fj, scratch, result);
+ movgr2fr_d(fd, t8);
+}
+
+void TurboAssembler::Ftintrz_ul_s(FPURegister fd, FPURegister fj,
+ FPURegister scratch, Register result) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Ftintrz_ul_s(t8, fj, scratch, result);
+ movgr2fr_d(fd, t8);
+}
+
+void MacroAssembler::Ftintrz_w_d(FPURegister fd, FPURegister fj) {
+ ftintrz_w_d(fd, fj);
+}
+
+void MacroAssembler::Ftintrne_w_d(FPURegister fd, FPURegister fj) {
+ ftintrne_w_d(fd, fj);
+}
+
+void MacroAssembler::Ftintrm_w_d(FPURegister fd, FPURegister fj) {
+ ftintrm_w_d(fd, fj);
+}
+
+void MacroAssembler::Ftintrp_w_d(FPURegister fd, FPURegister fj) {
+ ftintrp_w_d(fd, fj);
+}
+
+void TurboAssembler::Ftintrz_uw_d(Register rd, FPURegister fj,
+ FPURegister scratch) {
+ DCHECK(fj != scratch);
+ DCHECK(rd != t7);
+
+ {
+ // Load 2^31 into scratch as its double representation.
+ UseScratchRegisterScope temps(this);
+ Register scratch1 = temps.Acquire();
+ li(scratch1, 0x41E00000);
+ movgr2fr_w(scratch, zero_reg);
+ movgr2frh_w(scratch, scratch1);
+ }
+ // Test if fj < 2^31.
+ // If fj < 2^31 we can convert it normally.
+ Label simple_convert;
+ CompareF64(fj, scratch, CLT);
+ BranchTrueShortF(&simple_convert);
+
+ // First we subtract 2^31 from fj, then truncate it to rd
+ // and set the 2^31 bit in rd.
+ fsub_d(scratch, fj, scratch);
+ ftintrz_w_d(scratch, scratch);
+ movfr2gr_s(rd, scratch);
+ Or(rd, rd, 1 << 31);
+
+ Label done;
+ Branch(&done);
+ // Simple conversion.
+ bind(&simple_convert);
+ ftintrz_w_d(scratch, fj);
+ movfr2gr_s(rd, scratch);
+
+ bind(&done);
+}
+
+void TurboAssembler::Ftintrz_uw_s(Register rd, FPURegister fj,
+ FPURegister scratch) {
+ DCHECK(fj != scratch);
+ DCHECK(rd != t7);
+ {
+ // Load 2^31 into scratch as its float representation.
+ UseScratchRegisterScope temps(this);
+ Register scratch1 = temps.Acquire();
+ li(scratch1, 0x4F000000);
+ movgr2fr_w(scratch, scratch1);
+ }
+ // Test if fj < 2^31.
+ // If fj < 2^31 we can convert it normally.
+ Label simple_convert;
+ CompareF32(fj, scratch, CLT);
+ BranchTrueShortF(&simple_convert);
+
+ // First we subtract 2^31 from fj, then truncate it to rd
+ // and set the 2^31 bit in rd.
+ fsub_s(scratch, fj, scratch);
+ ftintrz_w_s(scratch, scratch);
+ movfr2gr_s(rd, scratch);
+ Or(rd, rd, 1 << 31);
+
+ Label done;
+ Branch(&done);
+ // Simple conversion.
+ bind(&simple_convert);
+ ftintrz_w_s(scratch, fj);
+ movfr2gr_s(rd, scratch);
+
+ bind(&done);
+}
+
+void TurboAssembler::Ftintrz_ul_d(Register rd, FPURegister fj,
+ FPURegister scratch, Register result) {
+ DCHECK(fj != scratch);
+ DCHECK(result.is_valid() ? !AreAliased(rd, result, t7) : !AreAliased(rd, t7));
+
+ Label simple_convert, done, fail;
+ if (result.is_valid()) {
+ mov(result, zero_reg);
+ Move(scratch, -1.0);
+ // If fj <= -1 or is unordered (NaN), the conversion fails.
+ CompareF64(fj, scratch, CLE);
+ BranchTrueShortF(&fail);
+ CompareIsNanF64(fj, scratch);
+ BranchTrueShortF(&fail);
+ }
+
+ // Load 2^63 into scratch as its double representation.
+ li(t7, 0x43E0000000000000);
+ movgr2fr_d(scratch, t7);
+
+ // Test if fj < 2^63.
+ // If fj < 2^63 we can convert it normally.
+ CompareF64(fj, scratch, CLT);
+ BranchTrueShortF(&simple_convert);
+
+ // First we subtract 2^63 from fj, then truncate it to rd
+ // and set the 2^63 bit in rd.
+ fsub_d(scratch, fj, scratch);
+ ftintrz_l_d(scratch, scratch);
+ movfr2gr_d(rd, scratch);
+ Or(rd, rd, Operand(1UL << 63));
+ Branch(&done);
+
+ // Simple conversion.
+ bind(&simple_convert);
+ ftintrz_l_d(scratch, fj);
+ movfr2gr_d(rd, scratch);
+
+ bind(&done);
+ if (result.is_valid()) {
+ // The conversion failed if the truncated result is negative or saturated.
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch1 = temps.Acquire();
+ addi_d(scratch1, zero_reg, -1);
+ srli_d(scratch1, scratch1, 1); // scratch1 = INT64_MAX (0x7FFFFFFFFFFFFFFF).
+ movfr2gr_d(result, scratch);
+ xor_(result, result, scratch1);
+ }
+ Slt(result, zero_reg, result);
+ }
+
+ bind(&fail);
+}
+
+void TurboAssembler::Ftintrz_ul_s(Register rd, FPURegister fj,
+ FPURegister scratch, Register result) {
+ DCHECK(fj != scratch);
+ DCHECK(result.is_valid() ? !AreAliased(rd, result, t7) : !AreAliased(rd, t7));
+
+ Label simple_convert, done, fail;
+ if (result.is_valid()) {
+ mov(result, zero_reg);
+ Move(scratch, -1.0f);
+ // If fj <= -1 or is unordered (NaN), the conversion fails.
+ CompareF32(fj, scratch, CLE);
+ BranchTrueShortF(&fail);
+ CompareIsNanF32(fj, scratch);
+ BranchTrueShortF(&fail);
+ }
+
+ {
+ // Load 2^63 into scratch as its float representation.
+ UseScratchRegisterScope temps(this);
+ Register scratch1 = temps.Acquire();
+ li(scratch1, 0x5F000000);
+ movgr2fr_w(scratch, scratch1);
+ }
+
+ // Test if fj < 2^63.
+ // If fj < 2^63 we can convert it normally.
+ CompareF32(fj, scratch, CLT);
+ BranchTrueShortF(&simple_convert);
+
+ // First we subtract 2^63 from fj, then truncate it to rd
+ // and set the 2^63 bit in rd.
+ fsub_s(scratch, fj, scratch);
+ ftintrz_l_s(scratch, scratch);
+ movfr2gr_d(rd, scratch);
+ Or(rd, rd, Operand(1UL << 63));
+ Branch(&done);
+
+ // Simple conversion.
+ bind(&simple_convert);
+ ftintrz_l_s(scratch, fj);
+ movfr2gr_d(rd, scratch);
+
+ bind(&done);
+ if (result.is_valid()) {
+ // The conversion failed if the truncated result is negative or saturated.
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch1 = temps.Acquire();
+ addi_d(scratch1, zero_reg, -1);
+ srli_d(scratch1, scratch1, 1); // scratch1 = INT64_MAX (0x7FFFFFFFFFFFFFFF).
+ movfr2gr_d(result, scratch);
+ xor_(result, result, scratch1);
+ }
+ Slt(result, zero_reg, result);
+ }
+
+ bind(&fail);
+}
+
+void TurboAssembler::RoundDouble(FPURegister dst, FPURegister src,
+ FPURoundingMode mode) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = t8;
+ movfcsr2gr(scratch);
+ li(t7, Operand(mode));
+ movgr2fcsr(t7);
+ frint_d(dst, src);
+ movgr2fcsr(scratch);
+}
+
+void TurboAssembler::Floor_d(FPURegister dst, FPURegister src) {
+ RoundDouble(dst, src, mode_floor);
+}
+
+void TurboAssembler::Ceil_d(FPURegister dst, FPURegister src) {
+ RoundDouble(dst, src, mode_ceil);
+}
+
+void TurboAssembler::Trunc_d(FPURegister dst, FPURegister src) {
+ RoundDouble(dst, src, mode_trunc);
+}
+
+void TurboAssembler::Round_d(FPURegister dst, FPURegister src) {
+ RoundDouble(dst, src, mode_round);
+}
+
+void TurboAssembler::RoundFloat(FPURegister dst, FPURegister src,
+ FPURoundingMode mode) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = t8;
+ movfcsr2gr(scratch);
+ li(t7, Operand(mode));
+ movgr2fcsr(t7);
+ frint_s(dst, src);
+ movgr2fcsr(scratch);
+}
+
+void TurboAssembler::Floor_s(FPURegister dst, FPURegister src) {
+ RoundFloat(dst, src, mode_floor);
+}
+
+void TurboAssembler::Ceil_s(FPURegister dst, FPURegister src) {
+ RoundFloat(dst, src, mode_ceil);
+}
+
+void TurboAssembler::Trunc_s(FPURegister dst, FPURegister src) {
+ RoundFloat(dst, src, mode_trunc);
+}
+
+void TurboAssembler::Round_s(FPURegister dst, FPURegister src) {
+ RoundFloat(dst, src, mode_round);
+}
+
+void TurboAssembler::CompareF(FPURegister cmp1, FPURegister cmp2,
+ FPUCondition cc, CFRegister cd, bool f32) {
+ if (f32) {
+ fcmp_cond_s(cc, cmp1, cmp2, cd);
+ } else {
+ fcmp_cond_d(cc, cmp1, cmp2, cd);
+ }
+}
+
+void TurboAssembler::CompareIsNanF(FPURegister cmp1, FPURegister cmp2,
+ CFRegister cd, bool f32) {
+ CompareF(cmp1, cmp2, CUN, cd, f32);
+}
+
+void TurboAssembler::BranchTrueShortF(Label* target, CFRegister cj) {
+ bcnez(cj, target);
+}
+
+void TurboAssembler::BranchFalseShortF(Label* target, CFRegister cj) {
+ bceqz(cj, target);
+}
+
+void TurboAssembler::BranchTrueF(Label* target, CFRegister cj) {
+ // TODO(yuyin): can be optimized
+ bool long_branch = target->is_bound()
+ ? !is_near(target, OffsetSize::kOffset21)
+ : is_trampoline_emitted();
+ if (long_branch) {
+ Label skip;
+ BranchFalseShortF(&skip, cj);
+ Branch(target);
+ bind(&skip);
+ } else {
+ BranchTrueShortF(target, cj);
+ }
+}
+
+void TurboAssembler::BranchFalseF(Label* target, CFRegister cj) {
+ bool long_branch = target->is_bound()
+ ? !is_near(target, OffsetSize::kOffset21)
+ : is_trampoline_emitted();
+ if (long_branch) {
+ Label skip;
+ BranchTrueShortF(&skip, cj);
+ Branch(target);
+ bind(&skip);
+ } else {
+ BranchFalseShortF(target, cj);
+ }
+}
+
+void TurboAssembler::FmoveLow(FPURegister dst, Register src_low) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ DCHECK(src_low != scratch);
+ movfrh2gr_s(scratch, dst);
+ movgr2fr_w(dst, src_low);
+ movgr2frh_w(dst, scratch);
+}
+
+void TurboAssembler::Move(FPURegister dst, uint32_t src) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(static_cast<int32_t>(src)));
+ movgr2fr_w(dst, scratch);
+}
+
+void TurboAssembler::Move(FPURegister dst, uint64_t src) {
+ // Handle special values first.
+ if (src == bit_cast<uint64_t>(0.0) && has_double_zero_reg_set_) {
+ fmov_d(dst, kDoubleRegZero);
+ } else if (src == bit_cast<uint64_t>(-0.0) && has_double_zero_reg_set_) {
+ Neg_d(dst, kDoubleRegZero);
+ } else {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(static_cast<int64_t>(src)));
+ movgr2fr_d(dst, scratch);
+ if (dst == kDoubleRegZero) has_double_zero_reg_set_ = true;
+ }
+}
+
+void TurboAssembler::Movz(Register rd, Register rj, Register rk) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ maskeqz(scratch, rj, rk);
+ masknez(rd, rd, rk);
+ or_(rd, rd, scratch);
+}
+
+void TurboAssembler::Movn(Register rd, Register rj, Register rk) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ masknez(scratch, rj, rk);
+ maskeqz(rd, rd, rk);
+ or_(rd, rd, scratch);
+}
+
+void TurboAssembler::LoadZeroOnCondition(Register rd, Register rj,
+ const Operand& rk, Condition cond) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ switch (cond) {
+ case cc_always:
+ mov(rd, zero_reg);
+ break;
+ case eq:
+ if (rj == zero_reg) {
+ if (rk.is_reg()) {
+ LoadZeroIfConditionZero(rd, rk.rm());
+ } else if (rk.immediate() == 0) {
+ mov(rd, zero_reg);
+ }
+ } else if (IsZero(rk)) {
+ LoadZeroIfConditionZero(rd, rj);
+ } else {
+ Sub_d(t7, rj, rk);
+ LoadZeroIfConditionZero(rd, t7);
+ }
+ break;
+ case ne:
+ if (rj == zero_reg) {
+ if (rk.is_reg()) {
+ LoadZeroIfConditionNotZero(rd, rk.rm());
+ } else if (rk.immediate() != 0) {
+ mov(rd, zero_reg);
+ }
+ } else if (IsZero(rk)) {
+ LoadZeroIfConditionNotZero(rd, rj);
+ } else {
+ Sub_d(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ }
+ break;
+
+ // Signed comparison.
+ case greater:
+ Sgt(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ break;
+ case greater_equal:
+ Sge(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ // rj >= rk
+ break;
+ case less:
+ Slt(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ // rj < rk
+ break;
+ case less_equal:
+ Sle(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ // rj <= rk
+ break;
+
+ // Unsigned comparison.
+ case Ugreater:
+ Sgtu(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ // rj > rk
+ break;
+
+ case Ugreater_equal:
+ Sgeu(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ // rj >= rk
+ break;
+ case Uless:
+ Sltu(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ // rj < rk
+ break;
+ case Uless_equal:
+ Sleu(t7, rj, rk);
+ LoadZeroIfConditionNotZero(rd, t7);
+ // rj <= rk
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void TurboAssembler::LoadZeroIfConditionNotZero(Register dest,
+ Register condition) {
+ maskeqz(dest, dest, condition);
+}
+
+void TurboAssembler::LoadZeroIfConditionZero(Register dest,
+ Register condition) {
+ masknez(dest, dest, condition);
+}
+
+void TurboAssembler::LoadZeroIfFPUCondition(Register dest, CFRegister cc) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ movcf2gr(scratch, cc);
+ LoadZeroIfConditionNotZero(dest, scratch);
+}
+
+void TurboAssembler::LoadZeroIfNotFPUCondition(Register dest, CFRegister cc) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ movcf2gr(scratch, cc);
+ LoadZeroIfConditionZero(dest, scratch);
+}
+
+void TurboAssembler::Clz_w(Register rd, Register rj) { clz_w(rd, rj); }
+
+void TurboAssembler::Clz_d(Register rd, Register rj) { clz_d(rd, rj); }
+
+void TurboAssembler::Ctz_w(Register rd, Register rj) { ctz_w(rd, rj); }
+
+void TurboAssembler::Ctz_d(Register rd, Register rj) { ctz_d(rd, rj); }
+
+// TODO(LOONG_dev): Optimize like arm64, use simd instruction
+void TurboAssembler::Popcnt_w(Register rd, Register rj) {
+ // https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
+ //
+ // A generalization of the best bit counting method to integers of
+ // bit-widths up to 128 (parameterized by type T) is this:
+ //
+ // v = v - ((v >> 1) & (T)~(T)0/3); // temp
+ // v = (v & (T)~(T)0/15*3) + ((v >> 2) & (T)~(T)0/15*3); // temp
+ // v = (v + (v >> 4)) & (T)~(T)0/255*15; // temp
+ // c = (T)(v * ((T)~(T)0/255)) >> (sizeof(T) - 1) * BITS_PER_BYTE; //count
+ //
+ // There are algorithms which are faster in the cases where very few
+ // bits are set but the algorithm here attempts to minimize the total
+ // number of instructions executed even when a large number of bits
+ // are set.
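+ //
+ // For T = uint32_t this specializes, with the constants below, to roughly:
+ //
+ // v = v - ((v >> 1) & 0x55555555);
+ // v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
+ // v = (v + (v >> 4)) & 0x0F0F0F0F;
+ // c = (v * 0x01010101) >> 24;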
+ int32_t B0 = 0x55555555; // (T)~(T)0/3
+ int32_t B1 = 0x33333333; // (T)~(T)0/15*3
+ int32_t B2 = 0x0F0F0F0F; // (T)~(T)0/255*15
+ int32_t value = 0x01010101; // (T)~(T)0/255
+ uint32_t shift = 24; // (sizeof(T) - 1) * BITS_PER_BYTE
+
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = t8;
+ srli_w(scratch, rj, 1);
+ li(scratch2, B0);
+ And(scratch, scratch, scratch2);
+ Sub_w(scratch, rj, scratch);
+ li(scratch2, B1);
+ And(rd, scratch, scratch2);
+ srli_w(scratch, scratch, 2);
+ And(scratch, scratch, scratch2);
+ Add_w(scratch, rd, scratch);
+ srli_w(rd, scratch, 4);
+ Add_w(rd, rd, scratch);
+ li(scratch2, B2);
+ And(rd, rd, scratch2);
+ li(scratch, value);
+ Mul_w(rd, rd, scratch);
+ srli_w(rd, rd, shift);
+}
+
+void TurboAssembler::Popcnt_d(Register rd, Register rj) {
+ int64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
+ int64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3
+ int64_t B2 = 0x0F0F0F0F0F0F0F0Fl; // (T)~(T)0/255*15
+ int64_t value = 0x0101010101010101l; // (T)~(T)0/255
+ uint32_t shift = 56; // (sizeof(T) - 1) * BITS_PER_BYTE
+
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = t8;
+ srli_d(scratch, rj, 1);
+ li(scratch2, B0);
+ And(scratch, scratch, scratch2);
+ Sub_d(scratch, rj, scratch);
+ li(scratch2, B1);
+ And(rd, scratch, scratch2);
+ srli_d(scratch, scratch, 2);
+ And(scratch, scratch, scratch2);
+ Add_d(scratch, rd, scratch);
+ srli_d(rd, scratch, 4);
+ Add_d(rd, rd, scratch);
+ li(scratch2, B2);
+ And(rd, rd, scratch2);
+ li(scratch, value);
+ Mul_d(rd, rd, scratch);
+ srli_d(rd, rd, shift);
+}
+
+void TurboAssembler::ExtractBits(Register dest, Register source, Register pos,
+ int size, bool sign_extend) {
+ sra_d(dest, source, pos);
+ bstrpick_d(dest, dest, size - 1, 0);
+ if (sign_extend) {
+ switch (size) {
+ case 8:
+ ext_w_b(dest, dest);
+ break;
+ case 16:
+ ext_w_h(dest, dest);
+ break;
+ case 32:
+ // sign-extend word
+ slli_w(dest, dest, 0);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+void TurboAssembler::InsertBits(Register dest, Register source, Register pos,
+ int size) {
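+ // Rotate dest right by pos so the destination field starts at bit 0,
+ // overwrite its low 'size' bits with source, then rotate back: rotating
+ // right by (64 - pos) is the same as rotating left by pos.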
+ Rotr_d(dest, dest, pos);
+ bstrins_d(dest, source, size - 1, 0);
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Sub_d(scratch, zero_reg, pos);
+ Rotr_d(dest, dest, scratch);
+ }
+}
+
+void TurboAssembler::TryInlineTruncateDoubleToI(Register result,
+ DoubleRegister double_input,
+ Label* done) {
+ DoubleRegister single_scratch = kScratchDoubleReg.low();
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+
+ ftintrz_l_d(single_scratch, double_input);
+ movfr2gr_d(scratch2, single_scratch);
+ li(scratch, 1L << 63);
+ Xor(scratch, scratch, scratch2);
+ rotri_d(scratch2, scratch, 1);
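+ // A value equals its own 1-bit rotation only if all of its bits are equal,
+ // i.e. scratch2 ^ (1 << 63) is 0 or all ones. That happens exactly when the
+ // truncation produced INT64_MIN or INT64_MAX, the saturated results
+ // expected from an out-of-range ftintrz_l_d, so only then do we fall
+ // through to the slow path.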
+ movfr2gr_s(result, single_scratch);
+ Branch(done, ne, scratch, Operand(scratch2));
+
+ // Truncate NaN to zero.
+ CompareIsNanF64(double_input, double_input);
+ Move(result, zero_reg);
+ bcnez(FCC0, done);
+}
+
+void TurboAssembler::TruncateDoubleToI(Isolate* isolate, Zone* zone,
+ Register result,
+ DoubleRegister double_input,
+ StubCallMode stub_mode) {
+ Label done;
+
+ TryInlineTruncateDoubleToI(result, double_input, &done);
+
+ // If we fell through then inline version didn't succeed - call stub instead.
+ Sub_d(sp, sp,
+ Operand(kDoubleSize + kSystemPointerSize)); // Put input on stack.
+ St_d(ra, MemOperand(sp, kSystemPointerSize));
+ Fst_d(double_input, MemOperand(sp, 0));
+
+#if V8_ENABLE_WEBASSEMBLY
+ if (stub_mode == StubCallMode::kCallWasmRuntimeStub) {
+ Call(wasm::WasmCode::kDoubleToI, RelocInfo::WASM_STUB_CALL);
+#else
+ // For balance.
+ if (false) {
+#endif // V8_ENABLE_WEBASSEMBLY
+ } else {
+ Call(BUILTIN_CODE(isolate, DoubleToI), RelocInfo::CODE_TARGET);
+ }
+
+ Pop(ra, result);
+ bind(&done);
+}
+
+// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
+#define BRANCH_ARGS_CHECK(cond, rj, rk) \
+ DCHECK((cond == cc_always && rj == zero_reg && rk.rm() == zero_reg) || \
+ (cond != cc_always && (rj != zero_reg || rk.rm() != zero_reg)))
+
+void TurboAssembler::Branch(Label* L, bool need_link) {
+ int offset = GetOffset(L, OffsetSize::kOffset26);
+ if (need_link) {
+ bl(offset);
+ } else {
+ b(offset);
+ }
+}
+
+void TurboAssembler::Branch(Label* L, Condition cond, Register rj,
+ const Operand& rk, bool need_link) {
+ if (L->is_bound()) {
+ BRANCH_ARGS_CHECK(cond, rj, rk);
+ if (!BranchShortOrFallback(L, cond, rj, rk, need_link)) {
+ if (cond != cc_always) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rj, rk, need_link);
+ Branch(L, need_link);
+ bind(&skip);
+ } else {
+ Branch(L);
+ }
+ }
+ } else {
+ if (is_trampoline_emitted()) {
+ if (cond != cc_always) {
+ Label skip;
+ Condition neg_cond = NegateCondition(cond);
+ BranchShort(&skip, neg_cond, rj, rk, need_link);
+ Branch(L, need_link);
+ bind(&skip);
+ } else {
+ Branch(L);
+ }
+ } else {
+ BranchShort(L, cond, rj, rk, need_link);
+ }
+ }
+}
+
+void TurboAssembler::Branch(Label* L, Condition cond, Register rj,
+ RootIndex index) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadRoot(scratch, index);
+ Branch(L, cond, rj, Operand(scratch));
+}
+
+int32_t TurboAssembler::GetOffset(Label* L, OffsetSize bits) {
+ return branch_offset_helper(L, bits) >> 2;
+}
+
+Register TurboAssembler::GetRkAsRegisterHelper(const Operand& rk,
+ Register scratch) {
+ Register r2 = no_reg;
+ if (rk.is_reg()) {
+ r2 = rk.rm();
+ } else {
+ r2 = scratch;
+ li(r2, rk);
+ }
+
+ return r2;
+}
+
+bool TurboAssembler::BranchShortOrFallback(Label* L, Condition cond,
+ Register rj, const Operand& rk,
+ bool need_link) {
+ UseScratchRegisterScope temps(this);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Register scratch = temps.hasAvailable() ? temps.Acquire() : t8;
+ DCHECK_NE(rj, zero_reg);
+
+ // Be careful to always use shifted_branch_offset only just before the
+ // branch instruction, as the location will be remembered for patching
+ // the target.
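+ //
+ // When need_link is requested, pcaddi(ra, 2) sets ra = pc + 8, the address
+ // of the instruction following the conditional branch emitted right after
+ // it, emulating a linking conditional branch.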
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ int offset = 0;
+ switch (cond) {
+ case cc_always:
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
+ offset = GetOffset(L, OffsetSize::kOffset26);
+ if (need_link) {
+ bl(offset);
+ } else {
+ b(offset);
+ }
+ break;
+ case eq:
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ // beq is used here to make the code patchable. Otherwise b should
+ // be used which has no condition field so is not patchable.
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ beq(rj, rj, offset);
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset21)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset21);
+ beqz(rj, offset);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ // We don't want any other register but scratch clobbered.
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ beq(rj, sc, offset);
+ }
+ break;
+ case ne:
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ // bne is used here to make the code patchable. Otherwise we
+ // should not generate any instruction.
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bne(rj, rj, offset);
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset21)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset21);
+ bnez(rj, offset);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ // We don't want any other register but scratch clobbered.
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bne(rj, sc, offset);
+ }
+ break;
+
+ // Signed comparison.
+ case greater:
+ // rj > rk
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ // No code needs to be emitted.
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ blt(zero_reg, rj, offset);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ DCHECK(rj != sc);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ blt(sc, rj, offset);
+ }
+ break;
+ case greater_equal:
+ // rj >= rk
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset26);
+ b(offset);
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bge(rj, zero_reg, offset);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ DCHECK(rj != sc);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bge(rj, sc, offset);
+ }
+ break;
+ case less:
+ // rj < rk
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ // No code needs to be emitted.
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ blt(rj, zero_reg, offset);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ DCHECK(rj != sc);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ blt(rj, sc, offset);
+ }
+ break;
+ case less_equal:
+ // rj <= rk
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset26);
+ b(offset);
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bge(zero_reg, rj, offset);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ DCHECK(rj != sc);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bge(sc, rj, offset);
+ }
+ break;
+
+ // Unsigned comparison.
+ case Ugreater:
+ // rj > rk
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ // No code needs to be emitted.
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset26);
+ bnez(rj, offset);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ DCHECK(rj != sc);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bltu(sc, rj, offset);
+ }
+ break;
+ case Ugreater_equal:
+ // rj >= rk
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset26);
+ b(offset);
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset26);
+ b(offset);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ DCHECK(rj != sc);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bgeu(rj, sc, offset);
+ }
+ break;
+ case Uless:
+ // rj < rk
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ // No code needs to be emitted.
+ } else if (IsZero(rk)) {
+ // No code needs to be emitted.
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ DCHECK(rj != sc);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bltu(rj, sc, offset);
+ }
+ break;
+ case Uless_equal:
+ // rj <= rk
+ if (rk.is_reg() && rj.code() == rk.rm().code()) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset26)) return false;
+ if (need_link) pcaddi(ra, 2);
+ offset = GetOffset(L, OffsetSize::kOffset26);
+ b(offset);
+ } else if (IsZero(rk)) {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset21)) return false;
+ if (need_link) pcaddi(ra, 2);
+ beqz(rj, L);
+ } else {
+ if (L->is_bound() && !is_near(L, OffsetSize::kOffset16)) return false;
+ if (need_link) pcaddi(ra, 2);
+ Register sc = GetRkAsRegisterHelper(rk, scratch);
+ DCHECK(rj != sc);
+ offset = GetOffset(L, OffsetSize::kOffset16);
+ bgeu(sc, rj, offset);
+ }
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ return true;
+}
+
+void TurboAssembler::BranchShort(Label* L, Condition cond, Register rj,
+ const Operand& rk, bool need_link) {
+ BRANCH_ARGS_CHECK(cond, rj, rk);
+ bool result = BranchShortOrFallback(L, cond, rj, rk, need_link);
+ DCHECK(result);
+ USE(result);
+}
+
+void TurboAssembler::LoadFromConstantsTable(Register destination,
+ int constant_index) {
+ DCHECK(RootsTable::IsImmortalImmovable(RootIndex::kBuiltinsConstantsTable));
+ LoadRoot(destination, RootIndex::kBuiltinsConstantsTable);
+ Ld_d(destination,
+ FieldMemOperand(destination, FixedArray::kHeaderSize +
+ constant_index * kPointerSize));
+}
+
+void TurboAssembler::LoadRootRelative(Register destination, int32_t offset) {
+ Ld_d(destination, MemOperand(kRootRegister, offset));
+}
+
+void TurboAssembler::LoadRootRegisterOffset(Register destination,
+ intptr_t offset) {
+ if (offset == 0) {
+ Move(destination, kRootRegister);
+ } else {
+ Add_d(destination, kRootRegister, Operand(offset));
+ }
+}
+
+void TurboAssembler::Jump(Register target, Condition cond, Register rj,
+ const Operand& rk) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (cond == cc_always) {
+ jirl(zero_reg, target, 0);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rj, rk);
+ Label skip;
+ Branch(&skip, NegateCondition(cond), rj, rk);
+ jirl(zero_reg, target, 0);
+ bind(&skip);
+ }
+}
+
+void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+ Condition cond, Register rj, const Operand& rk) {
+ Label skip;
+ if (cond != cc_always) {
+ Branch(&skip, NegateCondition(cond), rj, rk);
+ }
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ li(t7, Operand(target, rmode));
+ jirl(zero_reg, t7, 0);
+ bind(&skip);
+ }
+}
+
+void TurboAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
+ Register rj, const Operand& rk) {
+ DCHECK(!RelocInfo::IsCodeTarget(rmode));
+ Jump(static_cast<intptr_t>(target), rmode, cond, rj, rk);
+}
+
+void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond, Register rj, const Operand& rk) {
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
+
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label skip;
+ if (cond != cc_always) {
+ BranchShort(&skip, NegateCondition(cond), rj, rk);
+ }
+
+ Builtin builtin = Builtin::kNoBuiltinId;
+ bool target_is_isolate_independent_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin) &&
+ Builtins::IsIsolateIndependent(builtin);
+ if (target_is_isolate_independent_builtin &&
+ options().use_pc_relative_calls_and_jumps) {
+ int32_t code_target_index = AddCodeTarget(code);
+ RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET);
+ b(code_target_index);
+ bind(&skip);
+ return;
+ } else if (root_array_available_ && options().isolate_independent_code) {
+ UNREACHABLE();
+ /*int offset = code->builtin_index() * kSystemPointerSize +
+ IsolateData::builtin_entry_table_offset();
+ Ld_d(t7, MemOperand(kRootRegister, offset));
+ Jump(t7, cc_always, rj, rk);
+ bind(&skip);
+ return;*/
+ } else if (options().inline_offheap_trampolines &&
+ target_is_isolate_independent_builtin) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin);
+ li(t7, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
+ Jump(t7, cc_always, rj, rk);
+ bind(&skip);
+ return;
+ }
+
+ Jump(static_cast<intptr_t>(code.address()), rmode, cc_always, rj, rk);
+ bind(&skip);
+}
+
+void TurboAssembler::Jump(const ExternalReference& reference) {
+ li(t7, reference);
+ Jump(t7);
+}
+
+// Note: To call gcc-compiled C code on LoongArch, you must call through
+// t[0-8].
+void TurboAssembler::Call(Register target, Condition cond, Register rj,
+ const Operand& rk) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (cond == cc_always) {
+ jirl(ra, target, 0);
+ } else {
+ BRANCH_ARGS_CHECK(cond, rj, rk);
+ Label skip;
+ Branch(&skip, NegateCondition(cond), rj, rk);
+ jirl(ra, target, 0);
+ bind(&skip);
+ }
+ set_last_call_pc_(pc_);
+}
+
+void MacroAssembler::JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit,
+ Label* on_in_range) {
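+ // Unsigned range trick: value is in [lower_limit, higher_limit] iff
+ // (value - lower_limit) <= (higher_limit - lower_limit) when compared
+ // as unsigned (the 'ls' condition).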
+ if (lower_limit != 0) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Sub_d(scratch, value, Operand(lower_limit));
+ Branch(on_in_range, ls, scratch, Operand(higher_limit - lower_limit));
+ } else {
+ Branch(on_in_range, ls, value, Operand(higher_limit - lower_limit));
+ }
+}
+
+void TurboAssembler::Call(Address target, RelocInfo::Mode rmode, Condition cond,
+ Register rj, const Operand& rk) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label skip;
+ if (cond != cc_always) {
+ BranchShort(&skip, NegateCondition(cond), rj, rk);
+ }
+ intptr_t offset_diff = target - pc_offset();
+ if (RelocInfo::IsNone(rmode) && is_int28(offset_diff)) {
+ bl(offset_diff >> 2);
+ } else if (RelocInfo::IsNone(rmode) && is_int38(offset_diff)) {
+ pcaddu18i(t7, static_cast<int32_t>(offset_diff) >> 18);
+ jirl(ra, t7, (offset_diff & 0x3ffff) >> 2);
+ } else {
+ li(t7, Operand(static_cast<int64_t>(target), rmode), ADDRESS_LOAD);
+ Call(t7, cc_always, rj, rk);
+ }
+ bind(&skip);
+}
+
+void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+ Condition cond, Register rj, const Operand& rk) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Label skip;
+ if (cond != cc_always) {
+ BranchShort(&skip, NegateCondition(cond), rj, rk);
+ }
+
+ Builtin builtin = Builtin::kNoBuiltinId;
+ bool target_is_isolate_independent_builtin =
+ isolate()->builtins()->IsBuiltinHandle(code, &builtin) &&
+ Builtins::IsIsolateIndependent(builtin);
+
+ if (target_is_isolate_independent_builtin &&
+ options().use_pc_relative_calls_and_jumps) {
+ int32_t code_target_index = AddCodeTarget(code);
+ RecordCommentForOffHeapTrampoline(builtin);
+ RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET);
+ bl(code_target_index);
+ set_last_call_pc_(pc_);
+ bind(&skip);
+ RecordComment("]");
+ return;
+ } else if (root_array_available_ && options().isolate_independent_code) {
+ UNREACHABLE();
+ /*int offset = code->builtin_index() * kSystemPointerSize +
+ IsolateData::builtin_entry_table_offset();
+ LoadRootRelative(t7, offset);
+ Call(t7, cond, rj, rk);
+ bind(&skip);
+ return;*/
+ } else if (options().inline_offheap_trampolines &&
+ target_is_isolate_independent_builtin) {
+ // Inline the trampoline.
+ RecordCommentForOffHeapTrampoline(builtin);
+ li(t7, Operand(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET));
+ Call(t7, cond, rj, rk);
+ bind(&skip);
+ return;
+ }
+
+ DCHECK(RelocInfo::IsCodeTarget(rmode));
+ DCHECK(code->IsExecutable());
+ Call(code.address(), rmode, cc_always, rj, rk);
+ bind(&skip);
+}
+
+void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
+ STATIC_ASSERT(kSystemPointerSize == 8);
+ STATIC_ASSERT(kSmiTagSize == 1);
+ STATIC_ASSERT(kSmiTag == 0);
+
+ // The builtin_index register contains the builtin index as a Smi.
+ SmiUntag(builtin_index, builtin_index);
+ Alsl_d(builtin_index, builtin_index, kRootRegister, kSystemPointerSizeLog2,
+ t7);
+ Ld_d(builtin_index,
+ MemOperand(builtin_index, IsolateData::builtin_entry_table_offset()));
+}
+
+void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin,
+ Register destination) {
+ Ld_d(destination, EntryFromBuiltinAsOperand(builtin));
+}
+MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin) {
+ DCHECK(root_array_available());
+ return MemOperand(kRootRegister,
+ IsolateData::builtin_entry_slot_offset(builtin));
+}
+
+void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
+ LoadEntryFromBuiltinIndex(builtin_index);
+ Call(builtin_index);
+}
+void TurboAssembler::CallBuiltin(Builtin builtin) {
+ RecordCommentForOffHeapTrampoline(builtin);
+ Call(BuiltinEntry(builtin), RelocInfo::OFF_HEAP_TARGET);
+ if (FLAG_code_comments) RecordComment("]");
+}
+
+void TurboAssembler::PatchAndJump(Address target) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
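+ // scratch = pc + 16, the address of the 8-byte target literal emitted
+ // just after the pcaddi/Ld_d/jirl/nop sequence below.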
+ pcaddi(scratch, 4);
+ Ld_d(t7, MemOperand(scratch, 0));
+ jirl(zero_reg, t7, 0);
+ nop();
+ DCHECK_EQ(reinterpret_cast<uint64_t>(pc_) % 8, 0);
+ *reinterpret_cast<uint64_t*>(pc_) = target; // pc_ should be aligned.
+ pc_ += sizeof(uint64_t);
+}
+
+void TurboAssembler::StoreReturnAddressAndCall(Register target) {
+ // This generates the final instruction sequence for calls to C functions
+ // once an exit frame has been constructed.
+ //
+ // Note that this assumes the caller code (i.e. the Code object currently
+ // being generated) is immovable or that the callee function cannot trigger
+ // GC, since the callee function will return to it.
+
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
+ static constexpr int kNumInstructionsToJump = 2;
+ Label find_ra;
+ // Adjust the value in ra to point to the correct return location, the 2nd
+ // instruction past the real call into C code (the jirl), and store it.
+ // This is the return address of the exit frame.
+ pcaddi(ra, kNumInstructionsToJump + 1);
+ bind(&find_ra);
+
+ // This spot was reserved in EnterExitFrame.
+ St_d(ra, MemOperand(sp, 0));
+ // Stack is still aligned.
+
+ // TODO(LOONG_dev): can be jirl target? a0 -- a7?
+ jirl(zero_reg, target, 0);
+ // Make sure the stored 'ra' points to this position.
+ DCHECK_EQ(kNumInstructionsToJump, InstructionsGeneratedSince(&find_ra));
+}
+
+void TurboAssembler::Ret(Condition cond, Register rj, const Operand& rk) {
+ Jump(ra, cond, rj, rk);
+}
+
+void TurboAssembler::Drop(int count, Condition cond, Register reg,
+ const Operand& op) {
+ if (count <= 0) {
+ return;
+ }
+
+ Label skip;
+
+ if (cond != al) {
+ Branch(&skip, NegateCondition(cond), reg, op);
+ }
+
+ Add_d(sp, sp, Operand(count * kPointerSize));
+
+ if (cond != al) {
+ bind(&skip);
+ }
+}
+
+void MacroAssembler::Swap(Register reg1, Register reg2, Register scratch) {
+ if (scratch == no_reg) {
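+ // No scratch register available: the XOR-swap trick exchanges the two
+ // registers in place without a temporary.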
+ Xor(reg1, reg1, Operand(reg2));
+ Xor(reg2, reg2, Operand(reg1));
+ Xor(reg1, reg1, Operand(reg2));
+ } else {
+ mov(scratch, reg1);
+ mov(reg1, reg2);
+ mov(reg2, scratch);
+ }
+}
+
+void TurboAssembler::Call(Label* target) { Branch(target, true); }
+
+void TurboAssembler::Push(Smi smi) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(smi));
+ Push(scratch);
+}
+
+void TurboAssembler::Push(Handle<HeapObject> handle) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(handle));
+ Push(scratch);
+}
+
+void TurboAssembler::PushArray(Register array, Register size, Register scratch,
+ Register scratch2, PushArrayOrder order) {
+ DCHECK(!AreAliased(array, size, scratch, scratch2));
+ Label loop, entry;
+ if (order == PushArrayOrder::kReverse) {
+ mov(scratch, zero_reg);
+ jmp(&entry);
+ bind(&loop);
+ Alsl_d(scratch2, scratch, array, kPointerSizeLog2, t7);
+ Ld_d(scratch2, MemOperand(scratch2, 0));
+ Push(scratch2);
+ Add_d(scratch, scratch, Operand(1));
+ bind(&entry);
+ Branch(&loop, less, scratch, Operand(size));
+ } else {
+ mov(scratch, size);
+ jmp(&entry);
+ bind(&loop);
+ Alsl_d(scratch2, scratch, array, kPointerSizeLog2, t7);
+ Ld_d(scratch2, MemOperand(scratch2, 0));
+ Push(scratch2);
+ bind(&entry);
+ Add_d(scratch, scratch, Operand(-1));
+ Branch(&loop, greater_equal, scratch, Operand(zero_reg));
+ }
+}
+
+// ---------------------------------------------------------------------------
+// Exception handling.
+
+void MacroAssembler::PushStackHandler() {
+ // Adjust this code if not the case.
+ STATIC_ASSERT(StackHandlerConstants::kSize == 2 * kPointerSize);
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+
+ Push(Smi::zero()); // Padding.
+
+ // Link the current handler as the next handler.
+ li(t2,
+ ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
+ Ld_d(t1, MemOperand(t2, 0));
+ Push(t1);
+
+ // Set this new handler as the current one.
+ St_d(sp, MemOperand(t2, 0));
+}
+
+void MacroAssembler::PopStackHandler() {
+ STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+ Pop(a1);
+ Add_d(sp, sp,
+ Operand(
+ static_cast<int64_t>(StackHandlerConstants::kSize - kPointerSize)));
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch,
+ ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
+ St_d(a1, MemOperand(scratch, 0));
+}
+
+void TurboAssembler::FPUCanonicalizeNaN(const DoubleRegister dst,
+ const DoubleRegister src) {
+ fsub_d(dst, src, kDoubleRegZero);
+}
+
+// -----------------------------------------------------------------------------
+// JavaScript invokes.
+
+void MacroAssembler::LoadStackLimit(Register destination, StackLimitKind kind) {
+ DCHECK(root_array_available());
+ Isolate* isolate = this->isolate();
+ ExternalReference limit =
+ kind == StackLimitKind::kRealStackLimit
+ ? ExternalReference::address_of_real_jslimit(isolate)
+ : ExternalReference::address_of_jslimit(isolate);
+ DCHECK(TurboAssembler::IsAddressableThroughRootRegister(isolate, limit));
+
+ intptr_t offset =
+ TurboAssembler::RootRegisterOffsetForExternalReference(isolate, limit);
+ CHECK(is_int32(offset));
+ Ld_d(destination, MemOperand(kRootRegister, static_cast<int32_t>(offset)));
+}
+
+void MacroAssembler::StackOverflowCheck(Register num_args, Register scratch1,
+ Register scratch2,
+ Label* stack_overflow) {
+ // Check the stack for overflow. We are not trying to catch
+ // interruptions (e.g. debug break and preemption) here, so the "real stack
+ // limit" is checked.
+
+ LoadStackLimit(scratch1, StackLimitKind::kRealStackLimit);
+ // Make scratch1 the space we have left. The stack might already be
+ // overflowed here, which will cause scratch1 to become negative.
+ sub_d(scratch1, sp, scratch1);
+ // Check if the arguments will overflow the stack.
+ slli_d(scratch2, num_args, kPointerSizeLog2);
+ // Signed comparison.
+ Branch(stack_overflow, le, scratch1, Operand(scratch2));
+}
+
+void MacroAssembler::InvokePrologue(Register expected_parameter_count,
+ Register actual_parameter_count,
+ Label* done, InvokeType type) {
+ Label regular_invoke;
+
+ // a0: actual arguments count
+ // a1: function (passed through to callee)
+ // a2: expected arguments count
+
+ DCHECK_EQ(actual_parameter_count, a0);
+ DCHECK_EQ(expected_parameter_count, a2);
+
+ // If the expected parameter count is equal to the adaptor sentinel, no need
+ // to push undefined value as arguments.
+ Branch(&regular_invoke, eq, expected_parameter_count,
+ Operand(kDontAdaptArgumentsSentinel));
+
+ // If overapplication or if the actual argument count is equal to the
+ // formal parameter count, no need to push extra undefined values.
+ sub_d(expected_parameter_count, expected_parameter_count,
+ actual_parameter_count);
+ Branch(&regular_invoke, le, expected_parameter_count, Operand(zero_reg));
+
+ Label stack_overflow;
+ StackOverflowCheck(expected_parameter_count, t0, t1, &stack_overflow);
+ // Underapplication. Move the arguments already in the stack, including the
+ // receiver and the return address.
+ {
+ Label copy;
+ Register src = a6, dest = a7;
+ mov(src, sp);
+ slli_d(t0, expected_parameter_count, kSystemPointerSizeLog2);
+ Sub_d(sp, sp, Operand(t0));
+ // Update stack pointer.
+ mov(dest, sp);
+ mov(t0, actual_parameter_count);
+ bind(&copy);
+ Ld_d(t1, MemOperand(src, 0));
+ St_d(t1, MemOperand(dest, 0));
+ Sub_d(t0, t0, Operand(1));
+ Add_d(src, src, Operand(kSystemPointerSize));
+ Add_d(dest, dest, Operand(kSystemPointerSize));
+ Branch(&copy, ge, t0, Operand(zero_reg));
+ }
+
+ // Fill remaining expected arguments with undefined values.
+ LoadRoot(t0, RootIndex::kUndefinedValue);
+ {
+ Label loop;
+ bind(&loop);
+ St_d(t0, MemOperand(a7, 0));
+ Sub_d(expected_parameter_count, expected_parameter_count, Operand(1));
+ Add_d(a7, a7, Operand(kSystemPointerSize));
+ Branch(&loop, gt, expected_parameter_count, Operand(zero_reg));
+ }
+ b(&regular_invoke);
+
+ bind(&stack_overflow);
+ {
+ FrameScope frame(this,
+ has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+ CallRuntime(Runtime::kThrowStackOverflow);
+ break_(0xCC);
+ }
+
+ bind(&regular_invoke);
+}
+
+void MacroAssembler::CallDebugOnFunctionCall(Register fun, Register new_target,
+ Register expected_parameter_count,
+ Register actual_parameter_count) {
+ // Load receiver to pass it later to DebugOnFunctionCall hook.
+ LoadReceiver(t0, actual_parameter_count);
+ FrameScope frame(this, has_frame() ? StackFrame::NONE : StackFrame::INTERNAL);
+
+ SmiTag(expected_parameter_count);
+ Push(expected_parameter_count);
+
+ SmiTag(actual_parameter_count);
+ Push(actual_parameter_count);
+
+ if (new_target.is_valid()) {
+ Push(new_target);
+ }
+ // TODO(LOONG_dev): MultiPush/Pop
+ Push(fun);
+ Push(fun);
+ Push(t0);
+ CallRuntime(Runtime::kDebugOnFunctionCall);
+ Pop(fun);
+ if (new_target.is_valid()) {
+ Pop(new_target);
+ }
+
+ Pop(actual_parameter_count);
+ SmiUntag(actual_parameter_count);
+
+ Pop(expected_parameter_count);
+ SmiUntag(expected_parameter_count);
+}
+
+void MacroAssembler::InvokeFunctionCode(Register function, Register new_target,
+ Register expected_parameter_count,
+ Register actual_parameter_count,
+ InvokeType type) {
+ // You can't call a function without a valid frame.
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
+ DCHECK_EQ(function, a1);
+ DCHECK_IMPLIES(new_target.is_valid(), new_target == a3);
+
+ // On function call, call into the debugger if necessary.
+ Label debug_hook, continue_after_hook;
+ {
+ li(t0, ExternalReference::debug_hook_on_function_call_address(isolate()));
+ Ld_b(t0, MemOperand(t0, 0));
+ BranchShort(&debug_hook, ne, t0, Operand(zero_reg));
+ }
+ bind(&continue_after_hook);
+
+ // Clear the new.target register if not given.
+ if (!new_target.is_valid()) {
+ LoadRoot(a3, RootIndex::kUndefinedValue);
+ }
+
+ Label done;
+ InvokePrologue(expected_parameter_count, actual_parameter_count, &done, type);
+ // We call indirectly through the code field in the function to
+ // allow recompilation to take effect without changing any of the
+ // call sites.
+ Register code = kJavaScriptCallCodeStartRegister;
+ Ld_d(code, FieldMemOperand(function, JSFunction::kCodeOffset));
+ switch (type) {
+ case InvokeType::kCall:
+ CallCodeObject(code);
+ break;
+ case InvokeType::kJump:
+ JumpCodeObject(code);
+ break;
+ }
+
+ Branch(&done);
+
+ // Deferred debug hook.
+ bind(&debug_hook);
+ CallDebugOnFunctionCall(function, new_target, expected_parameter_count,
+ actual_parameter_count);
+ Branch(&continue_after_hook);
+
+ // Continue here if InvokePrologue does handle the invocation due to
+ // mismatched parameter counts.
+ bind(&done);
+}
+
+void MacroAssembler::InvokeFunctionWithNewTarget(
+ Register function, Register new_target, Register actual_parameter_count,
+ InvokeType type) {
+ // You can't call a function without a valid frame.
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
+
+ // Contract with called JS functions requires that function is passed in a1.
+ DCHECK_EQ(function, a1);
+ Register expected_parameter_count = a2;
+ Register temp_reg = t0;
+ Ld_d(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ Ld_d(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ // The argument count is stored as uint16_t
+ Ld_hu(expected_parameter_count,
+ FieldMemOperand(temp_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+
+ InvokeFunctionCode(a1, new_target, expected_parameter_count,
+ actual_parameter_count, type);
+}
+
+void MacroAssembler::InvokeFunction(Register function,
+ Register expected_parameter_count,
+ Register actual_parameter_count,
+ InvokeType type) {
+ // You can't call a function without a valid frame.
+ DCHECK_IMPLIES(type == InvokeType::kCall, has_frame());
+
+ // Contract with called JS functions requires that function is passed in a1.
+ DCHECK_EQ(function, a1);
+
+ // Get the function and setup the context.
+ Ld_d(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+ InvokeFunctionCode(a1, no_reg, expected_parameter_count,
+ actual_parameter_count, type);
+}
+
+// ---------------------------------------------------------------------------
+// Support functions.
+
+void MacroAssembler::GetObjectType(Register object, Register map,
+ Register type_reg) {
+ LoadMap(map, object);
+ Ld_hu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+}
+
+void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg,
+ InstanceType lower_limit,
+ Register range) {
+ Ld_hu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ Sub_d(range, type_reg, Operand(lower_limit));
+}
+
+// -----------------------------------------------------------------------------
+// Runtime calls.
+
+void TurboAssembler::AddOverflow_d(Register dst, Register left,
+ const Operand& right, Register overflow) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ Register right_reg = no_reg;
+ if (!right.is_reg()) {
+ li(scratch, Operand(right));
+ right_reg = scratch;
+ } else {
+ right_reg = right.rm();
+ }
+
+ DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
+ overflow != scratch2);
+ DCHECK(overflow != left && overflow != right_reg);
+
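+ // Signed addition overflows iff both operands have the same sign while the
+ // sum's sign differs; then (sum ^ left) and (sum ^ right) both have the
+ // sign bit set, so 'overflow' is negative exactly on overflow.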
+ if (dst == left || dst == right_reg) {
+ add_d(scratch2, left, right_reg);
+ xor_(overflow, scratch2, left);
+ xor_(scratch, scratch2, right_reg);
+ and_(overflow, overflow, scratch);
+ mov(dst, scratch2);
+ } else {
+ add_d(dst, left, right_reg);
+ xor_(overflow, dst, left);
+ xor_(scratch, dst, right_reg);
+ and_(overflow, overflow, scratch);
+ }
+}
+
+void TurboAssembler::SubOverflow_d(Register dst, Register left,
+ const Operand& right, Register overflow) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ Register right_reg = no_reg;
+ if (!right.is_reg()) {
+ li(scratch, Operand(right));
+ right_reg = scratch;
+ } else {
+ right_reg = right.rm();
+ }
+
+ DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
+ overflow != scratch2);
+ DCHECK(overflow != left && overflow != right_reg);
+
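+ // Signed subtraction overflows iff the operands have different signs and
+ // the result's sign differs from left's; then (left ^ result) and
+ // (left ^ right) both have the sign bit set, making 'overflow' negative.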
+ if (dst == left || dst == right_reg) {
+ Sub_d(scratch2, left, right_reg);
+ xor_(overflow, left, scratch2);
+ xor_(scratch, left, right_reg);
+ and_(overflow, overflow, scratch);
+ mov(dst, scratch2);
+ } else {
+ sub_d(dst, left, right_reg);
+ xor_(overflow, left, dst);
+ xor_(scratch, left, right_reg);
+ and_(overflow, overflow, scratch);
+ }
+}
+
+void TurboAssembler::MulOverflow_w(Register dst, Register left,
+ const Operand& right, Register overflow) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ Register right_reg = no_reg;
+ if (!right.is_reg()) {
+ li(scratch, Operand(right));
+ right_reg = scratch;
+ } else {
+ right_reg = right.rm();
+ }
+
+ DCHECK(left != scratch2 && right_reg != scratch2 && dst != scratch2 &&
+ overflow != scratch2);
+ DCHECK(overflow != left && overflow != right_reg);
+
+ if (dst == left || dst == right_reg) {
+ Mul_w(scratch2, left, right_reg);
+ Mulh_w(overflow, left, right_reg);
+ mov(dst, scratch2);
+ } else {
+ Mul_w(dst, left, right_reg);
+ Mulh_w(overflow, left, right_reg);
+ }
+
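+ // The 32-bit product overflowed iff the high 32 bits from Mulh_w differ
+ // from the sign extension of the low 32 bits; 'overflow' is then non-zero.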
+ srai_d(scratch2, dst, 32);
+ xor_(overflow, overflow, scratch2);
+}
+
+void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
+ SaveFPRegsMode save_doubles) {
+ // All parameters are on the stack. a0 has the return value after the call.
+
+ // If the expected number of arguments of the runtime function is
+ // constant, we check that the actual number of arguments matches the
+ // expectation.
+ CHECK(f->nargs < 0 || f->nargs == num_arguments);
+
+ // TODO(1236192): Most runtime routines don't need the number of
+ // arguments passed in because it is constant. At some point we
+ // should remove this need and make the runtime routine entry code
+ // smarter.
+ PrepareCEntryArgs(num_arguments);
+ PrepareCEntryFunction(ExternalReference::Create(f));
+ Handle<Code> code =
+ CodeFactory::CEntry(isolate(), f->result_size, save_doubles);
+ Call(code, RelocInfo::CODE_TARGET);
+}
+
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ DCHECK_EQ(1, function->result_size);
+ if (function->nargs >= 0) {
+ PrepareCEntryArgs(function->nargs);
+ }
+ JumpToExternalReference(ExternalReference::Create(fid));
+}
+
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
+ bool builtin_exit_frame) {
+ PrepareCEntryFunction(builtin);
+ Handle<Code> code = CodeFactory::CEntry(isolate(), 1, SaveFPRegsMode::kIgnore,
+ ArgvMode::kStack, builtin_exit_frame);
+ Jump(code, RelocInfo::CODE_TARGET, al, zero_reg, Operand(zero_reg));
+}
+
+void MacroAssembler::JumpToInstructionStream(Address entry) {
+ li(kOffHeapTrampolineRegister, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ Jump(kOffHeapTrampolineRegister);
+}
+
+void MacroAssembler::LoadWeakValue(Register out, Register in,
+ Label* target_if_cleared) {
+ Branch(target_if_cleared, eq, in, Operand(kClearedWeakHeapObjectLower32));
+
+ And(out, in, Operand(~kWeakHeapObjectMask));
+}
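+
+// Weak references carry an extra tag bit relative to strong heap object
+// pointers, so once the cleared-reference sentinel has been ruled out,
+// masking off kWeakHeapObjectMask is all that is needed to recover a strong
+// pointer.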
+
+void MacroAssembler::EmitIncrementCounter(StatsCounter* counter, int value,
+ Register scratch1,
+ Register scratch2) {
+ DCHECK_GT(value, 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ // This operation has to be exactly 32-bit wide in case the external
+ // reference table redirects the counter to a uint32_t dummy_stats_counter_
+ // field.
+ li(scratch2, ExternalReference::Create(counter));
+ Ld_w(scratch1, MemOperand(scratch2, 0));
+ Add_w(scratch1, scratch1, Operand(value));
+ St_w(scratch1, MemOperand(scratch2, 0));
+ }
+}
+
+void MacroAssembler::EmitDecrementCounter(StatsCounter* counter, int value,
+ Register scratch1,
+ Register scratch2) {
+ DCHECK_GT(value, 0);
+ if (FLAG_native_code_counters && counter->Enabled()) {
+ // This operation has to be exactly 32-bit wide in case the external
+ // reference table redirects the counter to a uint32_t dummy_stats_counter_
+ // field.
+ li(scratch2, ExternalReference::Create(counter));
+ Ld_w(scratch1, MemOperand(scratch2, 0));
+ Sub_w(scratch1, scratch1, Operand(value));
+ St_w(scratch1, MemOperand(scratch2, 0));
+ }
+}
+
+// -----------------------------------------------------------------------------
+// Debugging.
+
+void TurboAssembler::Trap() { stop(); }
+void TurboAssembler::DebugBreak() { stop(); }
+
+void TurboAssembler::Assert(Condition cc, AbortReason reason, Register rs,
+ Operand rk) {
+ if (FLAG_debug_code) Check(cc, reason, rs, rk);
+}
+
+void TurboAssembler::Check(Condition cc, AbortReason reason, Register rj,
+ Operand rk) {
+ Label L;
+ Branch(&L, cc, rj, rk);
+ Abort(reason);
+ // Will not return here.
+ bind(&L);
+}
+
+void TurboAssembler::Abort(AbortReason reason) {
+ Label abort_start;
+ bind(&abort_start);
+ if (FLAG_code_comments) {
+ const char* msg = GetAbortReason(reason);
+ RecordComment("Abort message: ");
+ RecordComment(msg);
+ }
+
+ // Avoid emitting call to builtin if requested.
+ if (trap_on_abort()) {
+ stop();
+ return;
+ }
+
+ if (should_abort_hard()) {
+ // We don't care if we constructed a frame. Just pretend we did.
+ FrameScope assume_frame(this, StackFrame::NONE);
+ PrepareCallCFunction(0, a0);
+ li(a0, Operand(static_cast<int>(reason)));
+ CallCFunction(ExternalReference::abort_with_reason(), 1);
+ return;
+ }
+
+ Move(a0, Smi::FromInt(static_cast<int>(reason)));
+
+ // Disable stub call restrictions to always allow calls to abort.
+ if (!has_frame()) {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(this, StackFrame::NONE);
+ Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
+ } else {
+ Call(BUILTIN_CODE(isolate(), Abort), RelocInfo::CODE_TARGET);
+ }
+ // Will not return here.
+ if (is_trampoline_pool_blocked()) {
+ // If the calling code cares about the exact number of
+ // instructions generated, we insert padding here to keep the size
+ // of the Abort macro constant.
+ // Currently in debug mode with debug_code enabled the number of
+ // generated instructions is 10, so we use this as a maximum value.
+ static const int kExpectedAbortInstructions = 10;
+ int abort_instructions = InstructionsGeneratedSince(&abort_start);
+ DCHECK_LE(abort_instructions, kExpectedAbortInstructions);
+ while (abort_instructions++ < kExpectedAbortInstructions) {
+ nop();
+ }
+ }
+}
+
+void TurboAssembler::LoadMap(Register destination, Register object) {
+ Ld_d(destination, FieldMemOperand(object, HeapObject::kMapOffset));
+}
+
+void MacroAssembler::LoadNativeContextSlot(Register dst, int index) {
+ LoadMap(dst, cp);
+ Ld_d(dst, FieldMemOperand(
+ dst, Map::kConstructorOrBackPointerOrNativeContextOffset));
+ Ld_d(dst, MemOperand(dst, Context::SlotOffset(index)));
+}
+
+void TurboAssembler::StubPrologue(StackFrame::Type type) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(StackFrame::TypeToMarker(type)));
+ PushCommonFrame(scratch);
+}
+
+void TurboAssembler::Prologue() { PushStandardFrame(a1); }
+
+void TurboAssembler::EnterFrame(StackFrame::Type type) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Push(ra, fp);
+ Move(fp, sp);
+ if (!StackFrame::IsJavaScript(type)) {
+ li(kScratchReg, Operand(StackFrame::TypeToMarker(type)));
+ Push(kScratchReg);
+ }
+#if V8_ENABLE_WEBASSEMBLY
+ if (type == StackFrame::WASM) Push(kWasmInstanceRegister);
+#endif // V8_ENABLE_WEBASSEMBLY
+}
+
+void TurboAssembler::LeaveFrame(StackFrame::Type type) {
+ addi_d(sp, fp, 2 * kPointerSize);
+ Ld_d(ra, MemOperand(fp, 1 * kPointerSize));
+ Ld_d(fp, MemOperand(fp, 0 * kPointerSize));
+}
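+
+// EnterFrame leaves fp pointing at the saved fp slot with the saved ra one
+// word above it, so LeaveFrame can restore ra and fp and pop both words with
+// a single addi_d.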
+
+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space,
+ StackFrame::Type frame_type) {
+ DCHECK(frame_type == StackFrame::EXIT ||
+ frame_type == StackFrame::BUILTIN_EXIT);
+
+ // Set up the frame structure on the stack.
+ STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
+ STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
+ STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
+
+ // This is how the stack will look:
+ // fp + 2 (==kCallerSPDisplacement) - old stack's end
+ // [fp + 1 (==kCallerPCOffset)] - saved old ra
+ // [fp + 0 (==kCallerFPOffset)] - saved old fp
+ // [fp - 1] - StackFrame::EXIT Smi
+ // [fp - 2 (==kSPOffset)] - sp of the called function
+ // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
+ // new stack (will contain saved ra)
+
+ // Save registers and reserve room for saved entry sp.
+ addi_d(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp);
+ St_d(ra, MemOperand(sp, 3 * kPointerSize));
+ St_d(fp, MemOperand(sp, 2 * kPointerSize));
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(StackFrame::TypeToMarker(frame_type)));
+ St_d(scratch, MemOperand(sp, 1 * kPointerSize));
+ }
+ // Set up new frame pointer.
+ addi_d(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp);
+
+ if (FLAG_debug_code) {
+ St_d(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
+ }
+
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ // Save the frame pointer and the context in top.
+ li(t8, ExternalReference::Create(IsolateAddressId::kCEntryFPAddress,
+ isolate()));
+ St_d(fp, MemOperand(t8, 0));
+ li(t8,
+ ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
+ St_d(cp, MemOperand(t8, 0));
+ }
+
+ const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+ if (save_doubles) {
+ // The stack is already aligned to 0 modulo 8 for stores with Fst_d.
+ int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2;
+ int space = kNumOfSavedRegisters * kDoubleSize;
+ Sub_d(sp, sp, Operand(space));
+ // Remember: we only need to save every 2nd double FPU value.
+ for (int i = 0; i < kNumOfSavedRegisters; i++) {
+ FPURegister reg = FPURegister::from_code(2 * i);
+ Fst_d(reg, MemOperand(sp, i * kDoubleSize));
+ }
+ }
+
+ // Reserve space for the return address, the stack space, and an optional
+ // slot (used by DirectCEntry to hold the return value if a struct is
+ // returned), and align the frame in preparation for calling the runtime
+ // function.
+ DCHECK_GE(stack_space, 0);
+ Sub_d(sp, sp, Operand((stack_space + 2) * kPointerSize));
+ if (frame_alignment > 0) {
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
+ And(sp, sp, Operand(-frame_alignment)); // Align stack.
+ }
+
+ // Set the exit frame sp value to point just before the return address
+ // location.
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ addi_d(scratch, sp, kPointerSize);
+ St_d(scratch, MemOperand(fp, ExitFrameConstants::kSPOffset));
+}
+
+void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
+ bool do_return,
+ bool argument_count_is_length) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ // Optionally restore all double registers.
+ if (save_doubles) {
+ // Remember: we only need to restore every 2nd double FPU value.
+ int kNumOfSavedRegisters = FPURegister::kNumRegisters / 2;
+ Sub_d(t8, fp,
+ Operand(ExitFrameConstants::kFixedFrameSizeFromFp +
+ kNumOfSavedRegisters * kDoubleSize));
+ for (int i = 0; i < kNumOfSavedRegisters; i++) {
+ FPURegister reg = FPURegister::from_code(2 * i);
+ Fld_d(reg, MemOperand(t8, i * kDoubleSize));
+ }
+ }
+
+ // Clear top frame.
+ li(t8,
+ ExternalReference::Create(IsolateAddressId::kCEntryFPAddress, isolate()));
+ St_d(zero_reg, MemOperand(t8, 0));
+
+ // Restore current context from top and clear it in debug mode.
+ li(t8,
+ ExternalReference::Create(IsolateAddressId::kContextAddress, isolate()));
+ Ld_d(cp, MemOperand(t8, 0));
+
+ if (FLAG_debug_code) {
+ UseScratchRegisterScope temp(this);
+ Register scratch = temp.Acquire();
+ li(scratch, Operand(Context::kInvalidContext));
+ St_d(scratch, MemOperand(t8, 0));
+ }
+
+ // Pop the arguments, restore registers, and return.
+ mov(sp, fp); // Respect ABI stack constraint.
+ Ld_d(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
+ Ld_d(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
+
+ if (argument_count.is_valid()) {
+ if (argument_count_is_length) {
+ add_d(sp, sp, argument_count);
+ } else {
+ Alsl_d(sp, argument_count, sp, kPointerSizeLog2, t8);
+ }
+ }
+
+ addi_d(sp, sp, 2 * kPointerSize);
+ if (do_return) {
+ Ret();
+ }
+}
+
+int TurboAssembler::ActivationFrameAlignment() {
+#if V8_HOST_ARCH_LOONG64
+ // Running on the real platform. Use the alignment as mandated by the local
+ // environment.
+ // Note: This will break if we ever start generating snapshots on one LOONG64
+ // platform for another LOONG64 platform with a different alignment.
+ return base::OS::ActivationFrameAlignment();
+#else // V8_HOST_ARCH_LOONG64
+ // If we are using the simulator then we should always align to the expected
+ // alignment. As the simulator is used to generate snapshots we do not know
+ // if the target platform will need alignment, so this is controlled from a
+ // flag.
+ return FLAG_sim_stack_alignment;
+#endif // V8_HOST_ARCH_LOONG64
+}
+
+void MacroAssembler::AssertStackIsAligned() {
+ if (FLAG_debug_code) {
+ const int frame_alignment = ActivationFrameAlignment();
+ const int frame_alignment_mask = frame_alignment - 1;
+
+ if (frame_alignment > kPointerSize) {
+ Label alignment_as_expected;
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
+ {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ andi(scratch, sp, frame_alignment_mask);
+ Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
+ }
+ // Don't use Check here, as it will call Runtime_Abort re-entering here.
+ stop();
+ bind(&alignment_as_expected);
+ }
+ }
+}
+
+void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
+ if (SmiValuesAre32Bits()) {
+ Ld_w(dst, MemOperand(src.base(), SmiWordOffset(src.offset())));
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ Ld_w(dst, src);
+ SmiUntag(dst);
+ }
+}
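+
+// With 32-bit Smi payloads the value lives in the upper half of the tagged
+// 64-bit word, so on little-endian LOONG64 the 32-bit load at
+// SmiWordOffset(src.offset()) already yields the untagged value; only the
+// 31-bit layout needs an explicit SmiUntag shift.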
+
+void TurboAssembler::JumpIfSmi(Register value, Label* smi_label) {
+ DCHECK_EQ(0, kSmiTag);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ andi(scratch, value, kSmiTagMask);
+ Branch(smi_label, eq, scratch, Operand(zero_reg));
+}
+
+void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label) {
+ DCHECK_EQ(0, kSmiTag);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ andi(scratch, value, kSmiTagMask);
+ Branch(not_smi_label, ne, scratch, Operand(zero_reg));
+}
+
+void MacroAssembler::AssertNotSmi(Register object) {
+ if (FLAG_debug_code) {
+ STATIC_ASSERT(kSmiTag == 0);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ andi(scratch, object, kSmiTagMask);
+ Check(ne, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));
+ }
+}
+
+void MacroAssembler::AssertSmi(Register object) {
+ if (FLAG_debug_code) {
+ STATIC_ASSERT(kSmiTag == 0);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ andi(scratch, object, kSmiTagMask);
+ Check(eq, AbortReason::kOperandIsASmi, scratch, Operand(zero_reg));
+ }
+}
+
+void MacroAssembler::AssertConstructor(Register object) {
+ if (FLAG_debug_code) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAConstructor, t8,
+ Operand(zero_reg));
+
+ LoadMap(t8, object);
+ Ld_bu(t8, FieldMemOperand(t8, Map::kBitFieldOffset));
+ And(t8, t8, Operand(Map::Bits1::IsConstructorBit::kMask));
+ Check(ne, AbortReason::kOperandIsNotAConstructor, t8, Operand(zero_reg));
+ }
+}
+
+void MacroAssembler::AssertFunction(Register object) {
+ if (FLAG_debug_code) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAFunction, t8,
+ Operand(zero_reg));
+ Push(object);
+ LoadMap(object, object);
+ GetInstanceTypeRange(object, object, FIRST_JS_FUNCTION_TYPE, t8);
+ Check(ls, AbortReason::kOperandIsNotAFunction, t8,
+ Operand(LAST_JS_FUNCTION_TYPE - FIRST_JS_FUNCTION_TYPE));
+ Pop(object);
+ }
+}
+
+void MacroAssembler::AssertBoundFunction(Register object) {
+ if (FLAG_debug_code) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, AbortReason::kOperandIsASmiAndNotABoundFunction, t8,
+ Operand(zero_reg));
+ GetObjectType(object, t8, t8);
+ Check(eq, AbortReason::kOperandIsNotABoundFunction, t8,
+ Operand(JS_BOUND_FUNCTION_TYPE));
+ }
+}
+
+void MacroAssembler::AssertGeneratorObject(Register object) {
+ if (!FLAG_debug_code) return;
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ STATIC_ASSERT(kSmiTag == 0);
+ SmiTst(object, t8);
+ Check(ne, AbortReason::kOperandIsASmiAndNotAGeneratorObject, t8,
+ Operand(zero_reg));
+
+ GetObjectType(object, t8, t8);
+
+ Label done;
+
+ // Check if JSGeneratorObject
+ Branch(&done, eq, t8, Operand(JS_GENERATOR_OBJECT_TYPE));
+
+ // Check if JSAsyncFunctionObject (See MacroAssembler::CompareInstanceType)
+ Branch(&done, eq, t8, Operand(JS_ASYNC_FUNCTION_OBJECT_TYPE));
+
+ // Check if JSAsyncGeneratorObject
+ Branch(&done, eq, t8, Operand(JS_ASYNC_GENERATOR_OBJECT_TYPE));
+
+ Abort(AbortReason::kOperandIsNotAGeneratorObject);
+
+ bind(&done);
+}
+
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
+ Register scratch) {
+ if (FLAG_debug_code) {
+ Label done_checking;
+ AssertNotSmi(object);
+ LoadRoot(scratch, RootIndex::kUndefinedValue);
+ Branch(&done_checking, eq, object, Operand(scratch));
+ GetObjectType(object, scratch, scratch);
+ Assert(eq, AbortReason::kExpectedUndefinedOrCell, scratch,
+ Operand(ALLOCATION_SITE_TYPE));
+ bind(&done_checking);
+ }
+}
+
+void TurboAssembler::Float32Max(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* out_of_line) {
+ if (src1 == src2) {
+ Move_s(dst, src1);
+ return;
+ }
+
+ // Check if one of operands is NaN.
+ CompareIsNanF32(src1, src2);
+ BranchTrueF(out_of_line);
+
+ fmax_s(dst, src1, src2);
+}
+
+void TurboAssembler::Float32MaxOutOfLine(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ fadd_s(dst, src1, src2);
+}
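+
+// The *OutOfLine helpers here and below are reached only when CompareIsNanF*
+// detected a NaN operand; adding the operands then propagates a quiet NaN,
+// which is the result these Max/Min macros are expected to produce for NaN
+// inputs.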
+
+void TurboAssembler::Float32Min(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* out_of_line) {
+ if (src1 == src2) {
+ Move_s(dst, src1);
+ return;
+ }
+
+ // Check if one of operands is NaN.
+ CompareIsNanF32(src1, src2);
+ BranchTrueF(out_of_line);
+
+ fmin_s(dst, src1, src2);
+}
+
+void TurboAssembler::Float32MinOutOfLine(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ fadd_s(dst, src1, src2);
+}
+
+void TurboAssembler::Float64Max(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* out_of_line) {
+ if (src1 == src2) {
+ Move_d(dst, src1);
+ return;
+ }
+
+ // Check if one of operands is NaN.
+ CompareIsNanF64(src1, src2);
+ BranchTrueF(out_of_line);
+
+ fmax_d(dst, src1, src2);
+}
+
+void TurboAssembler::Float64MaxOutOfLine(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ fadd_d(dst, src1, src2);
+}
+
+void TurboAssembler::Float64Min(FPURegister dst, FPURegister src1,
+ FPURegister src2, Label* out_of_line) {
+ if (src1 == src2) {
+ Move_d(dst, src1);
+ return;
+ }
+
+ // Check if one of operands is NaN.
+ CompareIsNanF64(src1, src2);
+ BranchTrueF(out_of_line);
+
+ fmin_d(dst, src1, src2);
+}
+
+void TurboAssembler::Float64MinOutOfLine(FPURegister dst, FPURegister src1,
+ FPURegister src2) {
+ fadd_d(dst, src1, src2);
+}
+
+static const int kRegisterPassedArguments = 8;
+
+int TurboAssembler::CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments) {
+ int stack_passed_words = 0;
+ num_reg_arguments += 2 * num_double_arguments;
+
+ // Up to eight simple arguments are passed in registers a0..a7.
+ if (num_reg_arguments > kRegisterPassedArguments) {
+ stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
+ }
+ return stack_passed_words;
+}
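+
+// Each double argument is counted as two register slots above, so, for
+// example, 5 integer arguments plus 3 double arguments count as 11 slots and
+// 11 - 8 = 3 words spill to the stack.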
+
+void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+ int num_double_arguments,
+ Register scratch) {
+ int frame_alignment = ActivationFrameAlignment();
+
+ // Up to eight simple arguments are passed in registers a0..a7; no argument
+ // slots are used.
+ // Remaining arguments are pushed on the stack.
+ int stack_passed_arguments =
+ CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
+ if (frame_alignment > kPointerSize) {
+ // Make the stack end at the required alignment and make room for the
+ // stack-passed arguments and the original value of sp.
+ mov(scratch, sp);
+ Sub_d(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
+ bstrins_d(sp, zero_reg, std::log2(frame_alignment) - 1, 0);
+ St_d(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ } else {
+ Sub_d(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+ }
+}
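+
+// The bstrins_d above clears the low log2(frame_alignment) bits of sp,
+// rounding the stack pointer down to the required alignment; the original sp
+// stored above the argument slots is restored by CallCFunctionHelper after
+// the call.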
+
+void TurboAssembler::PrepareCallCFunction(int num_reg_arguments,
+ Register scratch) {
+ PrepareCallCFunction(num_reg_arguments, 0, scratch);
+}
+
+void TurboAssembler::CallCFunction(ExternalReference function,
+ int num_reg_arguments,
+ int num_double_arguments) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ li(t7, function);
+ CallCFunctionHelper(t7, num_reg_arguments, num_double_arguments);
+}
+
+void TurboAssembler::CallCFunction(Register function, int num_reg_arguments,
+ int num_double_arguments) {
+ CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
+}
+
+void TurboAssembler::CallCFunction(ExternalReference function,
+ int num_arguments) {
+ CallCFunction(function, num_arguments, 0);
+}
+
+void TurboAssembler::CallCFunction(Register function, int num_arguments) {
+ CallCFunction(function, num_arguments, 0);
+}
+
+void TurboAssembler::CallCFunctionHelper(Register function,
+ int num_reg_arguments,
+ int num_double_arguments) {
+ DCHECK_LE(num_reg_arguments + num_double_arguments, kMaxCParameters);
+ DCHECK(has_frame());
+ // Make sure that the stack is aligned before calling a C function unless
+ // running in the simulator. The simulator has its own alignment check which
+ // provides more information.
+
+#if V8_HOST_ARCH_LOONG64
+ if (FLAG_debug_code) {
+ int frame_alignment = base::OS::ActivationFrameAlignment();
+ int frame_alignment_mask = frame_alignment - 1;
+ if (frame_alignment > kPointerSize) {
+ DCHECK(base::bits::IsPowerOfTwo(frame_alignment));
+ Label alignment_as_expected;
+ {
+ Register scratch = t8;
+ And(scratch, sp, Operand(frame_alignment_mask));
+ Branch(&alignment_as_expected, eq, scratch, Operand(zero_reg));
+ }
+ // Don't use Check here, as it will call Runtime_Abort possibly
+ // re-entering here.
+ stop();
+ bind(&alignment_as_expected);
+ }
+ }
+#endif // V8_HOST_ARCH_LOONG64
+
+ // Just call directly. The function called cannot cause a GC, or
+ // allow preemption, so the return address in the link register
+ // stays correct.
+ {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (function != t7) {
+ mov(t7, function);
+ function = t7;
+ }
+
+ // Save the frame pointer and PC so that the stack layout remains iterable,
+ // even without an ExitFrame which normally exists between JS and C frames.
+ // 't' registers are caller-saved so this is safe as a scratch register.
+ Register pc_scratch = t1;
+ Register scratch = t2;
+ DCHECK(!AreAliased(pc_scratch, scratch, function));
+
+ pcaddi(pc_scratch, 1);
+
+ // See x64 code for reasoning about how to address the isolate data fields.
+ if (root_array_available()) {
+ St_d(pc_scratch, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_pc_offset()));
+ St_d(fp, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ li(scratch, ExternalReference::fast_c_call_caller_pc_address(isolate()));
+ St_d(pc_scratch, MemOperand(scratch, 0));
+ li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ St_d(fp, MemOperand(scratch, 0));
+ }
+
+ Call(function);
+
+ // We don't unset the PC; the FP is the source of truth.
+ if (root_array_available()) {
+ St_d(zero_reg, MemOperand(kRootRegister,
+ IsolateData::fast_c_call_caller_fp_offset()));
+ } else {
+ DCHECK_NOT_NULL(isolate());
+ li(scratch, ExternalReference::fast_c_call_caller_fp_address(isolate()));
+ St_d(zero_reg, MemOperand(scratch, 0));
+ }
+ }
+
+ int stack_passed_arguments =
+ CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
+
+ if (base::OS::ActivationFrameAlignment() > kPointerSize) {
+ Ld_d(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
+ } else {
+ Add_d(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+ }
+}
+
+#undef BRANCH_ARGS_CHECK
+
+void TurboAssembler::CheckPageFlag(const Register& object, int mask,
+ Condition cc, Label* condition_met) {
+ UseScratchRegisterScope temps(this);
+ temps.Include(t8);
+ Register scratch = temps.Acquire();
+ And(scratch, object, Operand(~kPageAlignmentMask));
+ Ld_d(scratch, MemOperand(scratch, BasicMemoryChunk::kFlagsOffset));
+ And(scratch, scratch, Operand(mask));
+ Branch(condition_met, cc, scratch, Operand(zero_reg));
+}
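+
+// CheckPageFlag masks the object's address down to the start of its
+// BasicMemoryChunk and tests the chunk's flag word against the given mask,
+// branching to condition_met when the condition cc holds for the masked
+// flags.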
+
+Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
+ Register reg4, Register reg5,
+ Register reg6) {
+ RegList regs = 0;
+ if (reg1.is_valid()) regs |= reg1.bit();
+ if (reg2.is_valid()) regs |= reg2.bit();
+ if (reg3.is_valid()) regs |= reg3.bit();
+ if (reg4.is_valid()) regs |= reg4.bit();
+ if (reg5.is_valid()) regs |= reg5.bit();
+ if (reg6.is_valid()) regs |= reg6.bit();
+
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
+ for (int i = 0; i < config->num_allocatable_general_registers(); ++i) {
+ int code = config->GetAllocatableGeneralCode(i);
+ Register candidate = Register::from_code(code);
+ if (regs & candidate.bit()) continue;
+ return candidate;
+ }
+ UNREACHABLE();
+}
+
+void TurboAssembler::ComputeCodeStartAddress(Register dst) {
+ // TODO(LOONG_dev): range check, add Pcadd macro function?
+ pcaddi(dst, -pc_offset() >> 2);
+}
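+
+// pcaddi adds its immediate shifted left by 2 to the PC, so an immediate of
+// -pc_offset() >> 2 lands exactly on the first instruction of the buffer;
+// the si20 immediate bounds how far back this can reach, hence the
+// range-check TODO above.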
+
+void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
+ DeoptimizeKind kind, Label* ret,
+ Label*) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ Ld_d(t7, MemOperand(kRootRegister,
+ IsolateData::builtin_entry_slot_offset(target)));
+ Call(t7);
+ DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
+ (kind == DeoptimizeKind::kLazy)
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
+
+ if (kind == DeoptimizeKind::kEagerWithResume) {
+ Branch(ret);
+ DCHECK_EQ(SizeOfCodeGeneratedSince(exit),
+ Deoptimizer::kEagerWithResumeBeforeArgsSize);
+ }
+}
+
+void TurboAssembler::LoadCodeObjectEntry(Register destination,
+ Register code_object) {
+ // Code objects are called differently depending on whether we are generating
+ // builtin code (which will later be embedded into the binary) or compiling
+ // user JS code at runtime.
+ // * Builtin code runs in --jitless mode and thus must not call into on-heap
+ // Code targets. Instead, we dispatch through the builtins entry table.
+ // * Codegen at runtime does not have this restriction and we can use the
+ // shorter, branchless instruction sequence. The assumption here is that
+ // targets are usually generated code and not builtin Code objects.
+ if (options().isolate_independent_code) {
+ DCHECK(root_array_available());
+ Label if_code_is_off_heap, out;
+ Register scratch = t8;
+
+ DCHECK(!AreAliased(destination, scratch));
+ DCHECK(!AreAliased(code_object, scratch));
+
+ // Check whether the Code object is an off-heap trampoline. If so, call its
+ // (off-heap) entry point directly without going through the (on-heap)
+ // trampoline. Otherwise, just call the Code object as always.
+ Ld_w(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
+ And(scratch, scratch, Operand(Code::IsOffHeapTrampoline::kMask));
+ BranchShort(&if_code_is_off_heap, ne, scratch, Operand(zero_reg));
+ // Not an off-heap trampoline object, the entry point is at
+ // Code::raw_instruction_start().
+ Add_d(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
+ Branch(&out);
+
+ // An off-heap trampoline, the entry point is loaded from the builtin entry
+ // table.
+ bind(&if_code_is_off_heap);
+ Ld_w(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
+ // TODO(liuyu): don't use scratch_reg in Alsl_d;
+ Alsl_d(destination, scratch, kRootRegister, kSystemPointerSizeLog2,
+ zero_reg);
+ Ld_d(destination,
+ MemOperand(destination, IsolateData::builtin_entry_table_offset()));
+
+ bind(&out);
+ } else {
+ Add_d(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
+ }
+}
+
+void TurboAssembler::CallCodeObject(Register code_object) {
+ LoadCodeObjectEntry(code_object, code_object);
+ Call(code_object);
+}
+
+void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+ DCHECK_EQ(JumpMode::kJump, jump_mode);
+ LoadCodeObjectEntry(code_object, code_object);
+ Jump(code_object);
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_LOONG64
diff --git a/deps/v8/src/codegen/loong64/macro-assembler-loong64.h b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
new file mode 100644
index 0000000000..ef670fd1cd
--- /dev/null
+++ b/deps/v8/src/codegen/loong64/macro-assembler-loong64.h
@@ -0,0 +1,1062 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef INCLUDED_FROM_MACRO_ASSEMBLER_H
+#error This header must be included via macro-assembler.h
+#endif
+
+#ifndef V8_CODEGEN_LOONG64_MACRO_ASSEMBLER_LOONG64_H_
+#define V8_CODEGEN_LOONG64_MACRO_ASSEMBLER_LOONG64_H_
+
+#include "src/codegen/assembler.h"
+#include "src/codegen/loong64/assembler-loong64.h"
+#include "src/common/globals.h"
+#include "src/objects/tagged-index.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+enum class AbortReason : uint8_t;
+
+// Flags used for LeaveExitFrame function.
+enum LeaveExitFrameMode { EMIT_RETURN = true, NO_EMIT_RETURN = false };
+
+// Flags used for the li macro-assembler function.
+enum LiFlags {
+ // If the constant value can be represented in just 12 bits, then
+ // optimize the li to use a single instruction, rather than the
+ // lu12i_w/lu32i_d/lu52i_d/ori sequence. A number of other optimizations
+ // that emit fewer than the maximum number of instructions also exist.
+ OPTIMIZE_SIZE = 0,
+ // Always use 4 instructions (lu12i_w/ori/lu32i_d/lu52i_d sequence),
+ // even if the constant could be loaded with just one, so that this value is
+ // patchable later.
+ CONSTANT_SIZE = 1,
+ // For address loads only 3 instructions are required. Used to mark a
+ // constant load that will be used as an address without relocation
+ // information. It ensures a predictable code size, so specific sites
+ // in the code are patchable.
+ ADDRESS_LOAD = 2
+};
+
+enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
+
+Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
+ Register reg3 = no_reg,
+ Register reg4 = no_reg,
+ Register reg5 = no_reg,
+ Register reg6 = no_reg);
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+#define SmiWordOffset(offset) (offset + kPointerSize / 2)
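+// On little-endian LOONG64 with 32-bit Smi payloads, the payload occupies
+// the upper half of the tagged word; SmiWordOffset shifts a field offset by
+// half a pointer so that half can be addressed directly.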
+
+// Generate a MemOperand for loading a field from an object.
+inline MemOperand FieldMemOperand(Register object, int offset) {
+ return MemOperand(object, offset - kHeapObjectTag);
+}
+
+class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
+ public:
+ using TurboAssemblerBase::TurboAssemblerBase;
+
+ // Activation support.
+ void EnterFrame(StackFrame::Type type);
+ void EnterFrame(StackFrame::Type type, bool load_constant_pool_pointer_reg) {
+ // Out-of-line constant pool not implemented on loong64.
+ UNREACHABLE();
+ }
+ void LeaveFrame(StackFrame::Type type);
+
+ void AllocateStackSpace(Register bytes) { Sub_d(sp, sp, bytes); }
+
+ void AllocateStackSpace(int bytes) {
+ DCHECK_GE(bytes, 0);
+ if (bytes == 0) return;
+ Sub_d(sp, sp, Operand(bytes));
+ }
+
+ // Generates function and stub prologue code.
+ void StubPrologue(StackFrame::Type type);
+ void Prologue();
+
+ void InitializeRootRegister() {
+ ExternalReference isolate_root = ExternalReference::isolate_root(isolate());
+ li(kRootRegister, Operand(isolate_root));
+ }
+
+ // Jump unconditionally to the given label.
+ // Prefer b(Label) for code generation.
+ void jmp(Label* L) { Branch(L); }
+
+ // -------------------------------------------------------------------------
+ // Debugging.
+
+ void Trap();
+ void DebugBreak();
+
+ // Calls Abort(msg) if the condition cc is not satisfied.
+ // Use --debug_code to enable.
+ void Assert(Condition cc, AbortReason reason, Register rj, Operand rk);
+
+ // Like Assert(), but always enabled.
+ void Check(Condition cc, AbortReason reason, Register rj, Operand rk);
+
+ // Print a message to stdout and abort execution.
+ void Abort(AbortReason msg);
+
+ void Branch(Label* label, bool need_link = false);
+ void Branch(Label* label, Condition cond, Register r1, const Operand& r2,
+ bool need_link = false);
+ void BranchShort(Label* label, Condition cond, Register r1, const Operand& r2,
+ bool need_link = false);
+ void Branch(Label* L, Condition cond, Register rj, RootIndex index);
+
+ // Floating point branches
+ void CompareF32(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
+ CFRegister cd = FCC0) {
+ CompareF(cmp1, cmp2, cc, cd, true);
+ }
+
+ void CompareIsNanF32(FPURegister cmp1, FPURegister cmp2,
+ CFRegister cd = FCC0) {
+ CompareIsNanF(cmp1, cmp2, cd, true);
+ }
+
+ void CompareF64(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
+ CFRegister cd = FCC0) {
+ CompareF(cmp1, cmp2, cc, cd, false);
+ }
+
+ void CompareIsNanF64(FPURegister cmp1, FPURegister cmp2,
+ CFRegister cd = FCC0) {
+ CompareIsNanF(cmp1, cmp2, cd, false);
+ }
+
+ void BranchTrueShortF(Label* target, CFRegister cc = FCC0);
+ void BranchFalseShortF(Label* target, CFRegister cc = FCC0);
+
+ void BranchTrueF(Label* target, CFRegister cc = FCC0);
+ void BranchFalseF(Label* target, CFRegister cc = FCC0);
+
+ static int InstrCountForLi64Bit(int64_t value);
+ inline void LiLower32BitHelper(Register rd, Operand j);
+ void li_optimized(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
+ void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
+ inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
+ li(rd, Operand(j), mode);
+ }
+ inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
+ li(rd, Operand(static_cast<int64_t>(j)), mode);
+ }
+ void li(Register dst, Handle<HeapObject> value, LiFlags mode = OPTIMIZE_SIZE);
+ void li(Register dst, ExternalReference value, LiFlags mode = OPTIMIZE_SIZE);
+ void li(Register dst, const StringConstantBase* string,
+ LiFlags mode = OPTIMIZE_SIZE);
+
+ void LoadFromConstantsTable(Register destination, int constant_index) final;
+ void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
+ void LoadRootRelative(Register destination, int32_t offset) final;
+
+ inline void Move(Register output, MemOperand operand) {
+ Ld_d(output, operand);
+ }
+
+ inline void GenPCRelativeJump(Register rd, int64_t offset);
+ inline void GenPCRelativeJumpAndLink(Register rd, int64_t offset);
+
+// Jump, Call, and Ret pseudo instructions implementing inter-working.
+#define COND_ARGS \
+ Condition cond = al, Register rj = zero_reg, \
+ const Operand &rk = Operand(zero_reg)
+
+ void Jump(Register target, COND_ARGS);
+ void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
+ void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
+ // Unlike li, this method stores the target to memory and then loads it
+ // into a register using ld_d, so it can be used in the wasm jump table
+ // for concurrent patching.
+ void PatchAndJump(Address target);
+ void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
+ void Jump(const ExternalReference& reference);
+ void Call(Register target, COND_ARGS);
+ void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
+ void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+ COND_ARGS);
+ void Call(Label* target);
+
+ // Load the builtin given by the Smi in |builtin| into the same
+ // register.
+ void LoadEntryFromBuiltinIndex(Register builtin);
+ void LoadEntryFromBuiltin(Builtin builtin, Register destination);
+ MemOperand EntryFromBuiltinAsOperand(Builtin builtin);
+
+ void CallBuiltinByIndex(Register builtin);
+ void CallBuiltin(Builtin builtin);
+
+ void LoadCodeObjectEntry(Register destination, Register code_object);
+ void CallCodeObject(Register code_object);
+
+ void JumpCodeObject(Register code_object,
+ JumpMode jump_mode = JumpMode::kJump);
+
+ // Generates an instruction sequence s.t. the return address points to the
+ // instruction following the call.
+ // The return address on the stack is used by frame iteration.
+ void StoreReturnAddressAndCall(Register target);
+
+ void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
+ DeoptimizeKind kind, Label* ret,
+ Label* jump_deoptimization_entry_label);
+
+ void Ret(COND_ARGS);
+
+ // Emit code to discard a non-negative number of pointer-sized elements
+ // from the stack, clobbering only the sp register.
+ void Drop(int count, Condition cond = cc_always, Register reg = no_reg,
+ const Operand& op = Operand(no_reg));
+
+ void Ld_d(Register rd, const MemOperand& rj);
+ void St_d(Register rd, const MemOperand& rj);
+
+ void Push(Handle<HeapObject> handle);
+ void Push(Smi smi);
+
+ void Push(Register src) {
+ Add_d(sp, sp, Operand(-kPointerSize));
+ St_d(src, MemOperand(sp, 0));
+ }
+
+ // Push two registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2) {
+ Sub_d(sp, sp, Operand(2 * kPointerSize));
+ St_d(src1, MemOperand(sp, 1 * kPointerSize));
+ St_d(src2, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ // Push three registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3) {
+ Sub_d(sp, sp, Operand(3 * kPointerSize));
+ St_d(src1, MemOperand(sp, 2 * kPointerSize));
+ St_d(src2, MemOperand(sp, 1 * kPointerSize));
+ St_d(src3, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ // Push four registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4) {
+ Sub_d(sp, sp, Operand(4 * kPointerSize));
+ St_d(src1, MemOperand(sp, 3 * kPointerSize));
+ St_d(src2, MemOperand(sp, 2 * kPointerSize));
+ St_d(src3, MemOperand(sp, 1 * kPointerSize));
+ St_d(src4, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ // Push five registers. Pushes leftmost register first (to highest address).
+ void Push(Register src1, Register src2, Register src3, Register src4,
+ Register src5) {
+ Sub_d(sp, sp, Operand(5 * kPointerSize));
+ St_d(src1, MemOperand(sp, 4 * kPointerSize));
+ St_d(src2, MemOperand(sp, 3 * kPointerSize));
+ St_d(src3, MemOperand(sp, 2 * kPointerSize));
+ St_d(src4, MemOperand(sp, 1 * kPointerSize));
+ St_d(src5, MemOperand(sp, 0 * kPointerSize));
+ }
+
+ enum PushArrayOrder { kNormal, kReverse };
+ void PushArray(Register array, Register size, Register scratch,
+ Register scratch2, PushArrayOrder order = kNormal);
+
+ void MaybeSaveRegisters(RegList registers);
+ void MaybeRestoreRegisters(RegList registers);
+
+ void CallEphemeronKeyBarrier(Register object, Operand offset,
+ SaveFPRegsMode fp_mode);
+
+ void CallRecordWriteStubSaveRegisters(
+ Register object, Operand offset,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode = StubCallMode::kCallBuiltinPointer);
+ void CallRecordWriteStub(
+ Register object, Register slot_address,
+ RememberedSetAction remembered_set_action, SaveFPRegsMode fp_mode,
+ StubCallMode mode = StubCallMode::kCallBuiltinPointer);
+
+ // For a given |object| and |offset|:
+ // - Move |object| to |dst_object|.
+ // - Compute the address of the slot pointed to by |offset| in |object| and
+ // write it to |dst_slot|.
+ // This method makes sure |object| and |offset| are allowed to overlap with
+ // the destination registers.
+ void MoveObjectAndSlot(Register dst_object, Register dst_slot,
+ Register object, Operand offset);
+
+ // Push multiple registers on the stack.
+ // Registers are saved in numerical order, with higher numbered registers
+ // saved in higher memory addresses.
+ void MultiPush(RegList regs);
+ void MultiPush(RegList regs1, RegList regs2);
+ void MultiPush(RegList regs1, RegList regs2, RegList regs3);
+ void MultiPushFPU(RegList regs);
+
+ // Calculate how much stack space (in bytes) is required to store caller
+ // registers excluding those specified in the arguments.
+ int RequiredStackSizeForCallerSaved(SaveFPRegsMode fp_mode,
+ Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg) const;
+
+ // Push caller saved registers on the stack, and return the number of bytes
+ // stack pointer is adjusted.
+ int PushCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
+ // Restore caller saved registers from the stack, and return the number of
+ // bytes stack pointer is adjusted.
+ int PopCallerSaved(SaveFPRegsMode fp_mode, Register exclusion1 = no_reg,
+ Register exclusion2 = no_reg,
+ Register exclusion3 = no_reg);
+
+ void Pop(Register dst) {
+ Ld_d(dst, MemOperand(sp, 0));
+ Add_d(sp, sp, Operand(kPointerSize));
+ }
+
+ // Pop two registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2) {
+ DCHECK(src1 != src2);
+ Ld_d(src2, MemOperand(sp, 0 * kPointerSize));
+ Ld_d(src1, MemOperand(sp, 1 * kPointerSize));
+ Add_d(sp, sp, 2 * kPointerSize);
+ }
+
+ // Pop three registers. Pops rightmost register first (from lower address).
+ void Pop(Register src1, Register src2, Register src3) {
+ Ld_d(src3, MemOperand(sp, 0 * kPointerSize));
+ Ld_d(src2, MemOperand(sp, 1 * kPointerSize));
+ Ld_d(src1, MemOperand(sp, 2 * kPointerSize));
+ Add_d(sp, sp, 3 * kPointerSize);
+ }
+
+ // Pops multiple values from the stack and loads them into the
+ // registers specified in regs. Pop order is the opposite of MultiPush.
+ void MultiPop(RegList regs);
+ void MultiPop(RegList regs1, RegList regs2);
+ void MultiPop(RegList regs1, RegList regs2, RegList regs3);
+
+ void MultiPopFPU(RegList regs);
+
+#define DEFINE_INSTRUCTION(instr) \
+ void instr(Register rd, Register rj, const Operand& rk); \
+ void instr(Register rd, Register rj, Register rk) { \
+ instr(rd, rj, Operand(rk)); \
+ } \
+ void instr(Register rj, Register rk, int32_t j) { instr(rj, rk, Operand(j)); }
+
+#define DEFINE_INSTRUCTION2(instr) \
+ void instr(Register rj, const Operand& rk); \
+ void instr(Register rj, Register rk) { instr(rj, Operand(rk)); } \
+ void instr(Register rj, int32_t j) { instr(rj, Operand(j)); }
+
+ DEFINE_INSTRUCTION(Add_w)
+ DEFINE_INSTRUCTION(Add_d)
+ DEFINE_INSTRUCTION(Div_w)
+ DEFINE_INSTRUCTION(Div_wu)
+ DEFINE_INSTRUCTION(Div_du)
+ DEFINE_INSTRUCTION(Mod_w)
+ DEFINE_INSTRUCTION(Mod_wu)
+ DEFINE_INSTRUCTION(Div_d)
+ DEFINE_INSTRUCTION(Sub_w)
+ DEFINE_INSTRUCTION(Sub_d)
+ DEFINE_INSTRUCTION(Mod_d)
+ DEFINE_INSTRUCTION(Mod_du)
+ DEFINE_INSTRUCTION(Mul_w)
+ DEFINE_INSTRUCTION(Mulh_w)
+ DEFINE_INSTRUCTION(Mulh_wu)
+ DEFINE_INSTRUCTION(Mul_d)
+ DEFINE_INSTRUCTION(Mulh_d)
+ DEFINE_INSTRUCTION2(Div_w)
+ DEFINE_INSTRUCTION2(Div_d)
+ DEFINE_INSTRUCTION2(Div_wu)
+ DEFINE_INSTRUCTION2(Div_du)
+
+ DEFINE_INSTRUCTION(And)
+ DEFINE_INSTRUCTION(Or)
+ DEFINE_INSTRUCTION(Xor)
+ DEFINE_INSTRUCTION(Nor)
+ DEFINE_INSTRUCTION2(Neg)
+ DEFINE_INSTRUCTION(Andn)
+ DEFINE_INSTRUCTION(Orn)
+
+ DEFINE_INSTRUCTION(Slt)
+ DEFINE_INSTRUCTION(Sltu)
+ DEFINE_INSTRUCTION(Slti)
+ DEFINE_INSTRUCTION(Sltiu)
+ DEFINE_INSTRUCTION(Sle)
+ DEFINE_INSTRUCTION(Sleu)
+ DEFINE_INSTRUCTION(Sgt)
+ DEFINE_INSTRUCTION(Sgtu)
+ DEFINE_INSTRUCTION(Sge)
+ DEFINE_INSTRUCTION(Sgeu)
+
+ DEFINE_INSTRUCTION(Rotr_w)
+ DEFINE_INSTRUCTION(Rotr_d)
+
+#undef DEFINE_INSTRUCTION
+#undef DEFINE_INSTRUCTION2
+
+ void SmiUntag(Register dst, const MemOperand& src);
+ void SmiUntag(Register dst, Register src) {
+ if (SmiValuesAre32Bits()) {
+ srai_d(dst, src, kSmiShift);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ srai_w(dst, src, kSmiShift);
+ }
+ }
+
+ void SmiUntag(Register reg) { SmiUntag(reg, reg); }
+
+ int CalculateStackPassedWords(int num_reg_arguments,
+ int num_double_arguments);
+
+ // Before calling a C function from generated code, align the arguments on
+ // the stack. After aligning the frame, non-register arguments must be
+ // stored on the stack after the argument slots, using the helper
+ // CFunctionArgumentOperand().
+ // The argument count assumes all arguments are word sized.
+ // Some compilers/platforms require the stack to be aligned when calling
+ // C++ code.
+ // Needs a scratch register to do some arithmetic. This register will be
+ // trashed.
+ void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
+ Register scratch);
+ void PrepareCallCFunction(int num_reg_arguments, Register scratch);
+
+ // Calls a C function and cleans up the space for arguments allocated
+ // by PrepareCallCFunction. The called function is not allowed to trigger a
+ // garbage collection, since that might move the code and invalidate the
+ // return address (unless this is somehow accounted for by the called
+ // function).
+ void CallCFunction(ExternalReference function, int num_arguments);
+ void CallCFunction(Register function, int num_arguments);
+ void CallCFunction(ExternalReference function, int num_reg_arguments,
+ int num_double_arguments);
+ void CallCFunction(Register function, int num_reg_arguments,
+ int num_double_arguments);
+
+ // See comments at the beginning of Builtins::Generate_CEntry.
+ inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
+ inline void PrepareCEntryFunction(const ExternalReference& ref) {
+ li(a1, ref);
+ }
+
+ void CheckPageFlag(const Register& object, int mask, Condition cc,
+ Label* condition_met);
+#undef COND_ARGS
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+ // Exits with 'result' holding the answer.
+ void TruncateDoubleToI(Isolate* isolate, Zone* zone, Register result,
+ DoubleRegister double_input, StubCallMode stub_mode);
+
+ // Conditional move.
+ void Movz(Register rd, Register rj, Register rk);
+ void Movn(Register rd, Register rj, Register rk);
+
+ void LoadZeroIfFPUCondition(Register dest, CFRegister = FCC0);
+ void LoadZeroIfNotFPUCondition(Register dest, CFRegister = FCC0);
+
+ void LoadZeroIfConditionNotZero(Register dest, Register condition);
+ void LoadZeroIfConditionZero(Register dest, Register condition);
+ void LoadZeroOnCondition(Register rd, Register rj, const Operand& rk,
+ Condition cond);
+
+ void Clz_w(Register rd, Register rj);
+ void Clz_d(Register rd, Register rj);
+ void Ctz_w(Register rd, Register rj);
+ void Ctz_d(Register rd, Register rj);
+ void Popcnt_w(Register rd, Register rj);
+ void Popcnt_d(Register rd, Register rj);
+
+ void ExtractBits(Register dest, Register source, Register pos, int size,
+ bool sign_extend = false);
+ void InsertBits(Register dest, Register source, Register pos, int size);
+
+ void Bstrins_w(Register rk, Register rj, uint16_t msbw, uint16_t lsbw);
+ void Bstrins_d(Register rk, Register rj, uint16_t msbw, uint16_t lsbw);
+ void Bstrpick_w(Register rk, Register rj, uint16_t msbw, uint16_t lsbw);
+ void Bstrpick_d(Register rk, Register rj, uint16_t msbw, uint16_t lsbw);
+ void Neg_s(FPURegister fd, FPURegister fj);
+ void Neg_d(FPURegister fd, FPURegister fk);
+
+ // Convert single to unsigned word.
+ void Trunc_uw_s(FPURegister fd, FPURegister fj, FPURegister scratch);
+ void Trunc_uw_s(Register rd, FPURegister fj, FPURegister scratch);
+
+ // Change endianness
+ void ByteSwapSigned(Register dest, Register src, int operand_size);
+ void ByteSwapUnsigned(Register dest, Register src, int operand_size);
+
+ void Ld_b(Register rd, const MemOperand& rj);
+ void Ld_bu(Register rd, const MemOperand& rj);
+ void St_b(Register rd, const MemOperand& rj);
+
+ void Ld_h(Register rd, const MemOperand& rj);
+ void Ld_hu(Register rd, const MemOperand& rj);
+ void St_h(Register rd, const MemOperand& rj);
+
+ void Ld_w(Register rd, const MemOperand& rj);
+ void Ld_wu(Register rd, const MemOperand& rj);
+ void St_w(Register rd, const MemOperand& rj);
+
+ void Fld_s(FPURegister fd, const MemOperand& src);
+ void Fst_s(FPURegister fj, const MemOperand& dst);
+
+ void Fld_d(FPURegister fd, const MemOperand& src);
+ void Fst_d(FPURegister fj, const MemOperand& dst);
+
+ void Ll_w(Register rd, const MemOperand& rj);
+ void Sc_w(Register rd, const MemOperand& rj);
+
+ void Ll_d(Register rd, const MemOperand& rj);
+ void Sc_d(Register rd, const MemOperand& rj);
+
+ // These functions assume (and assert) that src1!=src2. It is permitted
+ // for the result to alias either input register.
+ void Float32Max(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+ void Float32Min(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+ void Float64Max(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+ void Float64Min(FPURegister dst, FPURegister src1, FPURegister src2,
+ Label* out_of_line);
+
+ // Generate out-of-line cases for the macros above.
+ void Float32MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float32MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float64MaxOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+ void Float64MinOutOfLine(FPURegister dst, FPURegister src1, FPURegister src2);
+
+ bool IsDoubleZeroRegSet() { return has_double_zero_reg_set_; }
+
+ void mov(Register rd, Register rj) { or_(rd, rj, zero_reg); }
+
+ inline void Move(Register dst, Handle<HeapObject> handle) { li(dst, handle); }
+ inline void Move(Register dst, Smi smi) { li(dst, Operand(smi)); }
+
+ inline void Move(Register dst, Register src) {
+ if (dst != src) {
+ mov(dst, src);
+ }
+ }
+
+ inline void FmoveLow(Register dst_low, FPURegister src) {
+ movfr2gr_s(dst_low, src);
+ }
+
+ void FmoveLow(FPURegister dst, Register src_low);
+
+ inline void Move(FPURegister dst, FPURegister src) { Move_d(dst, src); }
+
+ inline void Move_d(FPURegister dst, FPURegister src) {
+ if (dst != src) {
+ fmov_d(dst, src);
+ }
+ }
+
+ inline void Move_s(FPURegister dst, FPURegister src) {
+ if (dst != src) {
+ fmov_s(dst, src);
+ }
+ }
+
+ void Move(FPURegister dst, float imm) { Move(dst, bit_cast<uint32_t>(imm)); }
+ void Move(FPURegister dst, double imm) { Move(dst, bit_cast<uint64_t>(imm)); }
+ void Move(FPURegister dst, uint32_t src);
+ void Move(FPURegister dst, uint64_t src);
+
+ // AddOverflow_d sets the overflow register to a negative value if
+ // overflow occurred; otherwise it is zero or positive.
+ void AddOverflow_d(Register dst, Register left, const Operand& right,
+ Register overflow);
+ // SubOverflow_d sets the overflow register to a negative value if
+ // overflow occurred; otherwise it is zero or positive.
+ void SubOverflow_d(Register dst, Register left, const Operand& right,
+ Register overflow);
+ // MulOverflow_w sets the overflow register to zero if no overflow occurred.
+ void MulOverflow_w(Register dst, Register left, const Operand& right,
+ Register overflow);
+
+ // TODO(LOONG_dev): LOONG64 Remove this constant
+ // Number of instructions needed for calculation of switch table entry address
+ static const int kSwitchTablePrologueSize = 5;
+
+ // GetLabelFunction must be a lambda '[](size_t index) -> Label*' or a
+ // functor/function with the declaration 'Label* func(size_t index)'.
+ template <typename Func>
+ void GenerateSwitchTable(Register index, size_t case_count,
+ Func GetLabelFunction);
+
+ // Load an object from the root table.
+ void LoadRoot(Register destination, RootIndex index) final;
+ void LoadRoot(Register destination, RootIndex index, Condition cond,
+ Register src1, const Operand& src2);
+
+ void LoadMap(Register destination, Register object);
+
+ // If the value is a NaN, canonicalize the value; otherwise, do nothing.
+ void FPUCanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
+
+ // ---------------------------------------------------------------------------
+ // FPU macros. These do not handle special cases like NaN or +- inf.
+
+ // Convert unsigned word to double.
+ void Ffint_d_uw(FPURegister fd, FPURegister fj);
+ void Ffint_d_uw(FPURegister fd, Register rj);
+
+ // Convert unsigned long to double.
+ void Ffint_d_ul(FPURegister fd, FPURegister fj);
+ void Ffint_d_ul(FPURegister fd, Register rj);
+
+ // Convert unsigned word to float.
+ void Ffint_s_uw(FPURegister fd, FPURegister fj);
+ void Ffint_s_uw(FPURegister fd, Register rj);
+
+ // Convert unsigned long to float.
+ void Ffint_s_ul(FPURegister fd, FPURegister fj);
+ void Ffint_s_ul(FPURegister fd, Register rj);
+
+ // Convert double to unsigned word.
+ void Ftintrz_uw_d(FPURegister fd, FPURegister fj, FPURegister scratch);
+ void Ftintrz_uw_d(Register rd, FPURegister fj, FPURegister scratch);
+
+ // Convert single to unsigned word.
+ void Ftintrz_uw_s(FPURegister fd, FPURegister fs, FPURegister scratch);
+ void Ftintrz_uw_s(Register rd, FPURegister fs, FPURegister scratch);
+
+ // Convert double to unsigned long.
+ void Ftintrz_ul_d(FPURegister fd, FPURegister fj, FPURegister scratch,
+ Register result = no_reg);
+ void Ftintrz_ul_d(Register rd, FPURegister fj, FPURegister scratch,
+ Register result = no_reg);
+
+ // Convert single to unsigned long.
+ void Ftintrz_ul_s(FPURegister fd, FPURegister fj, FPURegister scratch,
+ Register result = no_reg);
+ void Ftintrz_ul_s(Register rd, FPURegister fj, FPURegister scratch,
+ Register result = no_reg);
+
+ // Round double functions
+ void Trunc_d(FPURegister fd, FPURegister fj);
+ void Round_d(FPURegister fd, FPURegister fj);
+ void Floor_d(FPURegister fd, FPURegister fj);
+ void Ceil_d(FPURegister fd, FPURegister fj);
+
+ // Round float functions
+ void Trunc_s(FPURegister fd, FPURegister fj);
+ void Round_s(FPURegister fd, FPURegister fj);
+ void Floor_s(FPURegister fd, FPURegister fj);
+ void Ceil_s(FPURegister fd, FPURegister fj);
+
+ // Jump if the register contains a smi.
+ void JumpIfSmi(Register value, Label* smi_label);
+
+ void JumpIfEqual(Register a, int32_t b, Label* dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(b));
+ Branch(dest, eq, a, Operand(scratch));
+ }
+
+ void JumpIfLessThan(Register a, int32_t b, Label* dest) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ li(scratch, Operand(b));
+ Branch(dest, lt, a, Operand(scratch));
+ }
+
+ // Push a standard frame, consisting of ra, fp, context and JS function.
+ void PushStandardFrame(Register function_reg);
+
+ // Get the actual activation frame alignment for target environment.
+ static int ActivationFrameAlignment();
+
+ // Load Scaled Address instructions. Parameter sa (shift argument) must be
+ // in the range [1, 31]. The scratch register may be clobbered.
+ void Alsl_w(Register rd, Register rj, Register rk, uint8_t sa,
+ Register scratch = t7);
+ void Alsl_d(Register rd, Register rj, Register rk, uint8_t sa,
+ Register scratch = t7);
+
+ // Compute the start of the generated instruction stream from the current PC.
+ // This is an alternative to embedding the {CodeObject} handle as a reference.
+ void ComputeCodeStartAddress(Register dst);
+
+ // Control-flow integrity:
+
+ // Define a function entrypoint. This doesn't emit any code for this
+ // architecture, as control-flow integrity is not supported for it.
+ void CodeEntry() {}
+ // Define an exception handler.
+ void ExceptionHandler() {}
+ // Define an exception handler and bind a label.
+ void BindExceptionHandler(Label* label) { bind(label); }
+
+ protected:
+ inline Register GetRkAsRegisterHelper(const Operand& rk, Register scratch);
+ inline int32_t GetOffset(Label* L, OffsetSize bits);
+
+ private:
+ bool has_double_zero_reg_set_ = false;
+
+ // Performs a truncating conversion of a floating point number as used by
+ // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+ // succeeds, otherwise falls through if result is saturated. On return
+ // 'result' either holds answer, or is clobbered on fall through.
+ void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
+ Label* done);
+
+ bool BranchShortOrFallback(Label* L, Condition cond, Register rj,
+ const Operand& rk, bool need_link);
+
+ // f32 or f64
+ void CompareF(FPURegister cmp1, FPURegister cmp2, FPUCondition cc,
+ CFRegister cd, bool f32 = true);
+
+ void CompareIsNanF(FPURegister cmp1, FPURegister cmp2, CFRegister cd,
+ bool f32 = true);
+
+ void CallCFunctionHelper(Register function, int num_reg_arguments,
+ int num_double_arguments);
+
+ void RoundDouble(FPURegister dst, FPURegister src, FPURoundingMode mode);
+
+ void RoundFloat(FPURegister dst, FPURegister src, FPURoundingMode mode);
+
+ // Push a fixed frame, consisting of ra, fp.
+ void PushCommonFrame(Register marker_reg = no_reg);
+};
+
+// MacroAssembler implements a collection of frequently used macros.
+class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
+ public:
+ using TurboAssembler::TurboAssembler;
+
+ // It assumes that the arguments are located below the stack pointer.
+ // argc is the number of arguments not including the receiver.
+ // TODO(LOONG_dev): LOONG64: Remove this function once we stick with the
+ // reversed arguments order.
+ void LoadReceiver(Register dest, Register argc) {
+ Ld_d(dest, MemOperand(sp, 0));
+ }
+
+ void StoreReceiver(Register rec, Register argc, Register scratch) {
+ St_d(rec, MemOperand(sp, 0));
+ }
+
+ bool IsNear(Label* L, Condition cond, int rs_reg);
+
+ // Swap two registers. If the scratch register is omitted then a slightly
+ // less efficient form using xor instead of mov is emitted.
+ void Swap(Register reg1, Register reg2, Register scratch = no_reg);
+
+ void PushRoot(RootIndex index) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadRoot(scratch, index);
+ Push(scratch);
+ }
+
+ // Compare the object in a register to a value and jump if they are equal.
+ void JumpIfRoot(Register with, RootIndex index, Label* if_equal) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadRoot(scratch, index);
+ Branch(if_equal, eq, with, Operand(scratch));
+ }
+
+ // Compare the object in a register to a value and jump if they are not equal.
+ void JumpIfNotRoot(Register with, RootIndex index, Label* if_not_equal) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ LoadRoot(scratch, index);
+ Branch(if_not_equal, ne, with, Operand(scratch));
+ }
+
+ // Checks if value is in range [lower_limit, higher_limit] using a single
+ // comparison.
+ void JumpIfIsInRange(Register value, unsigned lower_limit,
+ unsigned higher_limit, Label* on_in_range);
+
+ // ---------------------------------------------------------------------------
+ // GC Support
+
+ // Notify the garbage collector that we wrote a pointer into an object.
+ // |object| is the object being stored into, |value| is the object being
+ // stored.
+ // The offset is the offset from the start of the object, not the offset from
+ // the tagged HeapObject pointer. For use with FieldOperand(reg, off).
+ void RecordWriteField(
+ Register object, int offset, Register value, RAStatus ra_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
+
+ // For a given |object| notify the garbage collector that the slot at |offset|
+ // has been written. |value| is the object being stored.
+ void RecordWrite(
+ Register object, Operand offset, Register value, RAStatus ra_status,
+ SaveFPRegsMode save_fp,
+ RememberedSetAction remembered_set_action = RememberedSetAction::kEmit,
+ SmiCheck smi_check = SmiCheck::kInline);
+
+ // ---------------------------------------------------------------------------
+ // Pseudo-instructions.
+
+ // Convert double to unsigned long.
+ void Ftintrz_l_ud(FPURegister fd, FPURegister fj, FPURegister scratch);
+
+ void Ftintrz_l_d(FPURegister fd, FPURegister fj);
+ void Ftintrne_l_d(FPURegister fd, FPURegister fj);
+ void Ftintrm_l_d(FPURegister fd, FPURegister fj);
+ void Ftintrp_l_d(FPURegister fd, FPURegister fj);
+
+ void Ftintrz_w_d(FPURegister fd, FPURegister fj);
+ void Ftintrne_w_d(FPURegister fd, FPURegister fj);
+ void Ftintrm_w_d(FPURegister fd, FPURegister fj);
+ void Ftintrp_w_d(FPURegister fd, FPURegister fj);
+
+ void Madd_s(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk);
+ void Madd_d(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk);
+ void Msub_s(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk);
+ void Msub_d(FPURegister fd, FPURegister fa, FPURegister fj, FPURegister fk);
+
+ // Enter exit frame.
+ // argc - argument count to be dropped by LeaveExitFrame.
+ // save_doubles - saves FPU registers on stack, currently disabled.
+ // stack_space - extra stack space.
+ void EnterExitFrame(bool save_doubles, int stack_space = 0,
+ StackFrame::Type frame_type = StackFrame::EXIT);
+
+ // Leave the current exit frame.
+ void LeaveExitFrame(bool save_doubles, Register arg_count,
+ bool do_return = NO_EMIT_RETURN,
+ bool argument_count_is_length = false);
+
+ // Make sure the stack is aligned. Only emits code in debug mode.
+ void AssertStackIsAligned();
+
+ // Load the global proxy from the current context.
+ void LoadGlobalProxy(Register dst) {
+ LoadNativeContextSlot(dst, Context::GLOBAL_PROXY_INDEX);
+ }
+
+ void LoadNativeContextSlot(Register dst, int index);
+
+ // Load the initial map from the global function. The registers
+ // function and map can be the same; function is then overwritten.
+ void LoadGlobalFunctionInitialMap(Register function, Register map,
+ Register scratch);
+
+ // -------------------------------------------------------------------------
+ // JavaScript invokes.
+
+ // Invoke the JavaScript function code by either calling or jumping.
+ void InvokeFunctionCode(Register function, Register new_target,
+ Register expected_parameter_count,
+ Register actual_parameter_count, InvokeType type);
+
+ // On function call, call into the debugger.
+ void CallDebugOnFunctionCall(Register fun, Register new_target,
+ Register expected_parameter_count,
+ Register actual_parameter_count);
+
+ // Invoke the JavaScript function in the given register. Changes the
+ // current context to the context in the function before invoking.
+ void InvokeFunctionWithNewTarget(Register function, Register new_target,
+ Register actual_parameter_count,
+ InvokeType type);
+ void InvokeFunction(Register function, Register expected_parameter_count,
+ Register actual_parameter_count, InvokeType type);
+
+ // Exception handling.
+
+ // Push a new stack handler and link into stack handler chain.
+ void PushStackHandler();
+
+ // Unlink the stack handler on top of the stack from the stack handler chain.
+ // Must preserve the result register.
+ void PopStackHandler();
+
+ // -------------------------------------------------------------------------
+ // Support functions.
+
+ void GetObjectType(Register function, Register map, Register type_reg);
+
+ void GetInstanceTypeRange(Register map, Register type_reg,
+ InstanceType lower_limit, Register range);
+
+ // -------------------------------------------------------------------------
+ // Runtime calls.
+
+ // Call a runtime routine.
+ void CallRuntime(const Runtime::Function* f, int num_arguments,
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore);
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid,
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
+ const Runtime::Function* function = Runtime::FunctionForId(fid);
+ CallRuntime(function, function->nargs, save_doubles);
+ }
+
+ // Convenience function: Same as above, but takes the fid instead.
+ void CallRuntime(Runtime::FunctionId fid, int num_arguments,
+ SaveFPRegsMode save_doubles = SaveFPRegsMode::kIgnore) {
+ CallRuntime(Runtime::FunctionForId(fid), num_arguments, save_doubles);
+ }
+
+ // Convenience function: tail call a runtime routine (jump).
+ void TailCallRuntime(Runtime::FunctionId fid);
+
+ // Jump to the builtin routine.
+ void JumpToExternalReference(const ExternalReference& builtin,
+ bool builtin_exit_frame = false);
+
+ // Generates a trampoline to jump to the off-heap instruction stream.
+ void JumpToInstructionStream(Address entry);
+
+ // ---------------------------------------------------------------------------
+ // In-place weak references.
+ void LoadWeakValue(Register out, Register in, Label* target_if_cleared);
+
+ // -------------------------------------------------------------------------
+ // StatsCounter support.
+
+ void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2) {
+ if (!FLAG_native_code_counters) return;
+ EmitIncrementCounter(counter, value, scratch1, scratch2);
+ }
+ void EmitIncrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+ void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2) {
+ if (!FLAG_native_code_counters) return;
+ EmitDecrementCounter(counter, value, scratch1, scratch2);
+ }
+ void EmitDecrementCounter(StatsCounter* counter, int value, Register scratch1,
+ Register scratch2);
+
+ // -------------------------------------------------------------------------
+ // Stack limit utilities
+
+ enum StackLimitKind { kInterruptStackLimit, kRealStackLimit };
+ void LoadStackLimit(Register destination, StackLimitKind kind);
+ void StackOverflowCheck(Register num_args, Register scratch1,
+ Register scratch2, Label* stack_overflow);
+
+ // ---------------------------------------------------------------------------
+ // Smi utilities.
+
+ void SmiTag(Register dst, Register src) {
+ STATIC_ASSERT(kSmiTag == 0);
+ if (SmiValuesAre32Bits()) {
+ slli_d(dst, src, 32);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ add_w(dst, src, src);
+ }
+ }
+
+ void SmiTag(Register reg) { SmiTag(reg, reg); }
+
+ // Scale a smi: left-shift the int32 value of the smi by 'scale' bits.
+ void SmiScale(Register dst, Register src, int scale) {
+ if (SmiValuesAre32Bits()) {
+ // The int portion is the upper 32 bits of the 64-bit word.
+ srai_d(dst, src, kSmiShift - scale);
+ } else {
+ DCHECK(SmiValuesAre31Bits());
+ DCHECK_GE(scale, kSmiTagSize);
+ slli_w(dst, src, scale - kSmiTagSize);
+ }
+ }
+
+ // Test if the register contains a smi.
+ inline void SmiTst(Register value, Register scratch) {
+ And(scratch, value, Operand(kSmiTagMask));
+ }
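
A minimal host-side sketch of the two smi encodings SmiTag, SmiScale and SmiTst rely on, assuming only standard C++ (illustrative, not from the patch):

#include <cassert>
#include <cstdint>

// SmiValuesAre32Bits: the payload lives in the upper word (slli_d dst, src, 32).
int64_t TagSmi32(int32_t v) { return static_cast<int64_t>(v) << 32; }

// SmiValuesAre31Bits: tag = v + v == v << 1 (add_w dst, src, src); the low
// bit stays clear because kSmiTag == 0.
int32_t TagSmi31(int32_t v) { return v + v; }

int main() {
  assert(TagSmi32(5) == (int64_t{5} << 32));
  assert(TagSmi31(5) == 10);
  assert((TagSmi31(5) & 1) == 0);  // SmiTst: AND with the tag mask gives 0
  return 0;
}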
+
+ // Jump if the register contains a non-smi.
+ void JumpIfNotSmi(Register value, Label* not_smi_label);
+
+ // Abort execution if argument is a smi, enabled via --debug-code.
+ void AssertNotSmi(Register object);
+ void AssertSmi(Register object);
+
+ // Abort execution if argument is not a Constructor, enabled via --debug-code.
+ void AssertConstructor(Register object);
+
+ // Abort execution if argument is not a JSFunction, enabled via --debug-code.
+ void AssertFunction(Register object);
+
+ // Abort execution if argument is not a JSBoundFunction,
+ // enabled via --debug-code.
+ void AssertBoundFunction(Register object);
+
+ // Abort execution if argument is not a JSGeneratorObject (or subclass),
+ // enabled via --debug-code.
+ void AssertGeneratorObject(Register object);
+
+ // Abort execution if argument is not undefined or an AllocationSite, enabled
+ // via --debug-code.
+ void AssertUndefinedOrAllocationSite(Register object, Register scratch);
+
+ template <typename Field>
+ void DecodeField(Register dst, Register src) {
+ Bstrpick_d(dst, src, Field::kShift + Field::kSize - 1, Field::kShift);
+ }
+
+ template <typename Field>
+ void DecodeField(Register reg) {
+ DecodeField<Field>(reg, reg);
+ }
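
What DecodeField<Field> computes, written out in portable C++; the Field layout below is a made-up example standing in for a V8 bitfield:

#include <cassert>
#include <cstdint>

struct Field {                 // hypothetical bitfield: 4 bits at offset 3
  static const int kShift = 3;
  static const int kSize = 4;
};

uint64_t DecodeField(uint64_t src) {
  // Bstrpick_d dst, src, kShift + kSize - 1, kShift: extract bits [6:3].
  return (src >> Field::kShift) & ((uint64_t{1} << Field::kSize) - 1);
}

int main() {
  assert(DecodeField(0xB8) == 0x7);  // bits 6..3 of 1011'1000 are 0111
  return 0;
}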
+
+ private:
+ // Helper functions for generating invokes.
+ void InvokePrologue(Register expected_parameter_count,
+ Register actual_parameter_count, Label* done,
+ InvokeType type);
+
+ friend class CommonFrame;
+
+ DISALLOW_IMPLICIT_CONSTRUCTORS(MacroAssembler);
+};
+
+template <typename Func>
+void TurboAssembler::GenerateSwitchTable(Register index, size_t case_count,
+ Func GetLabelFunction) {
+ UseScratchRegisterScope scope(this);
+ Register scratch = scope.Acquire();
+ BlockTrampolinePoolFor((3 + case_count) * kInstrSize);
+
+ pcaddi(scratch, 3);
+ alsl_d(scratch, index, scratch, kInstrSizeLog2);
+ jirl(zero_reg, scratch, 0);
+ for (size_t index = 0; index < case_count; ++index) {
+ b(GetLabelFunction(index));
+ }
+}
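
A host-side analogue of the sequence GenerateSwitchTable emits, assuming nothing beyond standard C++: pcaddi materializes the table address, alsl_d scales the index by one instruction slot, and jirl lands on one of the b(...) entries; in effect, a jump table.

#include <cstdio>

int main() {
  void (*table[])() = {
      [] { std::puts("case 0"); },
      [] { std::puts("case 1"); },
      [] { std::puts("case 2"); },
  };
  const unsigned index = 1;
  table[index]();  // indirect jump through the scaled table slot
  return 0;
}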
+
+#define ACCESS_MASM(masm) masm->
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_LOONG64_MACRO_ASSEMBLER_LOONG64_H_
diff --git a/deps/v8/src/codegen/loong64/register-loong64.h b/deps/v8/src/codegen/loong64/register-loong64.h
new file mode 100644
index 0000000000..7d9d88c1f0
--- /dev/null
+++ b/deps/v8/src/codegen/loong64/register-loong64.h
@@ -0,0 +1,288 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODEGEN_LOONG64_REGISTER_LOONG64_H_
+#define V8_CODEGEN_LOONG64_REGISTER_LOONG64_H_
+
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/codegen/register.h"
+#include "src/codegen/reglist.h"
+
+namespace v8 {
+namespace internal {
+
+// clang-format off
+#define GENERAL_REGISTERS(V) \
+ V(zero_reg) V(ra) V(tp) V(sp) \
+ V(a0) V(a1) V(a2) V(a3) V(a4) V(a5) V(a6) V(a7) \
+ V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) V(t6) V(t7) V(t8) \
+ V(x_reg) V(fp) \
+ V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s6) V(s7) V(s8) \
+
+#define ALLOCATABLE_GENERAL_REGISTERS(V) \
+ V(a0) V(a1) V(a2) V(a3) V(a4) V(a5) V(a6) V(a7) \
+ V(t0) V(t1) V(t2) V(t3) V(t4) V(t5) \
+ V(s0) V(s1) V(s2) V(s3) V(s4) V(s5) V(s7) V(s8)
+
+#define DOUBLE_REGISTERS(V) \
+ V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \
+ V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) \
+ V(f16) V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23) \
+ V(f24) V(f25) V(f26) V(f27) V(f28) V(f29) V(f30) V(f31)
+
+#define FLOAT_REGISTERS DOUBLE_REGISTERS
+#define SIMD128_REGISTERS(V) \
+ V(w0) V(w1) V(w2) V(w3) V(w4) V(w5) V(w6) V(w7) \
+ V(w8) V(w9) V(w10) V(w11) V(w12) V(w13) V(w14) V(w15) \
+ V(w16) V(w17) V(w18) V(w19) V(w20) V(w21) V(w22) V(w23) \
+ V(w24) V(w25) V(w26) V(w27) V(w28) V(w29) V(w30) V(w31)
+
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(f0) V(f1) V(f2) V(f3) V(f4) V(f5) V(f6) V(f7) \
+ V(f8) V(f9) V(f10) V(f11) V(f12) V(f13) V(f14) V(f15) V(f16) \
+ V(f17) V(f18) V(f19) V(f20) V(f21) V(f22) V(f23)
+// clang-format on
+
+// Note that the bit values must match those used in the actual instruction
+// encoding.
+const int kNumRegs = 32;
+
+const RegList kJSCallerSaved = 1 << 4 | // a0
+ 1 << 5 | // a1
+ 1 << 6 | // a2
+ 1 << 7 | // a3
+ 1 << 8 | // a4
+ 1 << 9 | // a5
+ 1 << 10 | // a6
+ 1 << 11 | // a7
+ 1 << 12 | // t0
+ 1 << 13 | // t1
+ 1 << 14 | // t2
+ 1 << 15 | // t3
+ 1 << 16 | // t4
+ 1 << 17 | // t5
+ 1 << 20; // t8
+
+const int kNumJSCallerSaved = 15;
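
The RegList masks above are plain bitsets keyed by register code, so membership is a single bit test and kNumJSCallerSaved is a popcount. A small self-check, using only the codes visible in GENERAL_REGISTERS:

#include <bitset>
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kJSCallerSaved =
      1u << 4 | 1u << 5 | 1u << 6 | 1u << 7 |      // a0..a3
      1u << 8 | 1u << 9 | 1u << 10 | 1u << 11 |    // a4..a7
      1u << 12 | 1u << 13 | 1u << 14 | 1u << 15 |  // t0..t3
      1u << 16 | 1u << 17 | 1u << 20;              // t4, t5, t8
  assert(kJSCallerSaved & (1u << 4));      // a0 (code 4) is JS caller-saved
  assert(!(kJSCallerSaved & (1u << 23)));  // s0 (code 23) is not
  assert(std::bitset<32>(kJSCallerSaved).count() == 15);  // kNumJSCallerSaved
  return 0;
}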
+
+// Callee-saved registers preserved when switching from C to JavaScript.
+const RegList kCalleeSaved = 1 << 22 | // fp
+ 1 << 23 | // s0
+ 1 << 24 | // s1
+ 1 << 25 | // s2
+ 1 << 26 | // s3
+ 1 << 27 | // s4
+ 1 << 28 | // s5
+ 1 << 29 | // s6 (roots in JavaScript code)
+ 1 << 30 | // s7 (cp in JavaScript code)
+ 1 << 31; // s8
+
+const int kNumCalleeSaved = 10;
+
+const RegList kCalleeSavedFPU = 1 << 24 | // f24
+ 1 << 25 | // f25
+ 1 << 26 | // f26
+ 1 << 27 | // f27
+ 1 << 28 | // f28
+ 1 << 29 | // f29
+ 1 << 30 | // f30
+ 1 << 31; // f31
+
+const int kNumCalleeSavedFPU = 8;
+
+const RegList kCallerSavedFPU = 1 << 0 | // f0
+ 1 << 1 | // f1
+ 1 << 2 | // f2
+ 1 << 3 | // f3
+ 1 << 4 | // f4
+ 1 << 5 | // f5
+ 1 << 6 | // f6
+ 1 << 7 | // f7
+ 1 << 8 | // f8
+ 1 << 9 | // f9
+ 1 << 10 | // f10
+ 1 << 11 | // f11
+ 1 << 12 | // f12
+ 1 << 13 | // f13
+ 1 << 14 | // f14
+ 1 << 15 | // f15
+ 1 << 16 | // f16
+ 1 << 17 | // f17
+ 1 << 18 | // f18
+ 1 << 19 | // f19
+ 1 << 20 | // f20
+ 1 << 21 | // f21
+ 1 << 22 | // f22
+ 1 << 23; // f23
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't want
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+
+// -----------------------------------------------------------------------------
+// Implementation of Register and FPURegister.
+
+enum RegisterCode {
+#define REGISTER_CODE(R) kRegCode_##R,
+ GENERAL_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kRegAfterLast
+};
+
+class Register : public RegisterBase<Register, kRegAfterLast> {
+ public:
+ static constexpr int kMantissaOffset = 0;
+ static constexpr int kExponentOffset = 4;
+
+ private:
+ friend class RegisterBase;
+ explicit constexpr Register(int code) : RegisterBase(code) {}
+};
+
+// s7: context register
+// s3: scratch register
+// s4: scratch register 2
+#define DECLARE_REGISTER(R) \
+ constexpr Register R = Register::from_code(kRegCode_##R);
+GENERAL_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+
+constexpr Register no_reg = Register::no_reg();
+
+int ToNumber(Register reg);
+
+Register ToRegister(int num);
+
+// Returns the number of padding slots needed for stack pointer alignment.
+constexpr int ArgumentPaddingSlots(int argument_count) {
+ // No argument padding required.
+ return 0;
+}
+
+constexpr bool kSimpleFPAliasing = true;
+constexpr bool kSimdMaskRegisters = false;
+
+enum DoubleRegisterCode {
+#define REGISTER_CODE(R) kDoubleCode_##R,
+ DOUBLE_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kDoubleAfterLast
+};
+
+// FPU (floating-point) register.
+class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
+ public:
+ FPURegister low() const { return FPURegister::from_code(code()); }
+
+ private:
+ friend class RegisterBase;
+ explicit constexpr FPURegister(int code) : RegisterBase(code) {}
+};
+
+// Condition Flag Register
+enum CFRegister { FCC0, FCC1, FCC2, FCC3, FCC4, FCC5, FCC6, FCC7 };
+
+using FloatRegister = FPURegister;
+
+using DoubleRegister = FPURegister;
+
+using Simd128Register = FPURegister;
+
+#define DECLARE_DOUBLE_REGISTER(R) \
+ constexpr DoubleRegister R = DoubleRegister::from_code(kDoubleCode_##R);
+DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
+#undef DECLARE_DOUBLE_REGISTER
+
+constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
+
+// Register aliases.
+// cp is assumed to be a callee saved register.
+constexpr Register kRootRegister = s6;
+constexpr Register cp = s7;
+constexpr Register kScratchReg = s3;
+constexpr Register kScratchReg2 = s4;
+constexpr DoubleRegister kScratchDoubleReg = f30;
+constexpr DoubleRegister kScratchDoubleReg1 = f30;
+constexpr DoubleRegister kScratchDoubleReg2 = f31;
+// FPU zero reg is often used to hold 0.0, but it's not hardwired to 0.0.
+constexpr DoubleRegister kDoubleRegZero = f29;
+
+struct FPUControlRegister {
+ bool is_valid() const { return (reg_code >> 2) == 0; }
+ bool is(FPUControlRegister creg) const { return reg_code == creg.reg_code; }
+ int code() const {
+ DCHECK(is_valid());
+ return reg_code;
+ }
+ int bit() const {
+ DCHECK(is_valid());
+ return 1 << reg_code;
+ }
+ void setcode(int f) {
+ reg_code = f;
+ DCHECK(is_valid());
+ }
+ // Unfortunately we can't make this private in a struct.
+ int reg_code;
+};
+
+constexpr FPUControlRegister no_fpucreg = {kInvalidFPUControlRegister};
+constexpr FPUControlRegister FCSR = {kFCSRRegister};
+constexpr FPUControlRegister FCSR0 = {kFCSRRegister};
+constexpr FPUControlRegister FCSR1 = {kFCSRRegister + 1};
+constexpr FPUControlRegister FCSR2 = {kFCSRRegister + 2};
+constexpr FPUControlRegister FCSR3 = {kFCSRRegister + 3};
+
+// Define {RegisterName} methods for the register types.
+DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS)
+DEFINE_REGISTER_NAMES(FPURegister, DOUBLE_REGISTERS)
+
+// Give alias names to registers for calling conventions.
+constexpr Register kReturnRegister0 = a0;
+constexpr Register kReturnRegister1 = a1;
+constexpr Register kReturnRegister2 = a2;
+constexpr Register kJSFunctionRegister = a1;
+constexpr Register kContextRegister = s7;
+constexpr Register kAllocateSizeRegister = a0;
+constexpr Register kInterpreterAccumulatorRegister = a0;
+constexpr Register kInterpreterBytecodeOffsetRegister = t0;
+constexpr Register kInterpreterBytecodeArrayRegister = t1;
+constexpr Register kInterpreterDispatchTableRegister = t2;
+
+constexpr Register kJavaScriptCallArgCountRegister = a0;
+constexpr Register kJavaScriptCallCodeStartRegister = a2;
+constexpr Register kJavaScriptCallTargetRegister = kJSFunctionRegister;
+constexpr Register kJavaScriptCallNewTargetRegister = a3;
+constexpr Register kJavaScriptCallExtraArg1Register = a2;
+
+constexpr Register kOffHeapTrampolineRegister = t7;
+constexpr Register kRuntimeCallFunctionRegister = a1;
+constexpr Register kRuntimeCallArgCountRegister = a0;
+constexpr Register kRuntimeCallArgvRegister = a2;
+constexpr Register kWasmInstanceRegister = a0;
+constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
+
+constexpr DoubleRegister kFPReturnRegister0 = f0;
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_CODEGEN_LOONG64_REGISTER_LOONG64_H_
diff --git a/deps/v8/src/codegen/macro-assembler.h b/deps/v8/src/codegen/macro-assembler.h
index cfa7a4d341..02fa1cf3f9 100644
--- a/deps/v8/src/codegen/macro-assembler.h
+++ b/deps/v8/src/codegen/macro-assembler.h
@@ -57,6 +57,9 @@ enum class SmiCheck { kOmit, kInline };
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/constants-mips64.h"
#include "src/codegen/mips64/macro-assembler-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/codegen/loong64/macro-assembler-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/codegen/s390/constants-s390.h"
#include "src/codegen/s390/macro-assembler-s390.h"
diff --git a/deps/v8/src/codegen/mips/assembler-mips.cc b/deps/v8/src/codegen/mips/assembler-mips.cc
index 0d5a8710e5..dde08710fb 100644
--- a/deps/v8/src/codegen/mips/assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/assembler-mips.cc
@@ -878,7 +878,6 @@ int Assembler::target_at(int pos, bool is_internal) {
}
}
}
- return 0;
}
static inline Instr SetBranchOffset(int32_t pos, int32_t target_pos,
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.cc b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
index 9c1af1cb05..32f85c6ec2 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.cc
@@ -1398,8 +1398,7 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
BlockGrowBufferScope block_growbuffer(this);
int offset = pc_offset();
Address address = j.immediate();
- saved_handles_for_raw_object_ptr_.push_back(
- std::make_pair(offset, address));
+ saved_handles_for_raw_object_ptr_.emplace_back(offset, address);
Handle<HeapObject> object(reinterpret_cast<Address*>(address));
int32_t immediate = object->ptr();
RecordRelocInfo(j.rmode(), immediate);
@@ -3279,7 +3278,6 @@ bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
return BranchShortHelper(0, L, cond, rs, rt, bdslot);
}
}
- return false;
}
void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
@@ -3631,7 +3629,6 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
}
}
- return false;
}
void TurboAssembler::LoadFromConstantsTable(Register destination,
@@ -4987,15 +4984,19 @@ void MacroAssembler::AssertStackIsAligned() {
}
void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
- Register scratch, BranchDelaySlot bd) {
+ BranchDelaySlot bd) {
DCHECK_EQ(0, kSmiTag);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
andi(scratch, value, kSmiTagMask);
Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
}
void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
- Register scratch, BranchDelaySlot bd) {
+ BranchDelaySlot bd) {
DCHECK_EQ(0, kSmiTag);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
andi(scratch, value, kSmiTagMask);
Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
}
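
The hunks above drop the explicit scratch parameter in favour of UseScratchRegisterScope. A simplified model of that RAII pattern, with placeholder register codes (the real class lives in the assembler):

#include <cassert>
#include <vector>

struct Assembler {
  std::vector<int> scratch_list_ = {13, 15};  // placeholder scratch codes
};

class UseScratchRegisterScope {
 public:
  explicit UseScratchRegisterScope(Assembler* assm) : assm_(assm) {}
  ~UseScratchRegisterScope() {  // hand every acquired register back on exit
    for (int r : taken_) assm_->scratch_list_.push_back(r);
  }
  int Acquire() {
    assert(!assm_->scratch_list_.empty());
    int r = assm_->scratch_list_.back();
    assm_->scratch_list_.pop_back();
    taken_.push_back(r);
    return r;
  }

 private:
  Assembler* assm_;
  std::vector<int> taken_;
};

int main() {
  Assembler assm;
  {
    UseScratchRegisterScope temps(&assm);
    int scratch = temps.Acquire();  // no fixed 'at'/kScratchReg argument
    (void)scratch;
    assert(assm.scratch_list_.size() == 1);
  }
  assert(assm.scratch_list_.size() == 2);  // returned automatically
  return 0;
}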
@@ -5519,10 +5520,6 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
pop(ra); // Restore ra
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- li(kSpeculationPoisonRegister, -1);
-}
-
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
diff --git a/deps/v8/src/codegen/mips/macro-assembler-mips.h b/deps/v8/src/codegen/mips/macro-assembler-mips.h
index ffa5f5820d..f467f83bd0 100644
--- a/deps/v8/src/codegen/mips/macro-assembler-mips.h
+++ b/deps/v8/src/codegen/mips/macro-assembler-mips.h
@@ -795,7 +795,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
  // Jump if the register contains a smi.
void JumpIfSmi(Register value, Label* smi_label,
- Register scratch = kScratchReg, BranchDelaySlot bd = PROTECT);
+ BranchDelaySlot bd = PROTECT);
void JumpIfEqual(Register a, int32_t b, Label* dest) {
li(kScratchReg, Operand(b));
@@ -817,8 +817,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
- void ResetSpeculationPoisonRegister();
-
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this
@@ -1108,7 +1106,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
// Jump if the register contains a non-smi.
- void JumpIfNotSmi(Register value, Label* not_smi_label, Register scratch = at,
+ void JumpIfNotSmi(Register value, Label* not_smi_label,
BranchDelaySlot bd = PROTECT);
// Abort execution if argument is a smi, enabled via --debug-code.
diff --git a/deps/v8/src/codegen/mips/register-mips.h b/deps/v8/src/codegen/mips/register-mips.h
index 95164a86c1..7fd259bf9b 100644
--- a/deps/v8/src/codegen/mips/register-mips.h
+++ b/deps/v8/src/codegen/mips/register-mips.h
@@ -362,7 +362,6 @@ constexpr Register kReturnRegister2 = a0;
constexpr Register kJSFunctionRegister = a1;
constexpr Register kContextRegister = s7;
constexpr Register kAllocateSizeRegister = a0;
-constexpr Register kSpeculationPoisonRegister = t3;
constexpr Register kInterpreterAccumulatorRegister = v0;
constexpr Register kInterpreterBytecodeOffsetRegister = t4;
constexpr Register kInterpreterBytecodeArrayRegister = t5;
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
index 708cf4baa6..5ceb69e861 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.cc
@@ -1918,8 +1918,7 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
BlockGrowBufferScope block_growbuffer(this);
int offset = pc_offset();
Address address = j.immediate();
- saved_handles_for_raw_object_ptr_.push_back(
- std::make_pair(offset, address));
+ saved_handles_for_raw_object_ptr_.emplace_back(offset, address);
Handle<HeapObject> object(reinterpret_cast<Address*>(address));
int64_t immediate = object->ptr();
RecordRelocInfo(j.rmode(), immediate);
@@ -3922,7 +3921,6 @@ bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
return BranchShortHelper(0, L, cond, rs, rt, bdslot);
}
}
- return false;
}
void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
@@ -4274,7 +4272,6 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
}
}
- return false;
}
void TurboAssembler::LoadFromConstantsTable(Register destination,
@@ -5532,15 +5529,19 @@ void TurboAssembler::SmiUntag(Register dst, const MemOperand& src) {
}
void TurboAssembler::JumpIfSmi(Register value, Label* smi_label,
- Register scratch, BranchDelaySlot bd) {
+ BranchDelaySlot bd) {
DCHECK_EQ(0, kSmiTag);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
andi(scratch, value, kSmiTagMask);
Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
}
void MacroAssembler::JumpIfNotSmi(Register value, Label* not_smi_label,
- Register scratch, BranchDelaySlot bd) {
+ BranchDelaySlot bd) {
DCHECK_EQ(0, kSmiTag);
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
andi(scratch, value, kSmiTagMask);
Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
}
@@ -6059,10 +6060,6 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
pop(ra); // Restore ra
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- li(kSpeculationPoisonRegister, -1);
-}
-
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
diff --git a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
index a4991bcb1e..a0ebe35a93 100644
--- a/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
+++ b/deps/v8/src/codegen/mips64/macro-assembler-mips64.h
@@ -805,7 +805,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MSARoundD(MSARegister dst, MSARegister src, FPURoundingMode mode);
  // Jump if the register contains a smi.
- void JumpIfSmi(Register value, Label* smi_label, Register scratch = at,
+ void JumpIfSmi(Register value, Label* smi_label,
BranchDelaySlot bd = PROTECT);
void JumpIfEqual(Register a, int32_t b, Label* dest) {
@@ -836,8 +836,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
- void ResetSpeculationPoisonRegister();
-
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this
@@ -1182,7 +1180,7 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
}
// Jump if the register contains a non-smi.
- void JumpIfNotSmi(Register value, Label* not_smi_label, Register scratch = at,
+ void JumpIfNotSmi(Register value, Label* not_smi_label,
BranchDelaySlot bd = PROTECT);
// Abort execution if argument is a smi, enabled via --debug-code.
diff --git a/deps/v8/src/codegen/mips64/register-mips64.h b/deps/v8/src/codegen/mips64/register-mips64.h
index 51b03aba1f..1fbe3ec7ac 100644
--- a/deps/v8/src/codegen/mips64/register-mips64.h
+++ b/deps/v8/src/codegen/mips64/register-mips64.h
@@ -373,7 +373,6 @@ constexpr Register kReturnRegister2 = a0;
constexpr Register kJSFunctionRegister = a1;
constexpr Register kContextRegister = s7;
constexpr Register kAllocateSizeRegister = a0;
-constexpr Register kSpeculationPoisonRegister = t3;
constexpr Register kInterpreterAccumulatorRegister = v0;
constexpr Register kInterpreterBytecodeOffsetRegister = t0;
constexpr Register kInterpreterBytecodeArrayRegister = t1;
diff --git a/deps/v8/src/codegen/optimized-compilation-info.cc b/deps/v8/src/codegen/optimized-compilation-info.cc
index e3ca07a3c9..d0c4ed52e6 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.cc
+++ b/deps/v8/src/codegen/optimized-compilation-info.cc
@@ -63,34 +63,10 @@ OptimizedCompilationInfo::OptimizedCompilationInfo(
ConfigureFlags();
}
-#ifdef DEBUG
-bool OptimizedCompilationInfo::FlagSetIsValid(Flag flag) const {
- switch (flag) {
- case kPoisonRegisterArguments:
- return untrusted_code_mitigations();
- default:
- return true;
- }
- UNREACHABLE();
-}
-
-bool OptimizedCompilationInfo::FlagGetIsValid(Flag flag) const {
- switch (flag) {
- case kPoisonRegisterArguments:
- if (!GetFlag(kPoisonRegisterArguments)) return true;
- return untrusted_code_mitigations() && called_with_code_start_register();
- default:
- return true;
- }
- UNREACHABLE();
-}
-#endif // DEBUG
-
void OptimizedCompilationInfo::ConfigureFlags() {
- if (FLAG_untrusted_code_mitigations) set_untrusted_code_mitigations();
if (FLAG_turbo_inline_js_wasm_calls) set_inline_js_wasm_calls();
- if (!is_osr() && (IsTurboprop() || FLAG_concurrent_inlining)) {
+ if (IsTurboprop() || FLAG_concurrent_inlining) {
set_concurrent_inlining();
}
@@ -104,7 +80,6 @@ void OptimizedCompilationInfo::ConfigureFlags() {
case CodeKind::TURBOPROP:
set_called_with_code_start_register();
set_switch_jump_table();
- if (FLAG_untrusted_code_mitigations) set_poison_register_arguments();
// TODO(yangguo): Disable this in case of debugging for crbug.com/826613
if (FLAG_analyze_environment_liveness) set_analyze_environment_liveness();
break;
@@ -123,8 +98,15 @@ void OptimizedCompilationInfo::ConfigureFlags() {
case CodeKind::WASM_TO_CAPI_FUNCTION:
set_switch_jump_table();
break;
- default:
+ case CodeKind::C_WASM_ENTRY:
+ case CodeKind::JS_TO_JS_FUNCTION:
+ case CodeKind::JS_TO_WASM_FUNCTION:
+ case CodeKind::WASM_TO_JS_FUNCTION:
break;
+ case CodeKind::BASELINE:
+ case CodeKind::INTERPRETED_FUNCTION:
+ case CodeKind::REGEXP:
+ UNREACHABLE();
}
}
diff --git a/deps/v8/src/codegen/optimized-compilation-info.h b/deps/v8/src/codegen/optimized-compilation-info.h
index b7ed0d29c4..d92964c796 100644
--- a/deps/v8/src/codegen/optimized-compilation-info.h
+++ b/deps/v8/src/codegen/optimized-compilation-info.h
@@ -58,21 +58,19 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
V(SourcePositions, source_positions, 4) \
V(BailoutOnUninitialized, bailout_on_uninitialized, 5) \
V(LoopPeeling, loop_peeling, 6) \
- V(UntrustedCodeMitigations, untrusted_code_mitigations, 7) \
- V(SwitchJumpTable, switch_jump_table, 8) \
- V(CalledWithCodeStartRegister, called_with_code_start_register, 9) \
- V(PoisonRegisterArguments, poison_register_arguments, 10) \
- V(AllocationFolding, allocation_folding, 11) \
- V(AnalyzeEnvironmentLiveness, analyze_environment_liveness, 12) \
- V(TraceTurboJson, trace_turbo_json, 13) \
- V(TraceTurboGraph, trace_turbo_graph, 14) \
- V(TraceTurboScheduled, trace_turbo_scheduled, 15) \
- V(TraceTurboAllocation, trace_turbo_allocation, 16) \
- V(TraceHeapBroker, trace_heap_broker, 17) \
- V(WasmRuntimeExceptionSupport, wasm_runtime_exception_support, 18) \
- V(ConcurrentInlining, concurrent_inlining, 19) \
- V(DiscardResultForTesting, discard_result_for_testing, 20) \
- V(InlineJSWasmCalls, inline_js_wasm_calls, 21)
+ V(SwitchJumpTable, switch_jump_table, 7) \
+ V(CalledWithCodeStartRegister, called_with_code_start_register, 8) \
+ V(AllocationFolding, allocation_folding, 9) \
+ V(AnalyzeEnvironmentLiveness, analyze_environment_liveness, 10) \
+ V(TraceTurboJson, trace_turbo_json, 11) \
+ V(TraceTurboGraph, trace_turbo_graph, 12) \
+ V(TraceTurboScheduled, trace_turbo_scheduled, 13) \
+ V(TraceTurboAllocation, trace_turbo_allocation, 14) \
+ V(TraceHeapBroker, trace_heap_broker, 15) \
+ V(WasmRuntimeExceptionSupport, wasm_runtime_exception_support, 16) \
+ V(ConcurrentInlining, concurrent_inlining, 17) \
+ V(DiscardResultForTesting, discard_result_for_testing, 18) \
+ V(InlineJSWasmCalls, inline_js_wasm_calls, 19)
enum Flag {
#define DEF_ENUM(Camel, Lower, Bit) k##Camel = 1 << Bit,
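
How a FLAGS(V) entry expands under DEF_ENUM, shown with two entries from the renumbered list (a sketch; only the bit positions come from the macro list above):

#include <cassert>

enum Flag {
  kSwitchJumpTable = 1 << 7,              // bit 8 before the renumbering
  kCalledWithCodeStartRegister = 1 << 8,  // bit 9 before
};

int main() {
  unsigned flags = 0;
  flags |= kSwitchJumpTable;  // what set_switch_jump_table() does
  assert((flags & kSwitchJumpTable) != 0);  // what switch_jump_table() reads
  assert((flags & kCalledWithCodeStartRegister) == 0);
  return 0;
}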
@@ -82,7 +80,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
#define DEF_GETTER(Camel, Lower, Bit) \
bool Lower() const { \
- DCHECK(FlagGetIsValid(k##Camel)); \
return GetFlag(k##Camel); \
}
FLAGS(DEF_GETTER)
@@ -90,17 +87,11 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
#define DEF_SETTER(Camel, Lower, Bit) \
void set_##Lower() { \
- DCHECK(FlagSetIsValid(k##Camel)); \
SetFlag(k##Camel); \
}
FLAGS(DEF_SETTER)
#undef DEF_SETTER
-#ifdef DEBUG
- bool FlagGetIsValid(Flag flag) const;
- bool FlagSetIsValid(Flag flag) const;
-#endif // DEBUG
-
// Construct a compilation info for optimized compilation.
OptimizedCompilationInfo(Zone* zone, Isolate* isolate,
Handle<SharedFunctionInfo> shared,
@@ -141,13 +132,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
}
compiler::NodeObserver* node_observer() const { return node_observer_; }
- void SetPoisoningMitigationLevel(PoisoningMitigationLevel poisoning_level) {
- poisoning_level_ = poisoning_level;
- }
- PoisoningMitigationLevel GetPoisoningMitigationLevel() const {
- return poisoning_level_;
- }
-
// Code getters and setters.
void SetCode(Handle<Code> code);
@@ -269,8 +253,6 @@ class V8_EXPORT_PRIVATE OptimizedCompilationInfo final {
// Compilation flags.
unsigned flags_ = 0;
- PoisoningMitigationLevel poisoning_level_ =
- PoisoningMitigationLevel::kDontPoison;
const CodeKind code_kind_;
Builtin builtin_ = Builtin::kNoBuiltinId;
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.cc b/deps/v8/src/codegen/ppc/assembler-ppc.cc
index 2c568b3f3f..3e154e4c29 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.cc
@@ -187,13 +187,13 @@ Operand Operand::EmbeddedStringConstant(const StringConstantBase* str) {
return result;
}
-MemOperand::MemOperand(Register rn, int32_t offset)
+MemOperand::MemOperand(Register rn, int64_t offset)
: ra_(rn), offset_(offset), rb_(no_reg) {}
MemOperand::MemOperand(Register ra, Register rb)
: ra_(ra), offset_(0), rb_(rb) {}
-MemOperand::MemOperand(Register ra, Register rb, int32_t offset)
+MemOperand::MemOperand(Register ra, Register rb, int64_t offset)
: ra_(ra), offset_(offset), rb_(rb) {}
void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
@@ -303,7 +303,6 @@ Condition Assembler::GetCondition(Instr instr) {
default:
UNIMPLEMENTED();
}
- return al;
}
bool Assembler::IsLis(Instr instr) {
@@ -1621,8 +1620,8 @@ void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra,
}
void Assembler::fcpsgn(const DoubleRegister frt, const DoubleRegister fra,
- const DoubleRegister frc, RCBit rc) {
- emit(EXT4 | FCPSGN | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 |
+ const DoubleRegister frb, RCBit rc) {
+ emit(EXT4 | FCPSGN | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
rc);
}
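
The fcpsgn fix above moves the second source operand from the FRC field (scaled by B6) into the FRB field (scaled by B11), matching the Power ISA X-form layout. A self-contained sketch of the field packing; the EXT4 and FCPSGN values are assumptions for illustration, and the RC bit is omitted:

#include <cassert>
#include <cstdint>

const uint32_t B6 = 1 << 6, B11 = 1 << 11, B16 = 1 << 16, B21 = 1 << 21;

uint32_t PackFcpsgn(uint32_t frt, uint32_t fra, uint32_t frb) {
  const uint32_t EXT4 = 63u << 26;  // primary opcode (assumed value)
  const uint32_t FCPSGN = 8u << 1;  // extended opcode (assumed value)
  return EXT4 | FCPSGN | frt * B21 | fra * B16 | frb * B11;
}

int main() {
  // frb now lands in bits 15..11 instead of clobbering the FRC field.
  assert(((PackFcpsgn(1, 2, 3) >> 11) & 0x1F) == 3);
  return 0;
}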
diff --git a/deps/v8/src/codegen/ppc/assembler-ppc.h b/deps/v8/src/codegen/ppc/assembler-ppc.h
index f46090cec5..2b5c156204 100644
--- a/deps/v8/src/codegen/ppc/assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/assembler-ppc.h
@@ -133,13 +133,13 @@ class V8_EXPORT_PRIVATE Operand {
// Alternatively we can have a 16bit signed value immediate
class V8_EXPORT_PRIVATE MemOperand {
public:
- explicit MemOperand(Register rn, int32_t offset = 0);
+ explicit MemOperand(Register rn, int64_t offset = 0);
explicit MemOperand(Register ra, Register rb);
- explicit MemOperand(Register ra, Register rb, int32_t offset);
+ explicit MemOperand(Register ra, Register rb, int64_t offset);
- int32_t offset() const { return offset_; }
+ int64_t offset() const { return offset_; }
// PowerPC - base register
Register ra() const { return ra_; }
@@ -148,7 +148,7 @@ class V8_EXPORT_PRIVATE MemOperand {
private:
Register ra_; // base
- int32_t offset_; // offset
+ int64_t offset_; // offset
Register rb_; // index
friend class Assembler;
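
Widening the offset to int64_t matters because the macro-assembler picks an encoding from the offset's magnitude; with int32_t, a wider offset would have been truncated before that check. A sketch of the is_int16 dispatch used by GenerateMemoryOperation further down:

#include <cassert>
#include <cstdint>

bool is_int16(int64_t v) { return v >= -32768 && v <= 32767; }

int main() {
  const int64_t small = 0x7FF0, large = int64_t{1} << 33;
  assert(is_int16(small));   // fits the 16-bit D-form immediate directly
  assert(!is_int16(large));  // forces the materialize-in-scratch path
  assert(static_cast<int32_t>(large) == 0);  // what int32_t would have kept
  return 0;
}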
@@ -373,6 +373,11 @@ class Assembler : public AssemblerBase {
x_form(instr_name, cr.code() * B2, src1.code(), src2.code(), LeaveRC); \
}
+#define DECLARE_PPC_X_INSTRUCTIONS_G_FORM(name, instr_name, instr_value) \
+ inline void name(const Register dst, const Register src) { \
+ x_form(instr_name, src, dst, r0, LeaveRC); \
+ }
+
#define DECLARE_PPC_X_INSTRUCTIONS_EH_S_FORM(name, instr_name, instr_value) \
inline void name(const Register dst, const MemOperand& src) { \
x_form(instr_name, src.ra(), dst, src.rb(), SetEH); \
@@ -411,6 +416,7 @@ class Assembler : public AssemblerBase {
PPC_X_OPCODE_D_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_D_FORM)
PPC_X_OPCODE_E_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_E_FORM)
PPC_X_OPCODE_F_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_F_FORM)
+ PPC_X_OPCODE_G_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_G_FORM)
PPC_X_OPCODE_EH_S_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_EH_S_FORM)
PPC_X_OPCODE_EH_L_FORM_LIST(DECLARE_PPC_X_INSTRUCTIONS_EH_L_FORM)
@@ -442,26 +448,40 @@ class Assembler : public AssemblerBase {
#undef DECLARE_PPC_X_INSTRUCTIONS_D_FORM
#undef DECLARE_PPC_X_INSTRUCTIONS_E_FORM
#undef DECLARE_PPC_X_INSTRUCTIONS_F_FORM
+#undef DECLARE_PPC_X_INSTRUCTIONS_G_FORM
#undef DECLARE_PPC_X_INSTRUCTIONS_EH_S_FORM
#undef DECLARE_PPC_X_INSTRUCTIONS_EH_L_FORM
-#define DECLARE_PPC_XX2_INSTRUCTIONS(name, instr_name, instr_value) \
- inline void name(const Simd128Register rt, const Simd128Register rb) { \
- xx2_form(instr_name, rt, rb); \
+#define DECLARE_PPC_XX2_VECTOR_INSTRUCTIONS(name, instr_name, instr_value) \
+ inline void name(const Simd128Register rt, const Simd128Register rb) { \
+ xx2_form(instr_name, rt, rb); \
+ }
+#define DECLARE_PPC_XX2_SCALAR_INSTRUCTIONS(name, instr_name, instr_value) \
+ inline void name(const DoubleRegister rt, const DoubleRegister rb) { \
+ xx2_form(instr_name, rt, rb); \
}
- inline void xx2_form(Instr instr, Simd128Register t, Simd128Register b) {
- // Using VR (high VSR) registers.
- int BX = 1;
- int TX = 1;
+ template <typename T>
+ inline void xx2_form(Instr instr, T t, T b) {
+ static_assert(std::is_same<T, Simd128Register>::value ||
+ std::is_same<T, DoubleRegister>::value,
+ "VSX only uses FP or Vector registers.");
+ // Using FP (low VSR) registers.
+ int BX = 0, TX = 0;
+ // Using VR (high VSR) registers when Simd registers are used.
+ if (std::is_same<T, Simd128Register>::value) {
+ BX = TX = 1;
+ }
emit(instr | (t.code() & 0x1F) * B21 | (b.code() & 0x1F) * B11 | BX * B1 |
TX);
}
- PPC_XX2_OPCODE_A_FORM_LIST(DECLARE_PPC_XX2_INSTRUCTIONS)
- PPC_XX2_OPCODE_B_FORM_LIST(DECLARE_PPC_XX2_INSTRUCTIONS)
-#undef DECLARE_PPC_XX2_INSTRUCTIONS
+ PPC_XX2_OPCODE_VECTOR_A_FORM_LIST(DECLARE_PPC_XX2_VECTOR_INSTRUCTIONS)
+ PPC_XX2_OPCODE_SCALAR_A_FORM_LIST(DECLARE_PPC_XX2_SCALAR_INSTRUCTIONS)
+ PPC_XX2_OPCODE_B_FORM_LIST(DECLARE_PPC_XX2_VECTOR_INSTRUCTIONS)
+#undef DECLARE_PPC_XX2_VECTOR_INSTRUCTIONS
+#undef DECLARE_PPC_XX2_SCALAR_INSTRUCTIONS
#define DECLARE_PPC_XX3_VECTOR_INSTRUCTIONS(name, instr_name, instr_value) \
inline void name(const Simd128Register rt, const Simd128Register ra, \
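
A standalone illustration of the compile-time dispatch the templated xx2_form above performs; the register types here are empty stand-ins:

#include <cassert>
#include <type_traits>

struct Simd128Register {};
struct DoubleRegister {};

template <typename T>
int VsxHighBits() {
  static_assert(std::is_same<T, Simd128Register>::value ||
                    std::is_same<T, DoubleRegister>::value,
                "VSX only uses FP or Vector registers.");
  // Vector (high VSR) registers set BX/TX to 1; FP (low VSR) leave them 0.
  return std::is_same<T, Simd128Register>::value ? 1 : 0;
}

int main() {
  assert(VsxHighBits<Simd128Register>() == 1);
  assert(VsxHighBits<DoubleRegister>() == 0);
  return 0;
}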
diff --git a/deps/v8/src/codegen/ppc/constants-ppc.h b/deps/v8/src/codegen/ppc/constants-ppc.h
index e7f1ff311d..693f13d43e 100644
--- a/deps/v8/src/codegen/ppc/constants-ppc.h
+++ b/deps/v8/src/codegen/ppc/constants-ppc.h
@@ -364,7 +364,7 @@ using Instr = uint32_t;
/* Decimal Floating Test Data Group Quad */ \
V(dtstdgq, DTSTDGQ, 0xFC0001C4)
-#define PPC_XX2_OPCODE_A_FORM_LIST(V) \
+#define PPC_XX2_OPCODE_VECTOR_A_FORM_LIST(V) \
/* VSX Vector Absolute Value Double-Precision */ \
V(xvabsdp, XVABSDP, 0xF0000764) \
/* VSX Vector Negate Double-Precision */ \
@@ -423,6 +423,14 @@ using Instr = uint32_t;
/* Saturate */ \
V(xvcvdpuxws, XVCVDPUXWS, 0xF0000320)
+#define PPC_XX2_OPCODE_SCALAR_A_FORM_LIST(V) \
+ /* VSX Scalar Convert Double-Precision to Single-Precision format Non- */ \
+ /* signalling */ \
+ V(xscvdpspn, XSCVDPSPN, 0xF000042C) \
+ /* VSX Scalar Convert Single-Precision to Double-Precision format Non- */ \
+ /* signalling */ \
+ V(xscvspdpn, XSCVSPDPN, 0xF000052C)
+
#define PPC_XX2_OPCODE_B_FORM_LIST(V) \
/* Vector Byte-Reverse Quadword */ \
V(xxbrq, XXBRQ, 0xF01F076C)
@@ -440,9 +448,6 @@ using Instr = uint32_t;
V(xsabsdp, XSABSDP, 0xF0000564) \
/* VSX Scalar Convert Double-Precision to Single-Precision */ \
V(xscvdpsp, XSCVDPSP, 0xF0000424) \
- /* VSX Scalar Convert Double-Precision to Single-Precision format Non- */ \
- /* signalling */ \
- V(xscvdpspn, XSCVDPSPN, 0xF000042C) \
/* VSX Scalar Convert Double-Precision to Signed Fixed-Point Doubleword */ \
/* Saturate */ \
V(xscvdpsxds, XSCVDPSXDS, 0xF0000560) \
@@ -457,9 +462,6 @@ using Instr = uint32_t;
V(xscvdpuxws, XSCVDPUXWS, 0xF0000120) \
/* VSX Scalar Convert Single-Precision to Double-Precision (p=1) */ \
V(xscvspdp, XSCVSPDP, 0xF0000524) \
- /* Scalar Convert Single-Precision to Double-Precision format Non- */ \
- /* signalling */ \
- V(xscvspdpn, XSCVSPDPN, 0xF000052C) \
/* VSX Scalar Convert Signed Fixed-Point Doubleword to Double-Precision */ \
V(xscvsxddp, XSCVSXDDP, 0xF00005E0) \
/* VSX Scalar Convert Signed Fixed-Point Doubleword to Single-Precision */ \
@@ -531,9 +533,10 @@ using Instr = uint32_t;
/* Vector Splat Immediate Byte */ \
V(xxspltib, XXSPLTIB, 0xF00002D0)
-#define PPC_XX2_OPCODE_LIST(V) \
- PPC_XX2_OPCODE_A_FORM_LIST(V) \
- PPC_XX2_OPCODE_B_FORM_LIST(V) \
+#define PPC_XX2_OPCODE_LIST(V) \
+ PPC_XX2_OPCODE_VECTOR_A_FORM_LIST(V) \
+ PPC_XX2_OPCODE_SCALAR_A_FORM_LIST(V) \
+ PPC_XX2_OPCODE_B_FORM_LIST(V) \
PPC_XX2_OPCODE_UNUSED_LIST(V)
#define PPC_EVX_OPCODE_LIST(V) \
@@ -1267,6 +1270,14 @@ using Instr = uint32_t;
/* Compare Logical */ \
V(cmpl, CMPL, 0x7C000040)
+#define PPC_X_OPCODE_G_FORM_LIST(V) \
+ /* Byte-Reverse Halfword */ \
+ V(brh, BRH, 0x7C0001B6) \
+ /* Byte-Reverse Word */ \
+ V(brw, BRW, 0x7C000136) \
+ /* Byte-Reverse Doubleword */ \
+ V(brd, BRD, 0x7C000176)
+
#define PPC_X_OPCODE_EH_S_FORM_LIST(V) \
/* Store Byte Conditional Indexed */ \
V(stbcx, STBCX, 0x7C00056D) \
@@ -1737,6 +1748,7 @@ using Instr = uint32_t;
PPC_X_OPCODE_D_FORM_LIST(V) \
PPC_X_OPCODE_E_FORM_LIST(V) \
PPC_X_OPCODE_F_FORM_LIST(V) \
+ PPC_X_OPCODE_G_FORM_LIST(V) \
PPC_X_OPCODE_EH_L_FORM_LIST(V) \
PPC_X_OPCODE_UNUSED_LIST(V)
@@ -3006,7 +3018,8 @@ class Instruction {
}
opcode = extcode | BitField(10, 2);
switch (opcode) {
- PPC_XX2_OPCODE_A_FORM_LIST(OPCODE_CASES)
+ PPC_XX2_OPCODE_VECTOR_A_FORM_LIST(OPCODE_CASES)
+ PPC_XX2_OPCODE_SCALAR_A_FORM_LIST(OPCODE_CASES)
PPC_XX2_OPCODE_UNUSED_LIST(OPCODE_CASES)
return static_cast<Opcode>(opcode);
}
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
index f243055490..64d94c68eb 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.cc
@@ -168,8 +168,6 @@ void TurboAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
if (cond != al) b(NegateCondition(cond), &skip, cr);
- DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);
-
mov(ip, Operand(target, rmode));
mtctr(ip);
bctr();
@@ -1252,6 +1250,9 @@ void TurboAssembler::EnterFrame(StackFrame::Type type,
mov(ip, Operand(StackFrame::TypeToMarker(type)));
PushCommonFrame(ip);
}
+#if V8_ENABLE_WEBASSEMBLY
+ if (type == StackFrame::WASM) Push(kWasmInstanceRegister);
+#endif // V8_ENABLE_WEBASSEMBLY
}
int TurboAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
@@ -2662,7 +2663,14 @@ void TurboAssembler::MovDoubleToInt64(
addi(sp, sp, Operand(kDoubleSize));
}
-void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
+void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src,
+ Register scratch) {
+ if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
+ ShiftLeftU64(scratch, src, Operand(32));
+ mtfprd(dst, scratch);
+ xscvspdpn(dst, dst);
+ return;
+ }
subi(sp, sp, Operand(kFloatSize));
stw(src, MemOperand(sp, 0));
nop(GROUP_ENDING_NOP); // LHS/RAW optimization
@@ -2670,7 +2678,13 @@ void TurboAssembler::MovIntToFloat(DoubleRegister dst, Register src) {
addi(sp, sp, Operand(kFloatSize));
}
-void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src) {
+void TurboAssembler::MovFloatToInt(Register dst, DoubleRegister src,
+ DoubleRegister scratch) {
+ if (CpuFeatures::IsSupported(PPC_8_PLUS)) {
+ xscvdpspn(scratch, src);
+ mffprwz(dst, scratch);
+ return;
+ }
subi(sp, sp, Operand(kFloatSize));
stfs(src, MemOperand(sp, 0));
nop(GROUP_ENDING_NOP); // LHS/RAW optimization
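
Both MovIntToFloat paths above (the POWER8 mtfprd/xscvspdpn sequence and the stack spill) move a raw 32-bit pattern between register files without numeric conversion. The portable C++ equivalent, as a sketch:

#include <cassert>
#include <cstdint>
#include <cstring>

float BitsToFloat(uint32_t bits) {
  float f;
  std::memcpy(&f, &bits, sizeof(f));  // reinterpret the bits, no conversion
  return f;
}

uint32_t FloatToBits(float f) {
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  return bits;
}

int main() {
  assert(FloatToBits(1.0f) == 0x3F800000u);  // IEEE-754 single for 1.0
  assert(BitsToFloat(0x40490FDBu) > 3.14f);  // roughly pi
  return 0;
}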
@@ -2759,6 +2773,44 @@ void TurboAssembler::MulS32(Register dst, Register src, Register value, OEBit s,
extsw(dst, dst, r);
}
+void TurboAssembler::DivS64(Register dst, Register src, Register value, OEBit s,
+ RCBit r) {
+ divd(dst, src, value, s, r);
+}
+
+void TurboAssembler::DivU64(Register dst, Register src, Register value, OEBit s,
+ RCBit r) {
+ divdu(dst, src, value, s, r);
+}
+
+void TurboAssembler::DivS32(Register dst, Register src, Register value, OEBit s,
+ RCBit r) {
+ divw(dst, src, value, s, r);
+ extsw(dst, dst);
+}
+void TurboAssembler::DivU32(Register dst, Register src, Register value, OEBit s,
+ RCBit r) {
+ divwu(dst, src, value, s, r);
+ ZeroExtWord32(dst, dst);
+}
+
+void TurboAssembler::ModS64(Register dst, Register src, Register value) {
+ modsd(dst, src, value);
+}
+
+void TurboAssembler::ModU64(Register dst, Register src, Register value) {
+ modud(dst, src, value);
+}
+
+void TurboAssembler::ModS32(Register dst, Register src, Register value) {
+ modsw(dst, src, value);
+ extsw(dst, dst);
+}
+void TurboAssembler::ModU32(Register dst, Register src, Register value) {
+ moduw(dst, src, value);
+ ZeroExtWord32(dst, dst);
+}
+
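
Why the signed 32-bit helpers above append extsw while the unsigned ones zero-extend: the full 64-bit register must hold a canonical widening of the 32-bit quotient or remainder. A sketch of the two widenings:

#include <cassert>
#include <cstdint>

int64_t DivS32(int32_t a, int32_t b) {
  return static_cast<int64_t>(a / b);  // sign-extend, as extsw does
}
uint64_t DivU32(uint32_t a, uint32_t b) {
  return static_cast<uint64_t>(a / b);  // zero-extend, as ZeroExtWord32 does
}

int main() {
  assert(DivS32(-7, 2) == -3);          // C-style truncation toward zero
  assert((DivS32(-7, 2) >> 32) == -1);  // upper word is all ones
  assert(DivU32(7, 2) == 3);            // upper word stays zero
  return 0;
}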
void TurboAssembler::AndU64(Register dst, Register src, const Operand& value,
Register scratch, RCBit r) {
if (is_uint16(value.immediate()) && r == SetRC) {
@@ -3056,7 +3108,7 @@ void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi,
#define GenerateMemoryOperation(reg, mem, ri_op, rr_op) \
{ \
- int offset = mem.offset(); \
+ int64_t offset = mem.offset(); \
\
if (mem.rb() == no_reg) { \
if (!is_int16(offset)) { \
@@ -3085,7 +3137,7 @@ void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi smi,
#define GenerateMemoryOperationWithAlign(reg, mem, ri_op, rr_op) \
{ \
- int offset = mem.offset(); \
+ int64_t offset = mem.offset(); \
int misaligned = (offset & 3); \
\
if (mem.rb() == no_reg) { \
@@ -3265,7 +3317,7 @@ void TurboAssembler::StoreF64LE(DoubleRegister dst, const MemOperand& mem,
LoadU64(scratch, mem, scratch2);
StoreU64LE(scratch, mem, scratch2);
#else
- LoadF64(dst, mem, scratch);
+ StoreF64(dst, mem, scratch);
#endif
}
@@ -3276,7 +3328,7 @@ void TurboAssembler::StoreF32LE(DoubleRegister dst, const MemOperand& mem,
LoadU32(scratch, mem, scratch2);
StoreU32LE(scratch, mem, scratch2);
#else
- LoadF64(dst, mem, scratch);
+ StoreF32(dst, mem, scratch);
#endif
}
@@ -3453,10 +3505,6 @@ void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
addi(sp, sp, Operand(2 * kSimd128Size));
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- mov(kSpeculationPoisonRegister, Operand(-1));
-}
-
void TurboAssembler::JumpIfEqual(Register x, int32_t y, Label* dest) {
CmpS64(x, Operand(y), r0);
beq(dest);
diff --git a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
index 035c29b1e5..2dfdb39dcc 100644
--- a/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
+++ b/deps/v8/src/codegen/ppc/macro-assembler-ppc.h
@@ -201,6 +201,18 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register scratch = r0, OEBit s = LeaveOE, RCBit r = LeaveRC);
void MulS32(Register dst, Register src, Register value, OEBit s = LeaveOE,
RCBit r = LeaveRC);
+ void DivS64(Register dst, Register src, Register value, OEBit s = LeaveOE,
+ RCBit r = LeaveRC);
+ void DivU64(Register dst, Register src, Register value, OEBit s = LeaveOE,
+ RCBit r = LeaveRC);
+ void DivS32(Register dst, Register src, Register value, OEBit s = LeaveOE,
+ RCBit r = LeaveRC);
+ void DivU32(Register dst, Register src, Register value, OEBit s = LeaveOE,
+ RCBit r = LeaveRC);
+ void ModS64(Register dst, Register src, Register value);
+ void ModU64(Register dst, Register src, Register value);
+ void ModS32(Register dst, Register src, Register value);
+ void ModU32(Register dst, Register src, Register value);
void AndU64(Register dst, Register src, const Operand& value,
Register scratch = r0, RCBit r = SetRC);
@@ -561,8 +573,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register dst_hi,
#endif
Register dst, DoubleRegister src);
- void MovIntToFloat(DoubleRegister dst, Register src);
- void MovFloatToInt(Register dst, DoubleRegister src);
+ void MovIntToFloat(DoubleRegister dst, Register src, Register scratch);
+ void MovFloatToInt(Register dst, DoubleRegister src, DoubleRegister scratch);
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Smi smi) { LoadSmiLiteral(dst, smi); }
void Move(Register dst, Handle<HeapObject> value,
@@ -735,8 +747,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// The return address on the stack is used by frame iteration.
void StoreReturnAddressAndCall(Register target);
- void ResetSpeculationPoisonRegister();
-
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this
diff --git a/deps/v8/src/codegen/ppc/register-ppc.h b/deps/v8/src/codegen/ppc/register-ppc.h
index ffeb327055..68adfdb155 100644
--- a/deps/v8/src/codegen/ppc/register-ppc.h
+++ b/deps/v8/src/codegen/ppc/register-ppc.h
@@ -349,7 +349,6 @@ constexpr Register kReturnRegister2 = r5;
constexpr Register kJSFunctionRegister = r4;
constexpr Register kContextRegister = r30;
constexpr Register kAllocateSizeRegister = r4;
-constexpr Register kSpeculationPoisonRegister = r14;
constexpr Register kInterpreterAccumulatorRegister = r3;
constexpr Register kInterpreterBytecodeOffsetRegister = r15;
constexpr Register kInterpreterBytecodeArrayRegister = r16;
diff --git a/deps/v8/src/codegen/register-arch.h b/deps/v8/src/codegen/register-arch.h
index eb4cdb8789..d5ea2879da 100644
--- a/deps/v8/src/codegen/register-arch.h
+++ b/deps/v8/src/codegen/register-arch.h
@@ -22,6 +22,8 @@
#include "src/codegen/mips/register-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/register-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/codegen/loong64/register-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/codegen/s390/register-s390.h"
#elif V8_TARGET_ARCH_RISCV64
diff --git a/deps/v8/src/codegen/register-configuration.cc b/deps/v8/src/codegen/register-configuration.cc
index aca5295c11..2fc97e2fec 100644
--- a/deps/v8/src/codegen/register-configuration.cc
+++ b/deps/v8/src/codegen/register-configuration.cc
@@ -60,6 +60,8 @@ static int get_num_allocatable_double_registers() {
kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_MIPS64
kMaxAllocatableDoubleRegisterCount;
+#elif V8_TARGET_ARCH_LOONG64
+ kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_PPC
kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_PPC64
@@ -102,42 +104,6 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultRegisterConfiguration,
GetDefaultRegisterConfiguration)
-// Allocatable registers with the masking register removed.
-class ArchDefaultPoisoningRegisterConfiguration : public RegisterConfiguration {
- public:
- ArchDefaultPoisoningRegisterConfiguration()
- : RegisterConfiguration(
- Register::kNumRegisters, DoubleRegister::kNumRegisters,
- kMaxAllocatableGeneralRegisterCount - 1,
- get_num_allocatable_double_registers(),
- InitializeGeneralRegisterCodes(), get_allocatable_double_codes(),
- kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE) {
- }
-
- private:
- static const int* InitializeGeneralRegisterCodes() {
- int filtered_index = 0;
- for (int i = 0; i < kMaxAllocatableGeneralRegisterCount; ++i) {
- if (kAllocatableGeneralCodes[i] != kSpeculationPoisonRegister.code()) {
- allocatable_general_codes_[filtered_index] =
- kAllocatableGeneralCodes[i];
- filtered_index++;
- }
- }
- DCHECK_EQ(filtered_index, kMaxAllocatableGeneralRegisterCount - 1);
- return allocatable_general_codes_;
- }
-
- static int
- allocatable_general_codes_[kMaxAllocatableGeneralRegisterCount - 1];
-};
-
-int ArchDefaultPoisoningRegisterConfiguration::allocatable_general_codes_
- [kMaxAllocatableGeneralRegisterCount - 1];
-
-DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultPoisoningRegisterConfiguration,
- GetDefaultPoisoningRegisterConfiguration)
-
// RestrictedRegisterConfiguration uses the subset of allocatable general
// registers the architecture support, which results into generating assembly
// to use less registers. Currently, it's only used by RecordWrite code stub.
@@ -184,10 +150,6 @@ const RegisterConfiguration* RegisterConfiguration::Default() {
return GetDefaultRegisterConfiguration();
}
-const RegisterConfiguration* RegisterConfiguration::Poisoning() {
- return GetDefaultPoisoningRegisterConfiguration();
-}
-
const RegisterConfiguration* RegisterConfiguration::RestrictGeneralRegisters(
RegList registers) {
int num = NumRegs(registers);
diff --git a/deps/v8/src/codegen/reloc-info.cc b/deps/v8/src/codegen/reloc-info.cc
index 0693d32459..7c4d85128f 100644
--- a/deps/v8/src/codegen/reloc-info.cc
+++ b/deps/v8/src/codegen/reloc-info.cc
@@ -320,7 +320,7 @@ bool RelocInfo::OffHeapTargetIsCodedSpecially() {
#elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_MIPS) || \
defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC) || \
defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390) || \
- defined(V8_TARGET_ARCH_RISCV64)
+ defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64)
return true;
#endif
}
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
index 0c322542a9..8cad060a47 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.cc
@@ -57,6 +57,9 @@ static unsigned CpuFeaturesImpliedByCompiler() {
answer |= 1u << FPU;
#endif // def CAN_USE_FPU_INSTRUCTIONS
+#ifdef CAN_USE_RVV_INSTRUCTIONS
+ answer |= 1u << RISCV_SIMD;
+#endif // def CAN_USE_RVV_INSTRUCTIONS
return answer;
}
@@ -64,18 +67,20 @@ bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(RISCV_SIMD); }
void CpuFeatures::ProbeImpl(bool cross_compile) {
supported_ |= CpuFeaturesImpliedByCompiler();
-
// Only use statically determined features for cross compile (snapshot).
if (cross_compile) return;
-
// Probe for additional features at runtime.
base::CPU cpu;
if (cpu.has_fpu()) supported_ |= 1u << FPU;
+  // Set a static value on whether SIMD is supported.
+  // This variable is only used on certain archs to query
+  // SupportsWasmSimd128() at runtime in builtins via an extern ref. Other
+  // callers should use CpuFeatures::SupportsWasmSimd128().
+ CpuFeatures::supports_wasm_simd_128_ = CpuFeatures::SupportsWasmSimd128();
}
void CpuFeatures::PrintTarget() {}
void CpuFeatures::PrintFeatures() {}
-
int ToNumber(Register reg) {
DCHECK(reg.is_valid());
const int kNumbers[] = {
@@ -207,7 +212,8 @@ void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) {
Assembler::Assembler(const AssemblerOptions& options,
std::unique_ptr<AssemblerBuffer> buffer)
: AssemblerBase(options, std::move(buffer)),
- scratch_register_list_(t3.bit() | t5.bit() | s10.bit()),
+ VU(this),
+ scratch_register_list_(t3.bit() | t5.bit()),
constpool_(this) {
reloc_info_writer.Reposition(buffer_start_ + buffer_->size(), pc_);
@@ -309,7 +315,6 @@ bool Assembler::IsCBranch(Instr instr) {
int Op = instr & kRvcOpcodeMask;
return Op == RO_C_BNEZ || Op == RO_C_BEQZ;
}
-
bool Assembler::IsJump(Instr instr) {
int Op = instr & kBaseOpcodeMask;
return Op == JAL || Op == JALR;
@@ -377,7 +382,7 @@ int Assembler::target_at(int pos, bool is_internal) {
} else {
return pos + imm13;
}
- } break;
+ }
case JAL: {
int32_t imm21 = JumpOffset(instr);
if (imm21 == kEndOfJumpChain) {
@@ -386,7 +391,7 @@ int Assembler::target_at(int pos, bool is_internal) {
} else {
return pos + imm21;
}
- } break;
+ }
case JALR: {
int32_t imm12 = instr >> 20;
if (imm12 == kEndOfJumpChain) {
@@ -395,7 +400,7 @@ int Assembler::target_at(int pos, bool is_internal) {
} else {
return pos + imm12;
}
- } break;
+ }
case LUI: {
Address pc = reinterpret_cast<Address>(buffer_start_ + pos);
pc = target_address_at(pc);
@@ -409,7 +414,7 @@ int Assembler::target_at(int pos, bool is_internal) {
DCHECK(pos > delta);
return pos - delta;
}
- } break;
+ }
case AUIPC: {
Instr instr_auipc = instr;
Instr instr_I = instr_at(pos + 4);
@@ -417,18 +422,18 @@ int Assembler::target_at(int pos, bool is_internal) {
int32_t offset = BrachlongOffset(instr_auipc, instr_I);
if (offset == kEndOfJumpChain) return kEndOfChain;
return offset + pos;
- } break;
+ }
case RO_C_J: {
int32_t offset = instruction->RvcImm11CJValue();
if (offset == kEndOfJumpChain) return kEndOfChain;
return offset + pos;
- } break;
+ }
case RO_C_BNEZ:
case RO_C_BEQZ: {
int32_t offset = instruction->RvcImm8BValue();
if (offset == kEndOfJumpChain) return kEndOfChain;
return pos + offset;
- } break;
+ }
default: {
if (instr == kEndOfJumpChain) {
return kEndOfChain;
@@ -437,7 +442,7 @@ int Assembler::target_at(int pos, bool is_internal) {
((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
return (imm18 + pos);
}
- } break;
+ }
}
}
@@ -511,7 +516,6 @@ static inline ShortInstr SetCJalOffset(int32_t pos, int32_t target_pos,
DCHECK(Assembler::IsCJal(instr | (imm11 & kImm11Mask)));
return instr | (imm11 & kImm11Mask);
}
-
static inline Instr SetCBranchOffset(int32_t pos, int32_t target_pos,
Instr instr) {
DCHECK(Assembler::IsCBranch(instr));
@@ -1137,6 +1141,102 @@ void Assembler::GenInstrCBA(uint8_t funct3, uint8_t funct2, Opcode opcode,
emit(instr);
}
+// OPIVV OPFVV OPMVV
+void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd,
+ VRegister vs1, VRegister vs2, MaskType mask) {
+ DCHECK(opcode == OP_MVV || opcode == OP_FVV || opcode == OP_IVV);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ ((vs1.code() & 0x1F) << kRvvVs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+// OPMVV OPFVV
+void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, Register rd,
+ VRegister vs1, VRegister vs2, MaskType mask) {
+ DCHECK(opcode == OP_MVV || opcode == OP_FVV);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((rd.code() & 0x1F) << kRvvVdShift) |
+ ((vs1.code() & 0x1F) << kRvvVs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+// OPIVX OPFVF OPMVX
+void Assembler::GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd,
+ Register rs1, VRegister vs2, MaskType mask) {
+ DCHECK(opcode == OP_IVX || opcode == OP_FVF || opcode == OP_MVX);
+ Instr instr = (funct6 << kRvvFunct6Shift) | opcode | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ ((rs1.code() & 0x1F) << kRvvRs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+// OPMVX
+void Assembler::GenInstrV(uint8_t funct6, Register rd, Register rs1,
+ VRegister vs2, MaskType mask) {
+ Instr instr = (funct6 << kRvvFunct6Shift) | OP_MVX | (mask << kRvvVmShift) |
+ ((rd.code() & 0x1F) << kRvvVdShift) |
+ ((rs1.code() & 0x1F) << kRvvRs1Shift) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+// OPIVI
+void Assembler::GenInstrV(uint8_t funct6, VRegister vd, int8_t imm5,
+ VRegister vs2, MaskType mask) {
+ DCHECK(is_uint5(imm5) || is_int5(imm5));
+ Instr instr = (funct6 << kRvvFunct6Shift) | OP_IVI | (mask << kRvvVmShift) |
+ ((vd.code() & 0x1F) << kRvvVdShift) |
+ (((uint32_t)imm5 << kRvvImm5Shift) & kRvvImm5Mask) |
+ ((vs2.code() & 0x1F) << kRvvVs2Shift);
+ emit(instr);
+}
+
+// VL VS
+void Assembler::GenInstrV(Opcode opcode, uint8_t width, VRegister vd,
+ Register rs1, uint8_t umop, MaskType mask,
+ uint8_t IsMop, bool IsMew, uint8_t Nf) {
+ DCHECK(opcode == LOAD_FP || opcode == STORE_FP);
+ Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
+ ((width << kRvvWidthShift) & kRvvWidthMask) |
+ ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
+ ((umop << kRvvRs2Shift) & kRvvRs2Mask) |
+ ((mask << kRvvVmShift) & kRvvVmMask) |
+ ((IsMop << kRvvMopShift) & kRvvMopMask) |
+ ((IsMew << kRvvMewShift) & kRvvMewMask) |
+ ((Nf << kRvvNfShift) & kRvvNfMask);
+ emit(instr);
+}
+void Assembler::GenInstrV(Opcode opcode, uint8_t width, VRegister vd,
+ Register rs1, Register rs2, MaskType mask,
+ uint8_t IsMop, bool IsMew, uint8_t Nf) {
+ DCHECK(opcode == LOAD_FP || opcode == STORE_FP);
+ Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
+ ((width << kRvvWidthShift) & kRvvWidthMask) |
+ ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
+ ((rs2.code() << kRvvRs2Shift) & kRvvRs2Mask) |
+ ((mask << kRvvVmShift) & kRvvVmMask) |
+ ((IsMop << kRvvMopShift) & kRvvMopMask) |
+ ((IsMew << kRvvMewShift) & kRvvMewMask) |
+ ((Nf << kRvvNfShift) & kRvvNfMask);
+ emit(instr);
+}
+// VL VS AMO
+void Assembler::GenInstrV(Opcode opcode, uint8_t width, VRegister vd,
+ Register rs1, VRegister vs2, MaskType mask,
+ uint8_t IsMop, bool IsMew, uint8_t Nf) {
+ DCHECK(opcode == LOAD_FP || opcode == STORE_FP || opcode == AMO);
+ Instr instr = opcode | ((vd.code() << kRvvVdShift) & kRvvVdMask) |
+ ((width << kRvvWidthShift) & kRvvWidthMask) |
+ ((rs1.code() << kRvvRs1Shift) & kRvvRs1Mask) |
+ ((vs2.code() << kRvvRs2Shift) & kRvvRs2Mask) |
+ ((mask << kRvvVmShift) & kRvvVmMask) |
+ ((IsMop << kRvvMopShift) & kRvvMopMask) |
+ ((IsMew << kRvvMewShift) & kRvvMewMask) |
+ ((Nf << kRvvNfShift) & kRvvNfMask);
+ emit(instr);
+}
// ----- Instruction class templates match those in the compiler
void Assembler::GenInstrBranchCC_rri(uint8_t funct3, Register rs1, Register rs2,
@@ -2328,8 +2428,538 @@ void Assembler::EBREAK() {
ebreak();
}
-// Privileged
+// RVV
+void Assembler::vmv_vv(VRegister vd, VRegister vs1) {
+ GenInstrV(VMV_FUNCT6, OP_IVV, vd, vs1, v0, NoMask);
+}
+
+void Assembler::vmv_vx(VRegister vd, Register rs1) {
+ GenInstrV(VMV_FUNCT6, OP_IVX, vd, rs1, v0, NoMask);
+}
+
+void Assembler::vmv_vi(VRegister vd, uint8_t simm5) {
+ GenInstrV(VMV_FUNCT6, vd, simm5, v0, NoMask);
+}
+
+void Assembler::vmv_xs(Register rd, VRegister vs2) {
+ GenInstrV(VWXUNARY0_FUNCT6, OP_MVV, rd, v0, vs2, NoMask);
+}
+
+void Assembler::vmv_sx(VRegister vd, Register rs1) {
+ GenInstrV(VRXUNARY0_FUNCT6, OP_MVX, vd, rs1, v0, NoMask);
+}
+
+void Assembler::vmerge_vv(VRegister vd, VRegister vs1, VRegister vs2) {
+ GenInstrV(VMV_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
+}
+
+void Assembler::vmerge_vx(VRegister vd, Register rs1, VRegister vs2) {
+ GenInstrV(VMV_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
+}
+
+void Assembler::vmerge_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
+ GenInstrV(VMV_FUNCT6, vd, imm5, vs2, Mask);
+}
+
+void Assembler::vadc_vv(VRegister vd, VRegister vs1, VRegister vs2) {
+ GenInstrV(VADC_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
+}
+
+void Assembler::vadc_vx(VRegister vd, Register rs1, VRegister vs2) {
+ GenInstrV(VADC_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
+}
+
+void Assembler::vadc_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
+ GenInstrV(VADC_FUNCT6, vd, imm5, vs2, Mask);
+}
+
+void Assembler::vmadc_vv(VRegister vd, VRegister vs1, VRegister vs2) {
+ GenInstrV(VMADC_FUNCT6, OP_IVV, vd, vs1, vs2, Mask);
+}
+
+void Assembler::vmadc_vx(VRegister vd, Register rs1, VRegister vs2) {
+ GenInstrV(VMADC_FUNCT6, OP_IVX, vd, rs1, vs2, Mask);
+}
+
+void Assembler::vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2) {
+ GenInstrV(VMADC_FUNCT6, vd, imm5, vs2, Mask);
+}
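+// Note: vmerge, vadc and vmadc above are encoded with vm = 0 (the explicit
+// Mask argument), i.e. they always consume the v0 mask register.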
+#define DEFINE_OPIVV(name, funct6) \
+ void Assembler::name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask) { \
+ GenInstrV(funct6, OP_IVV, vd, vs1, vs2, mask); \
+ }
+
+#define DEFINE_OPIVX(name, funct6) \
+ void Assembler::name##_vx(VRegister vd, VRegister vs2, Register rs1, \
+ MaskType mask) { \
+ GenInstrV(funct6, OP_IVX, vd, rs1, vs2, mask); \
+ }
+
+#define DEFINE_OPIVI(name, funct6) \
+ void Assembler::name##_vi(VRegister vd, VRegister vs2, int8_t imm5, \
+ MaskType mask) { \
+ GenInstrV(funct6, vd, imm5, vs2, mask); \
+ }
+
+#define DEFINE_OPMVV(name, funct6) \
+ void Assembler::name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask) { \
+ GenInstrV(funct6, OP_MVV, vd, vs1, vs2, mask); \
+ }
+
+DEFINE_OPIVV(vadd, VADD_FUNCT6)
+DEFINE_OPIVX(vadd, VADD_FUNCT6)
+DEFINE_OPIVI(vadd, VADD_FUNCT6)
+DEFINE_OPIVV(vsub, VSUB_FUNCT6)
+DEFINE_OPIVX(vsub, VSUB_FUNCT6)
+DEFINE_OPIVX(vsadd, VSADD_FUNCT6)
+DEFINE_OPIVV(vsadd, VSADD_FUNCT6)
+DEFINE_OPIVI(vsadd, VSADD_FUNCT6)
+DEFINE_OPIVX(vsaddu, VSADD_FUNCT6)
+DEFINE_OPIVV(vsaddu, VSADD_FUNCT6)
+DEFINE_OPIVI(vsaddu, VSADD_FUNCT6)
+DEFINE_OPIVX(vssub, VSSUB_FUNCT6)
+DEFINE_OPIVV(vssub, VSSUB_FUNCT6)
+DEFINE_OPIVX(vssubu, VSSUBU_FUNCT6)
+DEFINE_OPIVV(vssubu, VSSUBU_FUNCT6)
+DEFINE_OPIVX(vrsub, VRSUB_FUNCT6)
+DEFINE_OPIVI(vrsub, VRSUB_FUNCT6)
+DEFINE_OPIVV(vminu, VMINU_FUNCT6)
+DEFINE_OPIVX(vminu, VMINU_FUNCT6)
+DEFINE_OPIVV(vmin, VMIN_FUNCT6)
+DEFINE_OPIVX(vmin, VMIN_FUNCT6)
+DEFINE_OPIVV(vmaxu, VMAXU_FUNCT6)
+DEFINE_OPIVX(vmaxu, VMAXU_FUNCT6)
+DEFINE_OPIVV(vmax, VMAX_FUNCT6)
+DEFINE_OPIVX(vmax, VMAX_FUNCT6)
+DEFINE_OPIVV(vand, VAND_FUNCT6)
+DEFINE_OPIVX(vand, VAND_FUNCT6)
+DEFINE_OPIVI(vand, VAND_FUNCT6)
+DEFINE_OPIVV(vor, VOR_FUNCT6)
+DEFINE_OPIVX(vor, VOR_FUNCT6)
+DEFINE_OPIVI(vor, VOR_FUNCT6)
+DEFINE_OPIVV(vxor, VXOR_FUNCT6)
+DEFINE_OPIVX(vxor, VXOR_FUNCT6)
+DEFINE_OPIVI(vxor, VXOR_FUNCT6)
+DEFINE_OPIVV(vrgather, VRGATHER_FUNCT6)
+DEFINE_OPIVX(vrgather, VRGATHER_FUNCT6)
+DEFINE_OPIVI(vrgather, VRGATHER_FUNCT6)
+
+DEFINE_OPIVX(vslidedown, VSLIDEDOWN_FUNCT6)
+DEFINE_OPIVI(vslidedown, VSLIDEDOWN_FUNCT6)
+DEFINE_OPIVX(vslideup, VSLIDEUP_FUNCT6)
+DEFINE_OPIVI(vslideup, VSLIDEUP_FUNCT6)
+
+DEFINE_OPIVV(vmseq, VMSEQ_FUNCT6)
+DEFINE_OPIVX(vmseq, VMSEQ_FUNCT6)
+DEFINE_OPIVI(vmseq, VMSEQ_FUNCT6)
+
+DEFINE_OPIVV(vmsne, VMSNE_FUNCT6)
+DEFINE_OPIVX(vmsne, VMSNE_FUNCT6)
+DEFINE_OPIVI(vmsne, VMSNE_FUNCT6)
+
+DEFINE_OPIVV(vmsltu, VMSLTU_FUNCT6)
+DEFINE_OPIVX(vmsltu, VMSLTU_FUNCT6)
+
+DEFINE_OPIVV(vmslt, VMSLT_FUNCT6)
+DEFINE_OPIVX(vmslt, VMSLT_FUNCT6)
+
+DEFINE_OPIVV(vmsle, VMSLE_FUNCT6)
+DEFINE_OPIVX(vmsle, VMSLE_FUNCT6)
+DEFINE_OPIVI(vmsle, VMSLE_FUNCT6)
+
+DEFINE_OPIVV(vmsleu, VMSLEU_FUNCT6)
+DEFINE_OPIVX(vmsleu, VMSLEU_FUNCT6)
+DEFINE_OPIVI(vmsleu, VMSLEU_FUNCT6)
+
+DEFINE_OPIVI(vmsgt, VMSGT_FUNCT6)
+DEFINE_OPIVX(vmsgt, VMSGT_FUNCT6)
+
+DEFINE_OPIVI(vmsgtu, VMSGTU_FUNCT6)
+DEFINE_OPIVX(vmsgtu, VMSGTU_FUNCT6)
+
+DEFINE_OPIVV(vsrl, VSRL_FUNCT6)
+DEFINE_OPIVX(vsrl, VSRL_FUNCT6)
+DEFINE_OPIVI(vsrl, VSRL_FUNCT6)
+
+DEFINE_OPIVV(vsll, VSLL_FUNCT6)
+DEFINE_OPIVX(vsll, VSLL_FUNCT6)
+DEFINE_OPIVI(vsll, VSLL_FUNCT6)
+
+DEFINE_OPMVV(vredmaxu, VREDMAXU_FUNCT6)
+DEFINE_OPMVV(vredmax, VREDMAX_FUNCT6)
+DEFINE_OPMVV(vredmin, VREDMIN_FUNCT6)
+DEFINE_OPMVV(vredminu, VREDMINU_FUNCT6)
+#undef DEFINE_OPIVI
+#undef DEFINE_OPIVV
+#undef DEFINE_OPIVX
+
+void Assembler::vsetvli(Register rd, Register rs1, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail, MaskAgnosticType mask) {
+ int32_t zimm = GenZimm(vsew, vlmul, tail, mask);
+ Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
+ ((rs1.code() & 0x1F) << kRvvRs1Shift) |
+ (((uint32_t)zimm << kRvvZimmShift) & kRvvZimmMask) | 0x0 << 31;
+ emit(instr);
+}
+
+void Assembler::vsetivli(Register rd, uint8_t uimm, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail, MaskAgnosticType mask) {
+ DCHECK(is_uint5(uimm));
+ int32_t zimm = GenZimm(vsew, vlmul, tail, mask) & 0x3FF;
+ Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
+ ((uimm & 0x1F) << kRvvUimmShift) |
+ (((uint32_t)zimm << kRvvZimmShift) & kRvvZimmMask) | 0x3 << 30;
+ emit(instr);
+}
+
+void Assembler::vsetvl(Register rd, Register rs1, Register rs2) {
+ Instr instr = OP_V | ((rd.code() & 0x1F) << kRvvRdShift) | (0x7 << 12) |
+ ((rs1.code() & 0x1F) << kRvvRs1Shift) |
+ ((rs2.code() & 0x1F) << kRvvRs2Shift) | 0x40 << 25;
+ emit(instr);
+}
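+// The 0x0 << 31, 0x3 << 30 and 0x40 << 25 terms above select the vsetvli,
+// vsetivli and vsetvl encodings respectively (cf. RO_V_VSETVLI,
+// RO_V_VSETIVLI and RO_V_VSETVL in constants-riscv64.h).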
+
+uint8_t vsew_switch(VSew vsew) {
+ uint8_t width;
+ switch (vsew) {
+ case E8:
+ width = 0b000;
+ break;
+ case E16:
+ width = 0b101;
+ break;
+ case E32:
+ width = 0b110;
+ break;
+ case E64:
+ width = 0b111;
+ break;
+ case E128:
+ width = 0b000;
+ break;
+ case E256:
+ width = 0b101;
+ break;
+ case E512:
+ width = 0b110;
+ break;
+ case E1024:
+ width = 0b111;
+ break;
+ }
+ return width;
+}
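+// The width encodings repeat across the two ranges (E8/E128 -> 0b000,
+// E16/E256 -> 0b101, ...); the mew bit, computed as vsew >= E128 by the
+// callers below, disambiguates them.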
+
+void Assembler::vl(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b000);
+}
+void Assembler::vls(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b000);
+}
+void Assembler::vlx(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, vs2, mask, 0b11, IsMew, 0);
+}
+
+void Assembler::vs(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b000);
+}
+void Assembler::vss(VRegister vs3, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vs3, rs1, rs2, mask, 0b10, IsMew, 0b000);
+}
+
+void Assembler::vsx(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b11, IsMew, 0b000);
+}
+void Assembler::vsu(VRegister vd, Register rs1, VRegister vs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, vs2, mask, 0b01, IsMew, 0b000);
+}
+
+void Assembler::vlseg2(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b001);
+}
+
+void Assembler::vlseg3(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b010);
+}
+
+void Assembler::vlseg4(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b011);
+}
+
+void Assembler::vlseg5(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b100);
+}
+
+void Assembler::vlseg6(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b101);
+}
+
+void Assembler::vlseg7(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b110);
+}
+
+void Assembler::vlseg8(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, lumop, mask, 0b00, IsMew, 0b111);
+}
+void Assembler::vsseg2(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b001);
+}
+void Assembler::vsseg3(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b010);
+}
+void Assembler::vsseg4(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b011);
+}
+void Assembler::vsseg5(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b100);
+}
+void Assembler::vsseg6(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b101);
+}
+void Assembler::vsseg7(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b110);
+}
+void Assembler::vsseg8(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, sumop, mask, 0b00, IsMew, 0b111);
+}
+
+void Assembler::vlsseg2(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b001);
+}
+void Assembler::vlsseg3(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b010);
+}
+void Assembler::vlsseg4(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b011);
+}
+void Assembler::vlsseg5(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b100);
+}
+void Assembler::vlsseg6(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b101);
+}
+void Assembler::vlsseg7(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b110);
+}
+void Assembler::vlsseg8(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b111);
+}
+void Assembler::vssseg2(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b001);
+}
+void Assembler::vssseg3(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b010);
+}
+void Assembler::vssseg4(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b011);
+}
+void Assembler::vssseg5(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b100);
+}
+void Assembler::vssseg6(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b101);
+}
+void Assembler::vssseg7(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b110);
+}
+void Assembler::vssseg8(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b10, IsMew, 0b111);
+}
+
+void Assembler::vlxseg2(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b001);
+}
+void Assembler::vlxseg3(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b010);
+}
+void Assembler::vlxseg4(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b011);
+}
+void Assembler::vlxseg5(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b100);
+}
+void Assembler::vlxseg6(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b101);
+}
+void Assembler::vlxseg7(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b110);
+}
+void Assembler::vlxseg8(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(LOAD_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b111);
+}
+void Assembler::vsxseg2(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b001);
+}
+void Assembler::vsxseg3(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b010);
+}
+void Assembler::vsxseg4(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b011);
+}
+void Assembler::vsxseg5(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b100);
+}
+void Assembler::vsxseg6(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b101);
+}
+void Assembler::vsxseg7(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b110);
+}
+void Assembler::vsxseg8(VRegister vd, Register rs1, VRegister rs2, VSew vsew,
+ MaskType mask) {
+ bool IsMew = vsew >= E128 ? true : false;
+ uint8_t width = vsew_switch(vsew);
+ GenInstrV(STORE_FP, width, vd, rs1, rs2, mask, 0b11, IsMew, 0b111);
+}
+
+// Privileged
void Assembler::uret() {
GenInstrPriv(0b0000000, ToRegister(0), ToRegister(0b00010));
}
@@ -2723,8 +3353,6 @@ void Assembler::AdjustBaseAndOffset(MemOperand* src, Register scratch,
// for a load/store when the offset doesn't fit into int12.
// Must not overwrite the register 'base' while loading 'offset'.
- DCHECK(src->rm() != scratch);
-
constexpr int32_t kMinOffsetForSimpleAdjustment = 0x7F8;
constexpr int32_t kMaxOffsetForSimpleAdjustment =
2 * kMinOffsetForSimpleAdjustment;
@@ -2766,7 +3394,6 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
return 8; // Number of instructions patched.
} else {
UNIMPLEMENTED();
- return 1;
}
}
diff --git a/deps/v8/src/codegen/riscv64/assembler-riscv64.h b/deps/v8/src/codegen/riscv64/assembler-riscv64.h
index 88e403d366..7da77f8e0e 100644
--- a/deps/v8/src/codegen/riscv64/assembler-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/assembler-riscv64.h
@@ -666,6 +666,207 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void NOP();
void EBREAK();
+ // RVV
+ static int32_t GenZimm(VSew vsew, Vlmul vlmul, TailAgnosticType tail = tu,
+ MaskAgnosticType mask = mu) {
+ return (mask << 7) | (tail << 6) | ((vsew & 0x7) << 3) | (vlmul & 0x7);
+ }
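+  // Illustrative example: GenZimm(E64, m1) with the default tu/mu packs
+  // vlmul into bits [2:0], vsew into bits [5:3], vta into bit 6 and vma
+  // into bit 7, giving 0b011000 = 0x18.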
+
+ void vsetvli(Register rd, Register rs1, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail = tu, MaskAgnosticType mask = mu);
+
+ void vsetivli(Register rd, uint8_t uimm, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail = tu, MaskAgnosticType mask = mu);
+
+ inline void vsetvlmax(Register rd, VSew vsew, Vlmul vlmul,
+ TailAgnosticType tail = tu,
+ MaskAgnosticType mask = mu) {
+    vsetvli(rd, zero_reg, vsew, vlmul, tail, mask);
+ }
+
+ inline void vsetvl(VSew vsew, Vlmul vlmul, TailAgnosticType tail = tu,
+ MaskAgnosticType mask = mu) {
+    vsetvli(zero_reg, zero_reg, vsew, vlmul, tail, mask);
+ }
+
+ void vsetvl(Register rd, Register rs1, Register rs2);
+
+ void vl(VRegister vd, Register rs1, uint8_t lumop, VSew vsew,
+ MaskType mask = NoMask);
+ void vls(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask = NoMask);
+ void vlx(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
+ MaskType mask = NoMask);
+
+ void vs(VRegister vd, Register rs1, uint8_t sumop, VSew vsew,
+ MaskType mask = NoMask);
+ void vss(VRegister vd, Register rs1, Register rs2, VSew vsew,
+ MaskType mask = NoMask);
+ void vsx(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
+ MaskType mask = NoMask);
+
+ void vsu(VRegister vd, Register rs1, VRegister vs3, VSew vsew,
+ MaskType mask = NoMask);
+
+#define SegInstr(OP) \
+ void OP##seg2(ARG); \
+ void OP##seg3(ARG); \
+ void OP##seg4(ARG); \
+ void OP##seg5(ARG); \
+ void OP##seg6(ARG); \
+ void OP##seg7(ARG); \
+ void OP##seg8(ARG);
+
+#define ARG \
+ VRegister vd, Register rs1, uint8_t lumop, VSew vsew, MaskType mask = NoMask
+
+ SegInstr(vl) SegInstr(vs)
+#undef ARG
+
+#define ARG \
+ VRegister vd, Register rs1, Register rs2, VSew vsew, MaskType mask = NoMask
+
+ SegInstr(vls) SegInstr(vss)
+#undef ARG
+
+#define ARG \
+ VRegister vd, Register rs1, VRegister rs2, VSew vsew, MaskType mask = NoMask
+
+ SegInstr(vsx) SegInstr(vlx)
+#undef ARG
+#undef SegInstr
+
+ // RVV Vector Arithmetic Instruction
+
+ void vmv_vv(VRegister vd, VRegister vs1);
+ void vmv_vx(VRegister vd, Register rs1);
+ void vmv_vi(VRegister vd, uint8_t simm5);
+ void vmv_xs(Register rd, VRegister vs2);
+ void vmv_sx(VRegister vd, Register rs1);
+ void vmerge_vv(VRegister vd, VRegister vs1, VRegister vs2);
+ void vmerge_vx(VRegister vd, Register rs1, VRegister vs2);
+ void vmerge_vi(VRegister vd, uint8_t imm5, VRegister vs2);
+
+ void vadc_vv(VRegister vd, VRegister vs1, VRegister vs2);
+ void vadc_vx(VRegister vd, Register rs1, VRegister vs2);
+ void vadc_vi(VRegister vd, uint8_t imm5, VRegister vs2);
+
+ void vmadc_vv(VRegister vd, VRegister vs1, VRegister vs2);
+ void vmadc_vx(VRegister vd, Register rs1, VRegister vs2);
+ void vmadc_vi(VRegister vd, uint8_t imm5, VRegister vs2);
+
+#define DEFINE_OPIVV(name, funct6) \
+ void name##_vv(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
+#define DEFINE_OPIVX(name, funct6) \
+ void name##_vx(VRegister vd, VRegister vs2, Register rs1, \
+ MaskType mask = NoMask);
+
+#define DEFINE_OPIVI(name, funct6) \
+ void name##_vi(VRegister vd, VRegister vs2, int8_t imm5, \
+ MaskType mask = NoMask);
+
+#define DEFINE_OPMVV(name, funct6) \
+ void name##_vs(VRegister vd, VRegister vs2, VRegister vs1, \
+ MaskType mask = NoMask);
+
+#define DEFINE_OPMVX(name, funct6) \
+ void name##_vx(VRegister vd, VRegister vs2, Register rs1, \
+ MaskType mask = NoMask);
+
+ DEFINE_OPIVV(vadd, VADD_FUNCT6)
+ DEFINE_OPIVX(vadd, VADD_FUNCT6)
+ DEFINE_OPIVI(vadd, VADD_FUNCT6)
+ DEFINE_OPIVV(vsub, VSUB_FUNCT6)
+ DEFINE_OPIVX(vsub, VSUB_FUNCT6)
+ DEFINE_OPIVX(vsadd, VSADD_FUNCT6)
+ DEFINE_OPIVV(vsadd, VSADD_FUNCT6)
+ DEFINE_OPIVI(vsadd, VSADD_FUNCT6)
+ DEFINE_OPIVX(vsaddu, VSADD_FUNCT6)
+ DEFINE_OPIVV(vsaddu, VSADD_FUNCT6)
+ DEFINE_OPIVI(vsaddu, VSADD_FUNCT6)
+ DEFINE_OPIVX(vssub, VSSUB_FUNCT6)
+ DEFINE_OPIVV(vssub, VSSUB_FUNCT6)
+ DEFINE_OPIVX(vssubu, VSSUBU_FUNCT6)
+ DEFINE_OPIVV(vssubu, VSSUBU_FUNCT6)
+ DEFINE_OPIVX(vrsub, VRSUB_FUNCT6)
+ DEFINE_OPIVI(vrsub, VRSUB_FUNCT6)
+ DEFINE_OPIVV(vminu, VMINU_FUNCT6)
+ DEFINE_OPIVX(vminu, VMINU_FUNCT6)
+ DEFINE_OPIVV(vmin, VMIN_FUNCT6)
+ DEFINE_OPIVX(vmin, VMIN_FUNCT6)
+ DEFINE_OPIVV(vmaxu, VMAXU_FUNCT6)
+ DEFINE_OPIVX(vmaxu, VMAXU_FUNCT6)
+ DEFINE_OPIVV(vmax, VMAX_FUNCT6)
+ DEFINE_OPIVX(vmax, VMAX_FUNCT6)
+ DEFINE_OPIVV(vand, VAND_FUNCT6)
+ DEFINE_OPIVX(vand, VAND_FUNCT6)
+ DEFINE_OPIVI(vand, VAND_FUNCT6)
+ DEFINE_OPIVV(vor, VOR_FUNCT6)
+ DEFINE_OPIVX(vor, VOR_FUNCT6)
+ DEFINE_OPIVI(vor, VOR_FUNCT6)
+ DEFINE_OPIVV(vxor, VXOR_FUNCT6)
+ DEFINE_OPIVX(vxor, VXOR_FUNCT6)
+ DEFINE_OPIVI(vxor, VXOR_FUNCT6)
+ DEFINE_OPIVV(vrgather, VRGATHER_FUNCT6)
+ DEFINE_OPIVX(vrgather, VRGATHER_FUNCT6)
+ DEFINE_OPIVI(vrgather, VRGATHER_FUNCT6)
+
+ DEFINE_OPIVX(vslidedown, VSLIDEDOWN_FUNCT6)
+ DEFINE_OPIVI(vslidedown, VSLIDEDOWN_FUNCT6)
+ DEFINE_OPIVX(vslideup, VSLIDEUP_FUNCT6)
+ DEFINE_OPIVI(vslideup, VSLIDEUP_FUNCT6)
+
+ DEFINE_OPIVV(vmseq, VMSEQ_FUNCT6)
+ DEFINE_OPIVX(vmseq, VMSEQ_FUNCT6)
+ DEFINE_OPIVI(vmseq, VMSEQ_FUNCT6)
+
+ DEFINE_OPIVV(vmsne, VMSNE_FUNCT6)
+ DEFINE_OPIVX(vmsne, VMSNE_FUNCT6)
+ DEFINE_OPIVI(vmsne, VMSNE_FUNCT6)
+
+ DEFINE_OPIVV(vmsltu, VMSLTU_FUNCT6)
+ DEFINE_OPIVX(vmsltu, VMSLTU_FUNCT6)
+
+ DEFINE_OPIVV(vmslt, VMSLT_FUNCT6)
+ DEFINE_OPIVX(vmslt, VMSLT_FUNCT6)
+
+ DEFINE_OPIVV(vmsle, VMSLE_FUNCT6)
+ DEFINE_OPIVX(vmsle, VMSLE_FUNCT6)
+ DEFINE_OPIVI(vmsle, VMSLE_FUNCT6)
+
+ DEFINE_OPIVV(vmsleu, VMSLEU_FUNCT6)
+ DEFINE_OPIVX(vmsleu, VMSLEU_FUNCT6)
+ DEFINE_OPIVI(vmsleu, VMSLEU_FUNCT6)
+
+ DEFINE_OPIVI(vmsgt, VMSGT_FUNCT6)
+ DEFINE_OPIVX(vmsgt, VMSGT_FUNCT6)
+
+ DEFINE_OPIVI(vmsgtu, VMSGTU_FUNCT6)
+ DEFINE_OPIVX(vmsgtu, VMSGTU_FUNCT6)
+
+ DEFINE_OPIVV(vsrl, VSRL_FUNCT6)
+ DEFINE_OPIVX(vsrl, VSRL_FUNCT6)
+ DEFINE_OPIVI(vsrl, VSRL_FUNCT6)
+
+ DEFINE_OPIVV(vsll, VSLL_FUNCT6)
+ DEFINE_OPIVX(vsll, VSLL_FUNCT6)
+ DEFINE_OPIVI(vsll, VSLL_FUNCT6)
+
+ DEFINE_OPMVV(vredmaxu, VREDMAXU_FUNCT6)
+ DEFINE_OPMVV(vredmax, VREDMAX_FUNCT6)
+ DEFINE_OPMVV(vredmin, VREDMIN_FUNCT6)
+ DEFINE_OPMVV(vredminu, VREDMINU_FUNCT6)
+#undef DEFINE_OPIVI
+#undef DEFINE_OPIVV
+#undef DEFINE_OPIVX
+#undef DEFINE_OPMVV
+#undef DEFINE_OPMVX
+
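+  // Standard RVV pseudo-instructions: vnot.v is vxor.vi vd, vs, -1 and
+  // vneg.v is vrsub.vx vd, vs, zero.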
+ void vnot_vv(VRegister dst, VRegister src) { vxor_vi(dst, src, -1); }
+
+ void vneg_vv(VRegister dst, VRegister src) { vrsub_vx(dst, src, zero_reg); }
// Privileged
void uret();
void sret();
@@ -942,6 +1143,55 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
constpool_.RecordEntry(data, rmode);
}
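+  // Caches the last vtype (sew/lmul) written via vsetvli/vsetvl so that
+  // consecutive vector instructions can skip redundant vtype updates.
+  // Typical use (sketch): VU.set(t0, E8, m1); then e.g. vadd_vv(v0, v1, v2);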
+ class VectorUnit {
+ public:
+    // Element width in bits: 1 << (sew_ + 3), i.e. 8 for E8, 16 for E16, ...
+    inline int32_t sew() const { return 1 << (sew_ + 3); }
+
+ inline int32_t vlmax() const {
+ if ((lmul_ & 0b100) != 0) {
+ return (kRvvVLEN / sew()) >> (lmul_ & 0b11);
+ } else {
+ return ((kRvvVLEN << lmul_) / sew());
+ }
+ }
+
+ explicit VectorUnit(Assembler* assm) : assm_(assm) {}
+
+ void set(Register rd, VSew sew, Vlmul lmul) {
+ if (sew != sew_ || lmul != lmul_ || vl != vlmax()) {
+ sew_ = sew;
+ lmul_ = lmul;
+ vl = vlmax();
+ assm_->vsetvlmax(rd, sew_, lmul_);
+ }
+ }
+
+ void set(Register rd, Register rs1, VSew sew, Vlmul lmul) {
+ if (sew != sew_ || lmul != lmul_) {
+ sew_ = sew;
+ lmul_ = lmul;
+ vl = 0;
+ assm_->vsetvli(rd, rs1, sew_, lmul_);
+ }
+ }
+
+ void set(VSew sew, Vlmul lmul) {
+ if (sew != sew_ || lmul != lmul_) {
+ sew_ = sew;
+ lmul_ = lmul;
+ assm_->vsetvl(sew_, lmul_);
+ }
+ }
+
+ private:
+ VSew sew_ = E8;
+ Vlmul lmul_ = m1;
+ int32_t vl = 0;
+ Assembler* assm_;
+ };
+
+ VectorUnit VU;
+
protected:
// Readable constants for base and offset adjustment helper, these indicate if
// aside from offset, another value like offset + 4 should fit into int16.
@@ -1192,6 +1442,42 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, Register rd,
FPURegister rs1, FPURegister rs2);
+ // ----------------------------RVV------------------------------------------
+ // vsetvl
+ void GenInstrV(Register rd, Register rs1, Register rs2);
+ // vsetvli
+ void GenInstrV(Register rd, Register rs1, uint32_t zimm);
+ // OPIVV OPFVV OPMVV
+ void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, VRegister vs1,
+ VRegister vs2, MaskType mask = NoMask);
+ // OPMVV OPFVV
+ void GenInstrV(uint8_t funct6, Opcode opcode, Register rd, VRegister vs1,
+ VRegister vs2, MaskType mask = NoMask);
+
+ // OPIVX OPFVF OPMVX
+ void GenInstrV(uint8_t funct6, Opcode opcode, VRegister vd, Register rs1,
+ VRegister vs2, MaskType mask = NoMask);
+
+ // OPMVX
+ void GenInstrV(uint8_t funct6, Register rd, Register rs1, VRegister vs2,
+ MaskType mask = NoMask);
+ // OPIVI
+ void GenInstrV(uint8_t funct6, VRegister vd, int8_t simm5, VRegister vs2,
+ MaskType mask = NoMask);
+
+ // VL VS
+ void GenInstrV(Opcode opcode, uint8_t width, VRegister vd, Register rs1,
+ uint8_t umop, MaskType mask, uint8_t IsMop, bool IsMew,
+ uint8_t Nf);
+
+ void GenInstrV(Opcode opcode, uint8_t width, VRegister vd, Register rs1,
+ Register rs2, MaskType mask, uint8_t IsMop, bool IsMew,
+ uint8_t Nf);
+ // VL VS AMO
+ void GenInstrV(Opcode opcode, uint8_t width, VRegister vd, Register rs1,
+ VRegister vs2, MaskType mask, uint8_t IsMop, bool IsMew,
+ uint8_t Nf);
+
// Labels.
void print(const Label* L);
void bind_to(Label* L, int pos);
diff --git a/deps/v8/src/codegen/riscv64/constants-riscv64.cc b/deps/v8/src/codegen/riscv64/constants-riscv64.cc
index d2709dc2c7..655a97c12f 100644
--- a/deps/v8/src/codegen/riscv64/constants-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/constants-riscv64.cc
@@ -105,6 +105,45 @@ int FPURegisters::Number(const char* name) {
return kInvalidFPURegister;
}
+const char* VRegisters::names_[kNumVRegisters] = {
+ "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
+ "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21",
+ "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"};
+
+const VRegisters::RegisterAlias VRegisters::aliases_[] = {
+ {kInvalidRegister, nullptr}};
+
+const char* VRegisters::Name(int creg) {
+ const char* result;
+ if ((0 <= creg) && (creg < kNumVRegisters)) {
+ result = names_[creg];
+ } else {
+ result = "nocreg";
+ }
+ return result;
+}
+
+int VRegisters::Number(const char* name) {
+ // Look through the canonical names.
+ for (int i = 0; i < kNumVRegisters; i++) {
+ if (strcmp(names_[i], name) == 0) {
+ return i;
+ }
+ }
+
+ // Look through the alias names.
+ int i = 0;
+ while (aliases_[i].creg != kInvalidRegister) {
+ if (strcmp(aliases_[i].name, name) == 0) {
+ return aliases_[i].creg;
+ }
+ i++;
+ }
+
+  // No vector register with the requested name found.
+ return kInvalidVRegister;
+}
+
InstructionBase::Type InstructionBase::InstructionType() const {
if (IsIllegalInstruction()) {
return kUnsupported;
@@ -193,6 +232,8 @@ InstructionBase::Type InstructionBase::InstructionType() const {
return kJType;
case SYSTEM:
return kIType;
+ case OP_V:
+ return kVType;
}
}
return kUnsupported;
diff --git a/deps/v8/src/codegen/riscv64/constants-riscv64.h b/deps/v8/src/codegen/riscv64/constants-riscv64.h
index c9cb7687fd..934b962955 100644
--- a/deps/v8/src/codegen/riscv64/constants-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/constants-riscv64.h
@@ -12,14 +12,15 @@
// UNIMPLEMENTED_ macro for RISCV.
#ifdef DEBUG
-#define UNIMPLEMENTED_RISCV() \
- v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
- __FILE__, __LINE__, __func__)
+#define UNIMPLEMENTED_RISCV() \
+ v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n", \
+ __FILE__, __LINE__, __func__);
#else
#define UNIMPLEMENTED_RISCV()
#endif
-#define UNSUPPORTED_RISCV() v8::internal::PrintF("Unsupported instruction.\n")
+#define UNSUPPORTED_RISCV() \
+ v8::internal::PrintF("Unsupported instruction %d.\n", __LINE__)
enum Endianness { kLittle, kBig };
@@ -75,6 +76,9 @@ const int kPCRegister = 34;
const int kNumFPURegisters = 32;
const int kInvalidFPURegister = -1;
+// Number of vector registers
+const int kNumVRegisters = 32;
+const int kInvalidVRegister = -1;
// 'pref' instruction hints
const int32_t kPrefHintLoad = 0;
const int32_t kPrefHintStore = 1;
@@ -131,6 +135,24 @@ class FPURegisters {
static const RegisterAlias aliases_[];
};
+class VRegisters {
+ public:
+ // Return the name of the register.
+ static const char* Name(int reg);
+
+ // Lookup the register number for the name provided.
+ static int Number(const char* name);
+
+ struct RegisterAlias {
+ int creg;
+ const char* name;
+ };
+
+ private:
+ static const char* names_[kNumVRegisters];
+ static const RegisterAlias aliases_[];
+};
+
// -----------------------------------------------------------------------------
// Instructions encoding constants.
@@ -170,6 +192,12 @@ const int kFunct2Shift = 25;
const int kFunct2Bits = 2;
const int kRs1Shift = 15;
const int kRs1Bits = 5;
+const int kVs1Shift = 15;
+const int kVs1Bits = 5;
+const int kVs2Shift = 20;
+const int kVs2Bits = 5;
+const int kVdShift = 7;
+const int kVdBits = 5;
const int kRs2Shift = 20;
const int kRs2Bits = 5;
const int kRs3Shift = 27;
@@ -215,6 +243,71 @@ const int kRvcFunct2Bits = 2;
const int kRvcFunct6Shift = 10;
const int kRvcFunct6Bits = 6;
+// Constants for the RVV extension
+constexpr int kRvvELEN = 64;
+constexpr int kRvvVLEN = 128;
+constexpr int kRvvSLEN = kRvvVLEN;
+const int kRvvFunct6Shift = 26;
+const int kRvvFunct6Bits = 6;
+const uint32_t kRvvFunct6Mask =
+ (((1 << kRvvFunct6Bits) - 1) << kRvvFunct6Shift);
+
+const int kRvvVmBits = 1;
+const int kRvvVmShift = 25;
+const uint32_t kRvvVmMask = (((1 << kRvvVmBits) - 1) << kRvvVmShift);
+
+const int kRvvVs2Bits = 5;
+const int kRvvVs2Shift = 20;
+const uint32_t kRvvVs2Mask = (((1 << kRvvVs2Bits) - 1) << kRvvVs2Shift);
+
+const int kRvvVs1Bits = 5;
+const int kRvvVs1Shift = 15;
+const uint32_t kRvvVs1Mask = (((1 << kRvvVs1Bits) - 1) << kRvvVs1Shift);
+
+const int kRvvRs1Bits = kRvvVs1Bits;
+const int kRvvRs1Shift = kRvvVs1Shift;
+const uint32_t kRvvRs1Mask = (((1 << kRvvRs1Bits) - 1) << kRvvRs1Shift);
+
+const int kRvvRs2Bits = 5;
+const int kRvvRs2Shift = 20;
+const uint32_t kRvvRs2Mask = (((1 << kRvvRs2Bits) - 1) << kRvvRs2Shift);
+
+const int kRvvImm5Bits = kRvvVs1Bits;
+const int kRvvImm5Shift = kRvvVs1Shift;
+const uint32_t kRvvImm5Mask = (((1 << kRvvImm5Bits) - 1) << kRvvImm5Shift);
+
+const int kRvvVdBits = 5;
+const int kRvvVdShift = 7;
+const uint32_t kRvvVdMask = (((1 << kRvvVdBits) - 1) << kRvvVdShift);
+
+const int kRvvRdBits = kRvvVdBits;
+const int kRvvRdShift = kRvvVdShift;
+const uint32_t kRvvRdMask = (((1 << kRvvRdBits) - 1) << kRvvRdShift);
+
+const int kRvvZimmBits = 11;
+const int kRvvZimmShift = 20;
+const uint32_t kRvvZimmMask = (((1 << kRvvZimmBits) - 1) << kRvvZimmShift);
+
+const int kRvvUimmShift = kRvvRs1Shift;
+const int kRvvUimmBits = kRvvRs1Bits;
+const uint32_t kRvvUimmMask = (((1 << kRvvUimmBits) - 1) << kRvvUimmShift);
+
+const int kRvvWidthBits = 3;
+const int kRvvWidthShift = 12;
+const uint32_t kRvvWidthMask = (((1 << kRvvWidthBits) - 1) << kRvvWidthShift);
+
+const int kRvvMopBits = 2;
+const int kRvvMopShift = 26;
+const uint32_t kRvvMopMask = (((1 << kRvvMopBits) - 1) << kRvvMopShift);
+
+const int kRvvMewBits = 1;
+const int kRvvMewShift = 28;
+const uint32_t kRvvMewMask = (((1 << kRvvMewBits) - 1) << kRvvMewShift);
+
+const int kRvvNfBits = 3;
+const int kRvvNfShift = 29;
+const uint32_t kRvvNfMask = (((1 << kRvvNfBits) - 1) << kRvvNfShift);
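+// Putting the fields together, an RVV load/store instruction has the layout
+// nf[31:29] mew[28] mop[27:26] vm[25] rs2/umop[24:20] rs1[19:15]
+// width[14:12] vd[11:7] opcode[6:0].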
+
// RISCV Instruction bit masks
const uint32_t kBaseOpcodeMask = ((1 << kBaseOpcodeBits) - 1)
<< kBaseOpcodeShift;
@@ -231,6 +324,7 @@ const uint32_t kSTypeMask = kBaseOpcodeMask | kFunct3Mask;
const uint32_t kBTypeMask = kBaseOpcodeMask | kFunct3Mask;
const uint32_t kUTypeMask = kBaseOpcodeMask;
const uint32_t kJTypeMask = kBaseOpcodeMask;
+const uint32_t kVTypeMask = kRvvFunct6Mask | kFunct3Mask | kBaseOpcodeMask;
const uint32_t kRs1FieldMask = ((1 << kRs1Bits) - 1) << kRs1Shift;
const uint32_t kRs2FieldMask = ((1 << kRs2Bits) - 1) << kRs2Shift;
const uint32_t kRs3FieldMask = ((1 << kRs3Bits) - 1) << kRs3Shift;
@@ -535,6 +629,235 @@ enum Opcode : uint32_t {
RO_C_FSDSP = C2 | (0b101 << kRvcFunct3Shift),
RO_C_SWSP = C2 | (0b110 << kRvcFunct3Shift),
RO_C_SDSP = C2 | (0b111 << kRvcFunct3Shift),
+
+ // RVV Extension
+ OP_V = 0b1010111,
+ OP_IVV = OP_V | (0b000 << kFunct3Shift),
+ OP_FVV = OP_V | (0b001 << kFunct3Shift),
+ OP_MVV = OP_V | (0b010 << kFunct3Shift),
+ OP_IVI = OP_V | (0b011 << kFunct3Shift),
+ OP_IVX = OP_V | (0b100 << kFunct3Shift),
+ OP_FVF = OP_V | (0b101 << kFunct3Shift),
+ OP_MVX = OP_V | (0b110 << kFunct3Shift),
+
+ RO_V_VSETVLI = OP_V | (0b111 << kFunct3Shift) | 0b0 << 31,
+ RO_V_VSETIVLI = OP_V | (0b111 << kFunct3Shift) | 0b11 << 30,
+ RO_V_VSETVL = OP_V | (0b111 << kFunct3Shift) | 0b1 << 31,
+
+ // RVV LOAD/STORE
+ RO_V_VL = LOAD_FP | (0b00 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ RO_V_VLS = LOAD_FP | (0b10 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ RO_V_VLX = LOAD_FP | (0b11 << kRvvMopShift) | (0b000 << kRvvNfShift),
+
+ RO_V_VS = STORE_FP | (0b00 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ RO_V_VSS = STORE_FP | (0b10 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ RO_V_VSX = STORE_FP | (0b11 << kRvvMopShift) | (0b000 << kRvvNfShift),
+ RO_V_VSU = STORE_FP | (0b01 << kRvvMopShift) | (0b000 << kRvvNfShift),
+  // For loads/stores the mop field overlaps the funct6 position
+  // (kRvvMopShift == kRvvFunct6Shift).
+ RO_V_VLSEG2 = LOAD_FP | (0b00 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VLSEG3 = LOAD_FP | (0b00 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VLSEG4 = LOAD_FP | (0b00 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VLSEG5 = LOAD_FP | (0b00 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VLSEG6 = LOAD_FP | (0b00 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VLSEG7 = LOAD_FP | (0b00 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VLSEG8 = LOAD_FP | (0b00 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ RO_V_VSSEG2 = STORE_FP | (0b00 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VSSEG3 = STORE_FP | (0b00 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VSSEG4 = STORE_FP | (0b00 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VSSEG5 = STORE_FP | (0b00 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VSSEG6 = STORE_FP | (0b00 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VSSEG7 = STORE_FP | (0b00 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VSSEG8 = STORE_FP | (0b00 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ RO_V_VLSSEG2 = LOAD_FP | (0b10 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VLSSEG3 = LOAD_FP | (0b10 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VLSSEG4 = LOAD_FP | (0b10 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VLSSEG5 = LOAD_FP | (0b10 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VLSSEG6 = LOAD_FP | (0b10 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VLSSEG7 = LOAD_FP | (0b10 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VLSSEG8 = LOAD_FP | (0b10 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ RO_V_VSSSEG2 = STORE_FP | (0b10 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VSSSEG3 = STORE_FP | (0b10 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VSSSEG4 = STORE_FP | (0b10 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VSSSEG5 = STORE_FP | (0b10 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VSSSEG6 = STORE_FP | (0b10 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VSSSEG7 = STORE_FP | (0b10 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VSSSEG8 = STORE_FP | (0b10 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ RO_V_VLXSEG2 = LOAD_FP | (0b11 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VLXSEG3 = LOAD_FP | (0b11 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VLXSEG4 = LOAD_FP | (0b11 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VLXSEG5 = LOAD_FP | (0b11 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VLXSEG6 = LOAD_FP | (0b11 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VLXSEG7 = LOAD_FP | (0b11 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VLXSEG8 = LOAD_FP | (0b11 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ RO_V_VSXSEG2 = STORE_FP | (0b11 << kRvvMopShift) | (0b001 << kRvvNfShift),
+ RO_V_VSXSEG3 = STORE_FP | (0b11 << kRvvMopShift) | (0b010 << kRvvNfShift),
+ RO_V_VSXSEG4 = STORE_FP | (0b11 << kRvvMopShift) | (0b011 << kRvvNfShift),
+ RO_V_VSXSEG5 = STORE_FP | (0b11 << kRvvMopShift) | (0b100 << kRvvNfShift),
+ RO_V_VSXSEG6 = STORE_FP | (0b11 << kRvvMopShift) | (0b101 << kRvvNfShift),
+ RO_V_VSXSEG7 = STORE_FP | (0b11 << kRvvMopShift) | (0b110 << kRvvNfShift),
+ RO_V_VSXSEG8 = STORE_FP | (0b11 << kRvvMopShift) | (0b111 << kRvvNfShift),
+
+ // RVV Vector Arithmetic Instruction
+ VADD_FUNCT6 = 0b000000,
+ RO_V_VADD_VI = OP_IVI | (VADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VADD_VV = OP_IVV | (VADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VADD_VX = OP_IVX | (VADD_FUNCT6 << kRvvFunct6Shift),
+
+ VSUB_FUNCT6 = 0b000010,
+ RO_V_VSUB_VX = OP_IVX | (VSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSUB_VV = OP_IVV | (VSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VSADDU_FUNCT6 = 0b100000,
+ RO_V_VSADDU_VI = OP_IVI | (VSADDU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSADDU_VV = OP_IVV | (VSADDU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSADDU_VX = OP_IVX | (VSADDU_FUNCT6 << kRvvFunct6Shift),
+
+ VSADD_FUNCT6 = 0b100001,
+ RO_V_VSADD_VI = OP_IVI | (VSADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSADD_VV = OP_IVV | (VSADD_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSADD_VX = OP_IVX | (VSADD_FUNCT6 << kRvvFunct6Shift),
+
+ VSSUB_FUNCT6 = 0b100011,
+ RO_V_VSSUB_VV = OP_IVV | (VSSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSSUB_VX = OP_IVX | (VSSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VSSUBU_FUNCT6 = 0b100010,
+ RO_V_VSSUBU_VV = OP_IVV | (VSSUBU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSSUBU_VX = OP_IVX | (VSSUBU_FUNCT6 << kRvvFunct6Shift),
+
+ VRSUB_FUNCT6 = 0b000011,
+ RO_V_VRSUB_VX = OP_IVX | (VRSUB_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VRSUB_VI = OP_IVI | (VRSUB_FUNCT6 << kRvvFunct6Shift),
+
+ VMINU_FUNCT6 = 0b000100,
+ RO_V_VMINU_VX = OP_IVX | (VMINU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMINU_VV = OP_IVV | (VMINU_FUNCT6 << kRvvFunct6Shift),
+
+ VMIN_FUNCT6 = 0b000101,
+ RO_V_VMIN_VX = OP_IVX | (VMIN_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMIN_VV = OP_IVV | (VMIN_FUNCT6 << kRvvFunct6Shift),
+
+ VMAXU_FUNCT6 = 0b000110,
+ RO_V_VMAXU_VX = OP_IVX | (VMAXU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMAXU_VV = OP_IVV | (VMAXU_FUNCT6 << kRvvFunct6Shift),
+
+ VMAX_FUNCT6 = 0b000111,
+ RO_V_VMAX_VX = OP_IVX | (VMAX_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMAX_VV = OP_IVV | (VMAX_FUNCT6 << kRvvFunct6Shift),
+
+ VAND_FUNCT6 = 0b001001,
+ RO_V_VAND_VI = OP_IVI | (VAND_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VAND_VV = OP_IVV | (VAND_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VAND_VX = OP_IVX | (VAND_FUNCT6 << kRvvFunct6Shift),
+
+ VOR_FUNCT6 = 0b001010,
+ RO_V_VOR_VI = OP_IVI | (VOR_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VOR_VV = OP_IVV | (VOR_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VOR_VX = OP_IVX | (VOR_FUNCT6 << kRvvFunct6Shift),
+
+ VXOR_FUNCT6 = 0b001011,
+ RO_V_VXOR_VI = OP_IVI | (VXOR_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VXOR_VV = OP_IVV | (VXOR_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VXOR_VX = OP_IVX | (VXOR_FUNCT6 << kRvvFunct6Shift),
+
+ VRGATHER_FUNCT6 = 0b001100,
+ RO_V_VRGATHER_VI = OP_IVI | (VRGATHER_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VRGATHER_VV = OP_IVV | (VRGATHER_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VRGATHER_VX = OP_IVX | (VRGATHER_FUNCT6 << kRvvFunct6Shift),
+
+ VMV_FUNCT6 = 0b010111,
+ RO_V_VMV_VI = OP_IVI | (VMV_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMV_VV = OP_IVV | (VMV_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMV_VX = OP_IVX | (VMV_FUNCT6 << kRvvFunct6Shift),
+
+ RO_V_VMERGE_VI = RO_V_VMV_VI,
+ RO_V_VMERGE_VV = RO_V_VMV_VV,
+ RO_V_VMERGE_VX = RO_V_VMV_VX,
+
+ VMSEQ_FUNCT6 = 0b011000,
+ RO_V_VMSEQ_VI = OP_IVI | (VMSEQ_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSEQ_VV = OP_IVV | (VMSEQ_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSEQ_VX = OP_IVX | (VMSEQ_FUNCT6 << kRvvFunct6Shift),
+
+ VMSNE_FUNCT6 = 0b011001,
+ RO_V_VMSNE_VI = OP_IVI | (VMSNE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSNE_VV = OP_IVV | (VMSNE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSNE_VX = OP_IVX | (VMSNE_FUNCT6 << kRvvFunct6Shift),
+
+ VMSLTU_FUNCT6 = 0b011010,
+ RO_V_VMSLTU_VV = OP_IVV | (VMSLTU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLTU_VX = OP_IVX | (VMSLTU_FUNCT6 << kRvvFunct6Shift),
+
+ VMSLT_FUNCT6 = 0b011011,
+ RO_V_VMSLT_VV = OP_IVV | (VMSLT_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLT_VX = OP_IVX | (VMSLT_FUNCT6 << kRvvFunct6Shift),
+
+ VMSLE_FUNCT6 = 0b011101,
+ RO_V_VMSLE_VI = OP_IVI | (VMSLE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLE_VV = OP_IVV | (VMSLE_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLE_VX = OP_IVX | (VMSLE_FUNCT6 << kRvvFunct6Shift),
+
+ VMSLEU_FUNCT6 = 0b011100,
+ RO_V_VMSLEU_VI = OP_IVI | (VMSLEU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLEU_VV = OP_IVV | (VMSLEU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSLEU_VX = OP_IVX | (VMSLEU_FUNCT6 << kRvvFunct6Shift),
+
+ VMSGTU_FUNCT6 = 0b011110,
+ RO_V_VMSGTU_VI = OP_IVI | (VMSGTU_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSGTU_VX = OP_IVX | (VMSGTU_FUNCT6 << kRvvFunct6Shift),
+
+ VMSGT_FUNCT6 = 0b011111,
+ RO_V_VMSGT_VI = OP_IVI | (VMSGT_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMSGT_VX = OP_IVX | (VMSGT_FUNCT6 << kRvvFunct6Shift),
+
+ VSLIDEUP_FUNCT6 = 0b001110,
+ RO_V_VSLIDEUP_VI = OP_IVI | (VSLIDEUP_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSLIDEUP_VX = OP_IVX | (VSLIDEUP_FUNCT6 << kRvvFunct6Shift),
+
+ VSLIDEDOWN_FUNCT6 = 0b001111,
+ RO_V_VSLIDEDOWN_VI = OP_IVI | (VSLIDEDOWN_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSLIDEDOWN_VX = OP_IVX | (VSLIDEDOWN_FUNCT6 << kRvvFunct6Shift),
+
+ VSRL_FUNCT6 = 0b101000,
+ RO_V_VSRL_VI = OP_IVI | (VSRL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSRL_VV = OP_IVV | (VSRL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSRL_VX = OP_IVX | (VSRL_FUNCT6 << kRvvFunct6Shift),
+
+ VSLL_FUNCT6 = 0b100101,
+ RO_V_VSLL_VI = OP_IVI | (VSLL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSLL_VV = OP_IVV | (VSLL_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VSLL_VX = OP_IVX | (VSLL_FUNCT6 << kRvvFunct6Shift),
+
+ VADC_FUNCT6 = 0b010000,
+ RO_V_VADC_VI = OP_IVI | (VADC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VADC_VV = OP_IVV | (VADC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VADC_VX = OP_IVX | (VADC_FUNCT6 << kRvvFunct6Shift),
+
+ VMADC_FUNCT6 = 0b010001,
+ RO_V_VMADC_VI = OP_IVI | (VMADC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMADC_VV = OP_IVV | (VMADC_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VMADC_VX = OP_IVX | (VMADC_FUNCT6 << kRvvFunct6Shift),
+
+ VWXUNARY0_FUNCT6 = 0b010000,
+ VRXUNARY0_FUNCT6 = 0b010000,
+
+ RO_V_VWXUNARY0 = OP_MVV | (VWXUNARY0_FUNCT6 << kRvvFunct6Shift),
+ RO_V_VRXUNARY0 = OP_MVX | (VRXUNARY0_FUNCT6 << kRvvFunct6Shift),
+
+ VREDMAXU_FUNCT6 = 0b000110,
+ RO_V_VREDMAXU = OP_MVV | (VREDMAXU_FUNCT6 << kRvvFunct6Shift),
+ VREDMAX_FUNCT6 = 0b000111,
+ RO_V_VREDMAX = OP_MVV | (VREDMAX_FUNCT6 << kRvvFunct6Shift),
+
+ VREDMINU_FUNCT6 = 0b000100,
+ RO_V_VREDMINU = OP_MVV | (VREDMINU_FUNCT6 << kRvvFunct6Shift),
+ VREDMIN_FUNCT6 = 0b000101,
+ RO_V_VREDMIN = OP_MVV | (VREDMIN_FUNCT6 << kRvvFunct6Shift),
};
// ----- Emulated conditions.
@@ -681,6 +1004,52 @@ enum FClassFlag {
kQuietNaN = 1 << 9
};
+#define RVV_SEW(V) \
+ V(E8) \
+ V(E16) \
+ V(E32) \
+ V(E64) \
+ V(E128) \
+ V(E256) \
+ V(E512) \
+ V(E1024)
+
+enum VSew {
+#define DEFINE_FLAG(name) name,
+ RVV_SEW(DEFINE_FLAG)
+#undef DEFINE_FLAG
+};
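+// E8..E1024 enumerate to 0..7; GenZimm encodes the low three bits into the
+// vtype vsew field, and element widths of 128 bits and above are signalled
+// separately via the mew bit in loads/stores.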
+
+#define RVV_LMUL(V) \
+ V(m1) \
+ V(m2) \
+ V(m4) \
+ V(m8) \
+ V(RESERVERD) \
+ V(mf8) \
+ V(mf4) \
+ V(mf2)
+
+enum Vlmul {
+#define DEFINE_FLAG(name) name,
+ RVV_LMUL(DEFINE_FLAG)
+#undef DEFINE_FLAG
+};
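+// m1..m8 map to 0..3 and the fractional mf8/mf4/mf2 to 5..7, so a set bit 2
+// marks a fractional LMUL (cf. Assembler::VectorUnit::vlmax).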
+
+enum TailAgnosticType {
+ ta = 0x1, // Tail agnostic
+ tu = 0x0, // Tail undisturbed
+};
+
+enum MaskAgnosticType {
+ ma = 0x1, // Mask agnostic
+ mu = 0x0, // Mask undisturbed
+};
+enum MaskType {
+ Mask = 0x0, // use the mask
+ NoMask = 0x1,
+};
+
// -----------------------------------------------------------------------------
// Hints.
@@ -734,6 +1103,19 @@ class InstructionBase {
kCAType,
kCBType,
kCJType,
+ // V extension
+ kVType,
+ kVLType,
+ kVSType,
+ kVAMOType,
+ kVIVVType,
+ kVFVVType,
+ kVMVVType,
+ kVIVIType,
+ kVIVXType,
+ kVFVFType,
+ kVMVXType,
+ kVSETType,
kUnsupported = -1
};
@@ -840,7 +1222,9 @@ class InstructionGetters : public T {
this->InstructionType() == InstructionBase::kR4Type ||
this->InstructionType() == InstructionBase::kIType ||
this->InstructionType() == InstructionBase::kSType ||
- this->InstructionType() == InstructionBase::kBType);
+         this->InstructionType() == InstructionBase::kBType ||
+         this->InstructionType() == InstructionBase::kVType);
return this->Bits(kRs1Shift + kRs1Bits - 1, kRs1Shift);
}
@@ -848,7 +1232,9 @@ class InstructionGetters : public T {
DCHECK(this->InstructionType() == InstructionBase::kRType ||
this->InstructionType() == InstructionBase::kR4Type ||
this->InstructionType() == InstructionBase::kSType ||
- this->InstructionType() == InstructionBase::kBType);
+ this->InstructionType() == InstructionBase::kBType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kVType);
return this->Bits(kRs2Shift + kRs2Bits - 1, kRs2Shift);
}
@@ -857,12 +1243,35 @@ class InstructionGetters : public T {
return this->Bits(kRs3Shift + kRs3Bits - 1, kRs3Shift);
}
+ inline int Vs1Value() const {
+ DCHECK(this->InstructionType() == InstructionBase::kVType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType);
+ return this->Bits(kVs1Shift + kVs1Bits - 1, kVs1Shift);
+ }
+
+ inline int Vs2Value() const {
+ DCHECK(this->InstructionType() == InstructionBase::kVType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType);
+ return this->Bits(kVs2Shift + kVs2Bits - 1, kVs2Shift);
+ }
+
+ inline int VdValue() const {
+ DCHECK(this->InstructionType() == InstructionBase::kVType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType);
+ return this->Bits(kVdShift + kVdBits - 1, kVdShift);
+ }
+
inline int RdValue() const {
DCHECK(this->InstructionType() == InstructionBase::kRType ||
this->InstructionType() == InstructionBase::kR4Type ||
this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType ||
this->InstructionType() == InstructionBase::kUType ||
- this->InstructionType() == InstructionBase::kJType);
+ this->InstructionType() == InstructionBase::kJType ||
+ this->InstructionType() == InstructionBase::kVType);
return this->Bits(kRdShift + kRdBits - 1, kRdShift);
}
@@ -1149,6 +1558,129 @@ class InstructionGetters : public T {
return imm9 << 23 >> 23;
}
+ inline int vl_vs_width() {
+ int width = 0;
+ if ((this->InstructionBits() & kBaseOpcodeMask) != LOAD_FP &&
+ (this->InstructionBits() & kBaseOpcodeMask) != STORE_FP)
+ return -1;
+ switch (this->InstructionBits() & (kRvvWidthMask | kRvvMewMask)) {
+ case 0x0:
+ width = 8;
+ break;
+ case 0x00005000:
+ width = 16;
+ break;
+ case 0x00006000:
+ width = 32;
+ break;
+ case 0x00007000:
+ width = 64;
+ break;
+ case 0x10000000:
+ width = 128;
+ break;
+ case 0x10005000:
+ width = 256;
+ break;
+ case 0x10006000:
+ width = 512;
+ break;
+ case 0x10007000:
+ width = 1024;
+ break;
+ default:
+ width = -1;
+ break;
+ }
+ return width;
+ }
+
+ inline uint32_t Rvvzimm() const {
+ if ((this->InstructionBits() &
+ (kBaseOpcodeMask | kFunct3Mask | 0x80000000)) == RO_V_VSETVLI) {
+ uint32_t Bits = this->InstructionBits();
+ uint32_t zimm = Bits & kRvvZimmMask;
+ return zimm >> kRvvZimmShift;
+ } else {
+ DCHECK_EQ(this->InstructionBits() &
+ (kBaseOpcodeMask | kFunct3Mask | 0xC0000000),
+ RO_V_VSETIVLI);
+ uint32_t Bits = this->InstructionBits();
+ uint32_t zimm = Bits & kRvvZimmMask;
+ return (zimm >> kRvvZimmShift) & 0x3FF;
+ }
+ }
+
+ inline uint32_t Rvvuimm() const {
+ DCHECK_EQ(
+ this->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask | 0xC0000000),
+ RO_V_VSETIVLI);
+ uint32_t Bits = this->InstructionBits();
+ uint32_t uimm = Bits & kRvvUimmMask;
+ return uimm >> kRvvUimmShift;
+ }
+
+ inline uint32_t RvvVsew() const {
+ uint32_t zimm = this->Rvvzimm();
+ uint32_t vsew = (zimm >> 3) & 0x7;
+ return vsew;
+ }
+
+ inline uint32_t RvvVlmul() const {
+ uint32_t zimm = this->Rvvzimm();
+ uint32_t vlmul = zimm & 0x7;
+ return vlmul;
+ }
+
+ inline uint8_t RvvVM() const {
+ DCHECK(this->InstructionType() == InstructionBase::kVType ||
+ this->InstructionType() == InstructionBase::kIType ||
+ this->InstructionType() == InstructionBase::kSType);
+ return this->Bits(kRvvVmShift + kRvvVmBits - 1, kRvvVmShift);
+ }
+
+ inline const char* RvvSEW() const {
+ uint32_t vsew = this->RvvVsew();
+ switch (vsew) {
+#define CAST_VSEW(name) \
+ case name: \
+ return #name;
+ RVV_SEW(CAST_VSEW)
+ default:
+ return "unknown";
+#undef CAST_VSEW
+ }
+ }
+
+ inline const char* RvvLMUL() const {
+ uint32_t vlmul = this->RvvVlmul();
+ switch (vlmul) {
+#define CAST_VLMUL(name) \
+ case name: \
+ return #name;
+ RVV_LMUL(CAST_VLMUL)
+ default:
+ return "unknown";
+#undef CAST_VLMUL
+ }
+ }
+
+#define sext(x, len) (((int32_t)(x) << (32 - len)) >> (32 - len))
+#define zext(x, len) (((uint32_t)(x) << (32 - len)) >> (32 - len))
+
+ inline int32_t RvvSimm5() const {
+ DCHECK(this->InstructionType() == InstructionBase::kVType);
+ return sext(this->Bits(kRvvImm5Shift + kRvvImm5Bits - 1, kRvvImm5Shift),
+ kRvvImm5Bits);
+ }
+
+ inline uint32_t RvvUimm5() const {
+ DCHECK(this->InstructionType() == InstructionBase::kVType);
+ uint32_t imm = this->Bits(kRvvImm5Shift + kRvvImm5Bits - 1, kRvvImm5Shift);
+ return zext(imm, kRvvImm5Bits);
+ }
+#undef sext
+#undef zext
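The sext/zext macros extend a len-bit field to 32 bits by shifting it to the top of the word and back down: the arithmetic right shift replicates the sign bit while the logical shift zero-fills. A quick check of the 5-bit case used by RvvSimm5/RvvUimm5 above:

    // Illustrative expansion for a 5-bit field x == 0b11111:
    //   sext(0x1F, 5): (int32_t)(0x1F << 27) == 0xF8000000; >> 27 yields -1
    //   zext(0x1F, 5): (uint32_t)(0x1F << 27) >> 27 yields 0x1F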
inline bool AqValue() const { return this->Bits(kAqShift, kAqShift); }
inline bool RlValue() const { return this->Bits(kRlShift, kRlShift); }
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
index 3baa71d1a2..0e0d8bda5a 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.cc
@@ -1057,7 +1057,10 @@ void TurboAssembler::CalcScaledAddress(Register rd, Register rt, Register rs,
// ------------Pseudo-instructions-------------
// Change endianness
-void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size) {
+void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size,
+ Register scratch) {
+ DCHECK_NE(scratch, rs);
+ DCHECK_NE(scratch, rd);
DCHECK(operand_size == 4 || operand_size == 8);
if (operand_size == 4) {
// uint32_t x1 = 0x00FF00FF;
@@ -1068,7 +1071,7 @@ void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size) {
DCHECK((rd != t6) && (rs != t6));
Register x0 = temps.Acquire();
Register x1 = temps.Acquire();
- Register x2 = temps.Acquire();
+ Register x2 = scratch;
li(x1, 0x00FF00FF);
slliw(x0, rs, 16);
srliw(rd, rs, 16);
@@ -1090,7 +1093,7 @@ void TurboAssembler::ByteSwap(Register rd, Register rs, int operand_size) {
DCHECK((rd != t6) && (rs != t6));
Register x0 = temps.Acquire();
Register x1 = temps.Acquire();
- Register x2 = temps.Acquire();
+ Register x2 = scratch;
li(x1, 0x0000FFFF0000FFFFl);
slli(x0, rs, 32);
srli(rd, rs, 32);
@@ -1193,20 +1196,19 @@ void TurboAssembler::UnalignedLoadHelper(Register rd, const MemOperand& rs) {
}
template <int NBYTES>
-void TurboAssembler::UnalignedFLoadHelper(FPURegister frd,
- const MemOperand& rs) {
+void TurboAssembler::UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs,
+ Register scratch_base) {
DCHECK(NBYTES == 4 || NBYTES == 8);
-
+ DCHECK_NE(scratch_base, rs.rm());
BlockTrampolinePoolScope block_trampoline_pool(this);
MemOperand source = rs;
- UseScratchRegisterScope temps(this);
- Register scratch_base = temps.Acquire();
if (NeedAdjustBaseAndOffset(rs, OffsetAccessType::TWO_ACCESSES, NBYTES - 1)) {
// Adjust offset for two accesses and check if offset + 3 fits into int12.
DCHECK(scratch_base != rs.rm());
AdjustBaseAndOffset(&source, scratch_base, OffsetAccessType::TWO_ACCESSES,
NBYTES - 1);
}
+ UseScratchRegisterScope temps(this);
Register scratch_other = temps.Acquire();
Register scratch = temps.Acquire();
DCHECK(scratch != rs.rm() && scratch_other != scratch &&
@@ -1258,10 +1260,10 @@ void TurboAssembler::UnalignedStoreHelper(Register rd, const MemOperand& rs,
template <int NBYTES>
void TurboAssembler::UnalignedFStoreHelper(FPURegister frd,
- const MemOperand& rs) {
+ const MemOperand& rs,
+ Register scratch) {
DCHECK(NBYTES == 8 || NBYTES == 4);
- UseScratchRegisterScope temps(this);
- Register scratch = temps.Acquire();
+ DCHECK_NE(scratch, rs.rm());
if (NBYTES == 4) {
fmv_x_w(scratch, frd);
} else {
@@ -1354,20 +1356,28 @@ void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs) {
Sw(scratch, MemOperand(rs.rm(), rs.offset() + kSystemPointerSize / 2));
}
-void TurboAssembler::ULoadFloat(FPURegister fd, const MemOperand& rs) {
- UnalignedFLoadHelper<4>(fd, rs);
+void TurboAssembler::ULoadFloat(FPURegister fd, const MemOperand& rs,
+ Register scratch) {
+ DCHECK_NE(scratch, rs.rm());
+ UnalignedFLoadHelper<4>(fd, rs, scratch);
}
-void TurboAssembler::UStoreFloat(FPURegister fd, const MemOperand& rs) {
- UnalignedFStoreHelper<4>(fd, rs);
+void TurboAssembler::UStoreFloat(FPURegister fd, const MemOperand& rs,
+ Register scratch) {
+ DCHECK_NE(scratch, rs.rm());
+ UnalignedFStoreHelper<4>(fd, rs, scratch);
}
-void TurboAssembler::ULoadDouble(FPURegister fd, const MemOperand& rs) {
- UnalignedFLoadHelper<8>(fd, rs);
+void TurboAssembler::ULoadDouble(FPURegister fd, const MemOperand& rs,
+ Register scratch) {
+ DCHECK_NE(scratch, rs.rm());
+ UnalignedFLoadHelper<8>(fd, rs, scratch);
}
-void TurboAssembler::UStoreDouble(FPURegister fd, const MemOperand& rs) {
- UnalignedFStoreHelper<8>(fd, rs);
+void TurboAssembler::UStoreDouble(FPURegister fd, const MemOperand& rs,
+ Register scratch) {
+ DCHECK_NE(scratch, rs.rm());
+ UnalignedFStoreHelper<8>(fd, rs, scratch);
}
void TurboAssembler::Lb(Register rd, const MemOperand& rs) {
@@ -1664,8 +1674,7 @@ void TurboAssembler::li(Register rd, Operand j, LiFlags mode) {
BlockGrowBufferScope block_growbuffer(this);
int offset = pc_offset();
Address address = j.immediate();
- saved_handles_for_raw_object_ptr_.push_back(
- std::make_pair(offset, address));
+ saved_handles_for_raw_object_ptr_.emplace_back(offset, address);
Handle<HeapObject> object(reinterpret_cast<Address*>(address));
int64_t immediate = object->ptr();
RecordRelocInfo(j.rmode(), immediate);
@@ -2442,7 +2451,6 @@ void TurboAssembler::CompareI(Register rd, Register rs, const Operand& rt,
break;
case cc_always:
UNREACHABLE();
- break;
default:
UNREACHABLE();
}
@@ -2620,7 +2628,9 @@ void TurboAssembler::Ctz64(Register rd, Register rs) {
}
}
-void TurboAssembler::Popcnt32(Register rd, Register rs) {
+void TurboAssembler::Popcnt32(Register rd, Register rs, Register scratch) {
+ DCHECK_NE(scratch, rs);
+ DCHECK_NE(scratch, rd);
// https://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
//
// A generalization of the best bit counting method to integers of
@@ -2644,7 +2654,6 @@ void TurboAssembler::Popcnt32(Register rd, Register rs) {
uint32_t shift = 24;
UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
- Register scratch = temps.Acquire();
Register scratch2 = temps.Acquire();
Register value = temps.Acquire();
DCHECK((rd != value) && (rs != value));
@@ -2669,7 +2678,9 @@ void TurboAssembler::Popcnt32(Register rd, Register rs) {
Srl32(rd, rd, shift);
}
-void TurboAssembler::Popcnt64(Register rd, Register rs) {
+void TurboAssembler::Popcnt64(Register rd, Register rs, Register scratch) {
+ DCHECK_NE(scratch, rs);
+ DCHECK_NE(scratch, rd);
// uint64_t B0 = 0x5555555555555555l; // (T)~(T)0/3
// uint64_t B1 = 0x3333333333333333l; // (T)~(T)0/15*3
// uint64_t B2 = 0x0F0F0F0F0F0F0F0Fl; // (T)~(T)0/255*15
@@ -2679,7 +2690,6 @@ void TurboAssembler::Popcnt64(Register rd, Register rs) {
uint64_t shift = 24;
UseScratchRegisterScope temps(this);
BlockTrampolinePoolScope block_trampoline_pool(this);
- Register scratch = temps.Acquire();
Register scratch2 = temps.Acquire();
Register value = temps.Acquire();
DCHECK((rd != value) && (rs != value));
@@ -3006,7 +3016,6 @@ bool TurboAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
DCHECK_EQ(offset, 0);
return BranchShortHelper(0, L, cond, rs, rt);
}
- return false;
}
void TurboAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
@@ -3122,7 +3131,6 @@ bool TurboAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
DCHECK_EQ(offset, 0);
return BranchAndLinkShortHelper(0, L, cond, rs, rt);
}
- return false;
}
void TurboAssembler::LoadFromConstantsTable(Register destination,
@@ -3549,9 +3557,9 @@ void MacroAssembler::PushStackHandler() {
// Link the current handler as the next handler.
UseScratchRegisterScope temps(this);
Register handler_address = temps.Acquire();
- Register handler = temps.Acquire();
li(handler_address,
ExternalReference::Create(IsolateAddressId::kHandlerAddress, isolate()));
+ Register handler = temps.Acquire();
Ld(handler, MemOperand(handler_address));
push(handler);
@@ -3813,18 +3821,19 @@ void MacroAssembler::InvokeFunctionWithNewTarget(
// Contract with called JS functions requires that function is passed in a1.
DCHECK_EQ(function, a1);
Register expected_parameter_count = a2;
- UseScratchRegisterScope temps(this);
- Register temp_reg = temps.Acquire();
- LoadTaggedPointerField(
- temp_reg,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
- LoadTaggedPointerField(cp,
- FieldMemOperand(function, JSFunction::kContextOffset));
- // The argument count is stored as uint16_t
- Lhu(expected_parameter_count,
- FieldMemOperand(temp_reg,
- SharedFunctionInfo::kFormalParameterCountOffset));
-
+ {
+ UseScratchRegisterScope temps(this);
+ Register temp_reg = temps.Acquire();
+ LoadTaggedPointerField(
+ temp_reg,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ LoadTaggedPointerField(
+ cp, FieldMemOperand(function, JSFunction::kContextOffset));
+ // The argument count is stored as uint16_t
+ Lhu(expected_parameter_count,
+ FieldMemOperand(temp_reg,
+ SharedFunctionInfo::kFormalParameterCountOffset));
+ }
InvokeFunctionCode(function, new_target, expected_parameter_count,
actual_parameter_count, type);
}
@@ -3861,7 +3870,74 @@ void MacroAssembler::GetInstanceTypeRange(Register map, Register type_reg,
Lhu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
Sub64(range, type_reg, Operand(lower_limit));
}
-
+//------------------------------------------------------------------------------
+// Wasm
+void TurboAssembler::WasmRvvEq(VRegister dst, VRegister lhs, VRegister rhs,
+ VSew sew, Vlmul lmul) {
+ VU.set(kScratchReg, sew, lmul);
+ vmseq_vv(v0, lhs, rhs);
+ li(kScratchReg, -1);
+ vmv_vx(dst, zero_reg);
+ vmerge_vx(dst, kScratchReg, dst);
+}
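WasmRvvEq and the five comparison helpers that follow all use the same mask-and-merge idiom: the vector compare writes a predicate into v0, vmv_vx zeroes dst, and vmerge_vx overwrites the lanes selected by v0 with the all-ones value held in kScratchReg. Per lane, the sequence computes the following (illustrative pseudocode, not emitted code; lane_count depends on sew/lmul):

    // What vmseq_vv + vmv_vx + vmerge_vx computes, per lane:
    for (int i = 0; i < lane_count; ++i) {
      dst[i] = (lhs[i] == rhs[i]) ? -1 : 0;  // all-ones where v0 is set
    }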
+
+void TurboAssembler::WasmRvvNe(VRegister dst, VRegister lhs, VRegister rhs,
+ VSew sew, Vlmul lmul) {
+ VU.set(kScratchReg, sew, lmul);
+ vmsne_vv(v0, lhs, rhs);
+ li(kScratchReg, -1);
+ vmv_vx(dst, zero_reg);
+ vmerge_vx(dst, kScratchReg, dst);
+}
+
+void TurboAssembler::WasmRvvGeS(VRegister dst, VRegister lhs, VRegister rhs,
+ VSew sew, Vlmul lmul) {
+ VU.set(kScratchReg, sew, lmul);
+ vmsle_vv(v0, rhs, lhs);
+ li(kScratchReg, -1);
+ vmv_vx(dst, zero_reg);
+ vmerge_vx(dst, kScratchReg, dst);
+}
+
+void TurboAssembler::WasmRvvGeU(VRegister dst, VRegister lhs, VRegister rhs,
+ VSew sew, Vlmul lmul) {
+ VU.set(kScratchReg, sew, lmul);
+ vmsleu_vv(v0, rhs, lhs);
+ li(kScratchReg, -1);
+ vmv_vx(dst, zero_reg);
+ vmerge_vx(dst, kScratchReg, dst);
+}
+
+void TurboAssembler::WasmRvvGtS(VRegister dst, VRegister lhs, VRegister rhs,
+ VSew sew, Vlmul lmul) {
+ VU.set(kScratchReg, sew, lmul);
+ vmslt_vv(v0, rhs, lhs);
+ li(kScratchReg, -1);
+ vmv_vx(dst, zero_reg);
+ vmerge_vx(dst, kScratchReg, dst);
+}
+
+void TurboAssembler::WasmRvvGtU(VRegister dst, VRegister lhs, VRegister rhs,
+ VSew sew, Vlmul lmul) {
+ VU.set(kScratchReg, sew, lmul);
+ vmsltu_vv(v0, rhs, lhs);
+ li(kScratchReg, -1);
+ vmv_vx(dst, zero_reg);
+ vmerge_vx(dst, kScratchReg, dst);
+}
+
+void TurboAssembler::WasmRvvS128const(VRegister dst, const uint8_t imms[16]) {
+ uint64_t imm1 = *(reinterpret_cast<const uint64_t*>(imms));
+ uint64_t imm2 = *((reinterpret_cast<const uint64_t*>(imms)) + 1);
+ VU.set(kScratchReg, VSew::E64, Vlmul::m1);
+ li(kScratchReg, 1);
+ vmv_vx(v0, kScratchReg);
+ li(kScratchReg, imm1);
+ vmerge_vx(dst, kScratchReg, dst);
+ li(kScratchReg, imm2);
+ vsll_vi(v0, v0, 1);
+ vmerge_vx(dst, kScratchReg, dst);
+}
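WasmRvvS128const materializes the 128-bit constant one 64-bit lane at a time: v0 starts as the one-hot mask 0b01, so the first vmerge_vx writes imm1 into lane 0; vsll_vi then shifts the mask to 0b10 and the second merge writes imm2 into lane 1. Schematically, under E64/m1 (an illustrative trace, not code from this patch):

    // dst viewed as uint64_t[2]:
    //   after the first  vmerge_vx: dst == { imm1, <previous dst[1]> }
    //   after the second vmerge_vx: dst == { imm1, imm2 }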
// -----------------------------------------------------------------------------
// Runtime calls.
@@ -4743,10 +4819,6 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
pop(ra); // Restore ra
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- li(kSpeculationPoisonRegister, -1);
-}
-
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
diff --git a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
index 04285916bc..53e8543429 100644
--- a/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/macro-assembler-riscv64.h
@@ -151,6 +151,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Branch(Label* target);
void Branch(int32_t target);
+ void BranchLong(Label* L);
void Branch(Label* target, Condition cond, Register r1, const Operand& r2,
Label::Distance near_jump = Label::kFar);
void Branch(int32_t target, Condition cond, Register r1, const Operand& r2,
@@ -570,8 +571,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Clz64(Register rd, Register rs);
void Ctz32(Register rd, Register rs);
void Ctz64(Register rd, Register rs);
- void Popcnt32(Register rd, Register rs);
- void Popcnt64(Register rd, Register rs);
+ void Popcnt32(Register rd, Register rs, Register scratch);
+ void Popcnt64(Register rd, Register rs, Register scratch);
// Bit field starts at bit pos and extending for size bits is extracted from
// rs and stored zero/sign-extended and right-justified in rt
@@ -590,7 +591,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Neg_d(FPURegister fd, FPURegister fs);
// Change endianness
- void ByteSwap(Register dest, Register src, int operand_size);
+ void ByteSwap(Register dest, Register src, int operand_size,
+ Register scratch);
void Clear_if_nan_d(Register rd, FPURegister fs);
void Clear_if_nan_s(Register rd, FPURegister fs);
@@ -605,9 +607,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register scratch_other = no_reg);
template <int NBYTES>
- void UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs);
+ void UnalignedFLoadHelper(FPURegister frd, const MemOperand& rs,
+ Register scratch);
template <int NBYTES>
- void UnalignedFStoreHelper(FPURegister frd, const MemOperand& rs);
+ void UnalignedFStoreHelper(FPURegister frd, const MemOperand& rs,
+ Register scratch);
template <typename Reg_T, typename Func>
void AlignedLoadHelper(Reg_T target, const MemOperand& rs, Func generator);
@@ -631,11 +635,11 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Uld(Register rd, const MemOperand& rs);
void Usd(Register rd, const MemOperand& rs);
- void ULoadFloat(FPURegister fd, const MemOperand& rs);
- void UStoreFloat(FPURegister fd, const MemOperand& rs);
+ void ULoadFloat(FPURegister fd, const MemOperand& rs, Register scratch);
+ void UStoreFloat(FPURegister fd, const MemOperand& rs, Register scratch);
- void ULoadDouble(FPURegister fd, const MemOperand& rs);
- void UStoreDouble(FPURegister fd, const MemOperand& rs);
+ void ULoadDouble(FPURegister fd, const MemOperand& rs, Register scratch);
+ void UStoreDouble(FPURegister fd, const MemOperand& rs, Register scratch);
void Lb(Register rd, const MemOperand& rs);
void Lbu(Register rd, const MemOperand& rs);
@@ -857,8 +861,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
- void ResetSpeculationPoisonRegister();
-
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this
@@ -908,6 +910,31 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Sub64(rd, rs1, rs2);
}
}
+  // Wasm SIMD helpers implemented with RVV.
+ void WasmRvvExtractLane(Register dst, VRegister src, int8_t idx, VSew sew,
+ Vlmul lmul) {
+ VU.set(kScratchReg, sew, lmul);
+ VRegister Vsrc = idx != 0 ? kSimd128ScratchReg : src;
+ if (idx != 0) {
+ vslidedown_vi(kSimd128ScratchReg, src, idx);
+ }
+ vmv_xs(dst, Vsrc);
+ }
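WasmRvvExtractLane slides the requested lane down to position 0 (when idx != 0) and then reads lane 0 into a scalar register, so element-wise it computes dst = src[idx]. A scalar sketch of the same effect (illustrative only):

    // src viewed as an array of sew-wide elements:
    //   tmp = src slid down by idx lanes;  dst = tmp[0];  // dst = src[idx]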
+
+ void WasmRvvEq(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
+ Vlmul lmul);
+
+ void WasmRvvNe(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
+ Vlmul lmul);
+ void WasmRvvGeS(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
+ Vlmul lmul);
+ void WasmRvvGeU(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
+ Vlmul lmul);
+ void WasmRvvGtS(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
+ Vlmul lmul);
+ void WasmRvvGtU(VRegister dst, VRegister lhs, VRegister rhs, VSew sew,
+ Vlmul lmul);
+ void WasmRvvS128const(VRegister dst, const uint8_t imms[16]);
protected:
inline Register GetRtAsRegisterHelper(const Operand& rt, Register scratch);
@@ -945,7 +972,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register rs, const Operand& rt);
bool BranchAndLinkShortCheck(int32_t offset, Label* L, Condition cond,
Register rs, const Operand& rt);
- void BranchLong(Label* L);
void BranchAndLinkLong(Label* L);
template <typename F_TYPE>
diff --git a/deps/v8/src/codegen/riscv64/register-riscv64.h b/deps/v8/src/codegen/riscv64/register-riscv64.h
index 69654a4f54..2d2fccdf3a 100644
--- a/deps/v8/src/codegen/riscv64/register-riscv64.h
+++ b/deps/v8/src/codegen/riscv64/register-riscv64.h
@@ -49,16 +49,16 @@ namespace internal {
V(fs8) V(fs9) V(fs10) V(fs11) V(ft8) V(ft9) V(ft10) V(ft11)
#define FLOAT_REGISTERS DOUBLE_REGISTERS
-#define SIMD128_REGISTERS(V) \
- V(w0) V(w1) V(w2) V(w3) V(w4) V(w5) V(w6) V(w7) \
- V(w8) V(w9) V(w10) V(w11) V(w12) V(w13) V(w14) V(w15) \
- V(w16) V(w17) V(w18) V(w19) V(w20) V(w21) V(w22) V(w23) \
- V(w24) V(w25) V(w26) V(w27) V(w28) V(w29) V(w30) V(w31)
+#define VECTOR_REGISTERS(V) \
+ V(v0) V(v1) V(v2) V(v3) V(v4) V(v5) V(v6) V(v7) \
+ V(v8) V(v9) V(v10) V(v11) V(v12) V(v13) V(v14) V(v15) \
+ V(v16) V(v17) V(v18) V(v19) V(v20) V(v21) V(v22) V(v23) \
+ V(v24) V(v25) V(v26) V(v27) V(v28) V(v29) V(v30) V(v31)
-#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
- V(ft0) V(ft1) V(ft2) V(ft3) \
- V(ft4) V(ft5) V(ft6) V(ft7) V(fa0) V(fa1) V(fa2) V(fa3) V(fa4) V(fa5) \
- V(fa6) V(fa7)
+#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
+ V(ft1) V(ft2) V(ft3) V(ft4) V(ft5) V(ft6) V(ft7) V(ft8) \
+ V(ft9) V(ft10) V(ft11) V(fa0) V(fa1) V(fa2) V(fa3) V(fa4) V(fa5) \
+ V(fa6) V(fa7)
// Returns the number of padding slots needed for stack pointer alignment.
constexpr int ArgumentPaddingSlots(int argument_count) {
@@ -256,6 +256,19 @@ enum DoubleRegisterCode {
kDoubleAfterLast
};
+enum VRegisterCode {
+#define REGISTER_CODE(R) kVRCode_##R,
+ VECTOR_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kVRAfterLast
+};
+class VRegister : public RegisterBase<VRegister, kVRAfterLast> {
+ friend class RegisterBase;
+
+ public:
+ explicit constexpr VRegister(int code) : RegisterBase(code) {}
+};
+
// Coprocessor register.
class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
public:
@@ -274,25 +287,24 @@ class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
return FPURegister::from_code(code() + 1);
}
+  // FIXME(riscv64): In RVV, vector registers are distinct from float
+  // registers, but in this CL we assume the two register files are shared to
+  // keep the change small.
+ VRegister toV() const {
+ DCHECK(base::IsInRange(code(), 0, kVRAfterLast - 1));
+    // FIXME(riscv): v0 is the mask register and cannot be allocated, so map
+    // it to v8, which is unallocated.
+ if (code() == 0) {
+ return VRegister(8);
+ }
+ return VRegister(code());
+ }
+
private:
friend class RegisterBase;
explicit constexpr FPURegister(int code) : RegisterBase(code) {}
};
-enum MSARegisterCode {
-#define REGISTER_CODE(R) kMsaCode_##R,
- SIMD128_REGISTERS(REGISTER_CODE)
-#undef REGISTER_CODE
- kMsaAfterLast
-};
-
-// MIPS SIMD (MSA) register
-// TODO(RISCV): Remove MIPS MSA registers.
-// https://github.com/v8-riscv/v8/issues/429
-class MSARegister : public RegisterBase<MSARegister, kMsaAfterLast> {
- friend class RegisterBase;
- explicit constexpr MSARegister(int code) : RegisterBase(code) {}
-};
// A few double registers are reserved: one as a scratch register and one to
// hold 0.0.
@@ -304,6 +316,8 @@ using FloatRegister = FPURegister;
using DoubleRegister = FPURegister;
+using Simd128Register = VRegister;
+
#define DECLARE_DOUBLE_REGISTER(R) \
constexpr DoubleRegister R = DoubleRegister::from_code(kDoubleCode_##R);
DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
@@ -311,15 +325,12 @@ DOUBLE_REGISTERS(DECLARE_DOUBLE_REGISTER)
constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
-// SIMD registers.
-using Simd128Register = MSARegister;
-
-#define DECLARE_SIMD128_REGISTER(R) \
- constexpr Simd128Register R = Simd128Register::from_code(kMsaCode_##R);
-SIMD128_REGISTERS(DECLARE_SIMD128_REGISTER)
-#undef DECLARE_SIMD128_REGISTER
+#define DECLARE_VECTOR_REGISTER(R) \
+ constexpr VRegister R = VRegister::from_code(kVRCode_##R);
+VECTOR_REGISTERS(DECLARE_VECTOR_REGISTER)
+#undef DECLARE_VECTOR_REGISTER
-const Simd128Register no_msareg = Simd128Register::no_reg();
+const VRegister no_msareg = VRegister::no_reg();
// Register aliases.
// cp is assumed to be a callee saved register.
@@ -328,14 +339,14 @@ constexpr Register cp = s7;
constexpr Register kScratchReg = s3;
constexpr Register kScratchReg2 = s4;
-constexpr DoubleRegister kScratchDoubleReg = fs11;
+constexpr DoubleRegister kScratchDoubleReg = ft0;
constexpr DoubleRegister kDoubleRegZero = fs9;
// Define {RegisterName} methods for the register types.
DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS)
DEFINE_REGISTER_NAMES(FPURegister, DOUBLE_REGISTERS)
-DEFINE_REGISTER_NAMES(MSARegister, SIMD128_REGISTERS)
+DEFINE_REGISTER_NAMES(VRegister, VECTOR_REGISTERS)
// Give alias names to registers for calling conventions.
constexpr Register kReturnRegister0 = a0;
@@ -344,7 +355,6 @@ constexpr Register kReturnRegister2 = a2;
constexpr Register kJSFunctionRegister = a1;
constexpr Register kContextRegister = s7;
constexpr Register kAllocateSizeRegister = a1;
-constexpr Register kSpeculationPoisonRegister = a7;
constexpr Register kInterpreterAccumulatorRegister = a0;
constexpr Register kInterpreterBytecodeOffsetRegister = t0;
constexpr Register kInterpreterBytecodeArrayRegister = t1;
@@ -364,6 +374,9 @@ constexpr Register kWasmInstanceRegister = a0;
constexpr Register kWasmCompileLazyFuncIndexRegister = t0;
constexpr DoubleRegister kFPReturnRegister0 = fa0;
+constexpr VRegister kSimd128ScratchReg = v27;
+constexpr VRegister kSimd128ScratchReg2 = v26;
+constexpr VRegister kSimd128RegZero = v25;
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
constexpr Register kPtrComprCageBaseRegister = s11; // callee save
diff --git a/deps/v8/src/codegen/s390/assembler-s390.cc b/deps/v8/src/codegen/s390/assembler-s390.cc
index 511096e0db..e799f8e8a4 100644
--- a/deps/v8/src/codegen/s390/assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/assembler-s390.cc
@@ -440,7 +440,6 @@ Condition Assembler::GetCondition(Instr instr) {
default:
UNIMPLEMENTED();
}
- return al;
}
#if V8_TARGET_ARCH_S390X
diff --git a/deps/v8/src/codegen/s390/constants-s390.h b/deps/v8/src/codegen/s390/constants-s390.h
index b16963e52a..23e77c93d7 100644
--- a/deps/v8/src/codegen/s390/constants-s390.h
+++ b/deps/v8/src/codegen/s390/constants-s390.h
@@ -1553,14 +1553,28 @@ using SixByteInstr = uint64_t;
V(vlrep, VLREP, 0xE705) /* type = VRX VECTOR LOAD AND REPLICATE */ \
V(vl, VL, 0xE706) /* type = VRX VECTOR LOAD */ \
V(vlbb, VLBB, 0xE707) /* type = VRX VECTOR LOAD TO BLOCK BOUNDARY */ \
+ V(vlbr, VLBR, 0xE606) /* type = VRX VECTOR LOAD BYTE REVERSED ELEMENTS */ \
+ V(vlbrrep, VLBRREP, \
+ 0xE605) /* type = VRX VECTOR LOAD BYTE REVERSED ELEMENT AND REPLICATE */ \
+ V(vlebrh, VLEBRH, \
+ 0xE601) /* type = VRX VECTOR LOAD BYTE REVERSED ELEMENT (16) */ \
+ V(vlebrf, VLEBRF, \
+ 0xE603) /* type = VRX VECTOR LOAD BYTE REVERSED ELEMENT (32) */ \
+ V(vlebrg, VLEBRG, \
+ 0xE602) /* type = VRX VECTOR LOAD BYTE REVERSED ELEMENT (64) */ \
V(vsteb, VSTEB, 0xE708) /* type = VRX VECTOR STORE ELEMENT (8) */ \
V(vsteh, VSTEH, 0xE709) /* type = VRX VECTOR STORE ELEMENT (16) */ \
V(vsteg, VSTEG, 0xE70A) /* type = VRX VECTOR STORE ELEMENT (64) */ \
V(vstef, VSTEF, 0xE70B) /* type = VRX VECTOR STORE ELEMENT (32) */ \
V(vst, VST, 0xE70E) /* type = VRX VECTOR STORE */ \
- V(vlbr, VLBR, 0xE606) /* type = VRX VECTOR LOAD BYTE REVERSED ELEMENTS */ \
- V(vstbr, VSTBR, 0xE60E) /* type = VRX VECTOR STORE BYTE REVERSED ELEMENTS \
- */
+ V(vstbr, VSTBR, \
+ 0xE60E) /* type = VRX VECTOR STORE BYTE REVERSED ELEMENTS */ \
+ V(vstebrh, VSTEBRH, \
+ 0xE609) /* type = VRX VECTOR STORE BYTE REVERSED ELEMENT (16) */ \
+ V(vstebrf, VSTEBRF, \
+ 0xE60B) /* type = VRX VECTOR STORE BYTE REVERSED ELEMENT (32) */ \
+ V(vstebrg, VSTEBRG, \
+ 0xE60A) /* type = VRX VECTOR STORE BYTE REVERSED ELEMENT (64) */
#define S390_RIE_G_OPCODE_LIST(V) \
V(lochi, LOCHI, \
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.cc b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
index 4de7f2cf4b..a6c55746f8 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.cc
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.cc
@@ -1184,7 +1184,6 @@ void TurboAssembler::ConvertFloat32ToInt64(const Register dst,
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1193,7 +1192,6 @@ void TurboAssembler::ConvertFloat32ToInt64(const Register dst,
break;
default:
UNIMPLEMENTED();
- break;
}
cgebr(m, dst, double_input);
}
@@ -1208,7 +1206,6 @@ void TurboAssembler::ConvertDoubleToInt64(const Register dst,
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1217,7 +1214,6 @@ void TurboAssembler::ConvertDoubleToInt64(const Register dst,
break;
default:
UNIMPLEMENTED();
- break;
}
cgdbr(m, dst, double_input);
}
@@ -1241,7 +1237,6 @@ void TurboAssembler::ConvertDoubleToInt32(const Register dst,
break;
default:
UNIMPLEMENTED();
- break;
}
#ifdef V8_TARGET_ARCH_S390X
lghi(dst, Operand::Zero());
@@ -1268,7 +1263,6 @@ void TurboAssembler::ConvertFloat32ToInt32(const Register result,
break;
default:
UNIMPLEMENTED();
- break;
}
#ifdef V8_TARGET_ARCH_S390X
lghi(result, Operand::Zero());
@@ -1286,7 +1280,6 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt32(
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1295,7 +1288,6 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt32(
break;
default:
UNIMPLEMENTED();
- break;
}
#ifdef V8_TARGET_ARCH_S390X
lghi(result, Operand::Zero());
@@ -1313,7 +1305,6 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt64(
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1322,7 +1313,6 @@ void TurboAssembler::ConvertFloat32ToUnsignedInt64(
break;
default:
UNIMPLEMENTED();
- break;
}
clgebr(m, Condition(0), result, double_input);
}
@@ -1337,7 +1327,6 @@ void TurboAssembler::ConvertDoubleToUnsignedInt64(
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1346,7 +1335,6 @@ void TurboAssembler::ConvertDoubleToUnsignedInt64(
break;
default:
UNIMPLEMENTED();
- break;
}
clgdbr(m, Condition(0), dst, double_input);
}
@@ -1361,7 +1349,6 @@ void TurboAssembler::ConvertDoubleToUnsignedInt32(
break;
case kRoundToNearest:
UNIMPLEMENTED();
- break;
case kRoundToPlusInf:
m = Condition(6);
break;
@@ -1370,7 +1357,6 @@ void TurboAssembler::ConvertDoubleToUnsignedInt32(
break;
default:
UNIMPLEMENTED();
- break;
}
#ifdef V8_TARGET_ARCH_S390X
lghi(dst, Operand::Zero());
@@ -3924,6 +3910,125 @@ void TurboAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem,
}
}
+// Vector LE Load and Transform instructions.
+void TurboAssembler::LoadAndSplat8x16LE(Simd128Register dst,
+ const MemOperand& mem) {
+ vlrep(dst, mem, Condition(0));
+}
+#define LOAD_SPLAT_LIST(V) \
+ V(64x2, LoadU64LE, 3) \
+ V(32x4, LoadU32LE, 2) \
+ V(16x8, LoadU16LE, 1)
+
+#define LOAD_SPLAT(name, scalar_instr, condition) \
+ void TurboAssembler::LoadAndSplat##name##LE(Simd128Register dst, \
+ const MemOperand& mem) { \
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) && \
+ is_uint12(mem.offset())) { \
+ vlbrrep(dst, mem, Condition(condition)); \
+ return; \
+ } \
+ scalar_instr(r1, mem); \
+ vlvg(dst, r1, MemOperand(r0, 0), Condition(condition)); \
+ vrep(dst, dst, Operand(0), Condition(condition)); \
+ }
+LOAD_SPLAT_LIST(LOAD_SPLAT)
+#undef LOAD_SPLAT
+#undef LOAD_SPLAT_LIST
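For reference, here is the 64x2 instantiation of the macro above, hand-expanded (this is what the preprocessor generates, not new code):

    void TurboAssembler::LoadAndSplat64x2LE(Simd128Register dst,
                                            const MemOperand& mem) {
      if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) &&
          is_uint12(mem.offset())) {
        vlbrrep(dst, mem, Condition(3));  // byte-reversed load-and-replicate
        return;
      }
      LoadU64LE(r1, mem);                              // scalar LE load
      vlvg(dst, r1, MemOperand(r0, 0), Condition(3));  // insert into lane 0
      vrep(dst, dst, Operand(0), Condition(3));        // replicate lane 0
    }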
+
+#define LOAD_EXTEND_LIST(V) \
+ V(32x2U, vuplh, 2) \
+ V(32x2S, vuph, 2) \
+ V(16x4U, vuplh, 1) \
+ V(16x4S, vuph, 1) \
+ V(8x8U, vuplh, 0) \
+ V(8x8S, vuph, 0)
+
+#define LOAD_EXTEND(name, unpack_instr, condition) \
+ void TurboAssembler::LoadAndExtend##name##LE(Simd128Register dst, \
+ const MemOperand& mem) { \
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) && \
+ is_uint12(mem.offset())) { \
+ vlebrg(kScratchDoubleReg, mem, Condition(0)); \
+ } else { \
+ LoadU64LE(r1, mem); \
+ vlvg(kScratchDoubleReg, r1, MemOperand(r0, 0), Condition(3)); \
+ } \
+ unpack_instr(dst, kScratchDoubleReg, Condition(0), Condition(0), \
+ Condition(condition)); \
+ }
+LOAD_EXTEND_LIST(LOAD_EXTEND)
+#undef LOAD_EXTEND
+#undef LOAD_EXTEND_LIST
+
+void TurboAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem) {
+ vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
+ vlebrf(dst, mem, Condition(3));
+ return;
+ }
+ LoadU32LE(r1, mem);
+ vlvg(dst, r1, MemOperand(r0, 3), Condition(2));
+}
+
+void TurboAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem) {
+ vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2)) {
+ vlebrg(dst, mem, Condition(1));
+ return;
+ }
+ LoadU64LE(r1, mem);
+ vlvg(dst, r1, MemOperand(r0, 1), Condition(3));
+}
+
+void TurboAssembler::LoadLane8LE(Simd128Register dst, const MemOperand& mem,
+ int index) {
+ vleb(dst, mem, Condition(index));
+}
+#define LOAD_LANE_LIST(V) \
+ V(64, vlebrg, LoadU64LE, 3) \
+ V(32, vlebrf, LoadU32LE, 2) \
+ V(16, vlebrh, LoadU16LE, 1)
+
+#define LOAD_LANE(name, vector_instr, scalar_instr, condition) \
+ void TurboAssembler::LoadLane##name##LE(Simd128Register dst, \
+ const MemOperand& mem, int lane) { \
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) && \
+ is_uint12(mem.offset())) { \
+ vector_instr(dst, mem, Condition(lane)); \
+ return; \
+ } \
+ scalar_instr(r1, mem); \
+ vlvg(dst, r1, MemOperand(r0, lane), Condition(condition)); \
+ }
+LOAD_LANE_LIST(LOAD_LANE)
+#undef LOAD_LANE
+#undef LOAD_LANE_LIST
+
+void TurboAssembler::StoreLane8LE(Simd128Register src, const MemOperand& mem,
+ int index) {
+ vsteb(src, mem, Condition(index));
+}
+#define STORE_LANE_LIST(V) \
+ V(64, vstebrg, StoreU64LE, 3) \
+ V(32, vstebrf, StoreU32LE, 2) \
+ V(16, vstebrh, StoreU16LE, 1)
+
+#define STORE_LANE(name, vector_instr, scalar_instr, condition) \
+ void TurboAssembler::StoreLane##name##LE(Simd128Register src, \
+ const MemOperand& mem, int lane) { \
+ if (CpuFeatures::IsSupported(VECTOR_ENHANCE_FACILITY_2) && \
+ is_uint12(mem.offset())) { \
+ vector_instr(src, mem, Condition(lane)); \
+ return; \
+ } \
+ vlgv(r1, src, MemOperand(r0, lane), Condition(condition)); \
+ scalar_instr(r1, mem); \
+ }
+STORE_LANE_LIST(STORE_LANE)
+#undef STORE_LANE
+#undef STORE_LANE_LIST
+
#else
void TurboAssembler::LoadU64LE(Register dst, const MemOperand& mem,
Register scratch) {
@@ -3996,6 +4101,83 @@ void TurboAssembler::StoreV128LE(Simd128Register src, const MemOperand& mem,
StoreV128(src, mem, scratch1);
}
+// Vector LE Load and Transform instructions.
+#define LOAD_SPLAT_LIST(V) \
+ V(64x2, 3) \
+ V(32x4, 2) \
+ V(16x8, 1) \
+ V(8x16, 0)
+
+#define LOAD_SPLAT(name, condition) \
+ void TurboAssembler::LoadAndSplat##name##LE(Simd128Register dst, \
+ const MemOperand& mem) { \
+ vlrep(dst, mem, Condition(condition)); \
+ }
+LOAD_SPLAT_LIST(LOAD_SPLAT)
+#undef LOAD_SPLAT
+#undef LOAD_SPLAT_LIST
+
+#define LOAD_EXTEND_LIST(V) \
+ V(32x2U, vuplh, 2) \
+ V(32x2S, vuph, 2) \
+ V(16x4U, vuplh, 1) \
+ V(16x4S, vuph, 1) \
+ V(8x8U, vuplh, 0) \
+ V(8x8S, vuph, 0)
+
+#define LOAD_EXTEND(name, unpack_instr, condition) \
+ void TurboAssembler::LoadAndExtend##name##LE(Simd128Register dst, \
+ const MemOperand& mem) { \
+ vleg(kScratchDoubleReg, mem, Condition(0)); \
+ unpack_instr(dst, kScratchDoubleReg, Condition(0), Condition(0), \
+ Condition(condition)); \
+ }
+LOAD_EXTEND_LIST(LOAD_EXTEND)
+#undef LOAD_EXTEND
+#undef LOAD_EXTEND_LIST
+
+void TurboAssembler::LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem) {
+ vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
+ vlef(dst, mem, Condition(3));
+}
+
+void TurboAssembler::LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem) {
+ vx(dst, dst, dst, Condition(0), Condition(0), Condition(0));
+ vleg(dst, mem, Condition(1));
+}
+
+#define LOAD_LANE_LIST(V) \
+ V(64, vleg) \
+ V(32, vlef) \
+ V(16, vleh) \
+ V(8, vleb)
+
+#define LOAD_LANE(name, vector_instr) \
+ void TurboAssembler::LoadLane##name##LE(Simd128Register dst, \
+ const MemOperand& mem, int lane) { \
+ DCHECK(is_uint12(mem.offset())); \
+ vector_instr(dst, mem, Condition(lane)); \
+ }
+LOAD_LANE_LIST(LOAD_LANE)
+#undef LOAD_LANE
+#undef LOAD_LANE_LIST
+
+#define STORE_LANE_LIST(V) \
+ V(64, vsteg) \
+ V(32, vstef) \
+ V(16, vsteh) \
+ V(8, vsteb)
+
+#define STORE_LANE(name, vector_instr) \
+ void TurboAssembler::StoreLane##name##LE(Simd128Register src, \
+ const MemOperand& mem, int lane) { \
+ DCHECK(is_uint12(mem.offset())); \
+ vector_instr(src, mem, Condition(lane)); \
+ }
+STORE_LANE_LIST(STORE_LANE)
+#undef STORE_LANE
+#undef STORE_LANE_LIST
+
#endif
// Load And Test (Reg <- Reg)
@@ -4670,10 +4852,6 @@ void TurboAssembler::SwapSimd128(MemOperand src, MemOperand dst,
lay(sp, MemOperand(sp, kSimd128Size));
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- mov(kSpeculationPoisonRegister, Operand(-1));
-}
-
void TurboAssembler::ComputeCodeStartAddress(Register dst) {
larl(dst, Operand(-pc_offset() / 2));
}
@@ -5276,7 +5454,37 @@ SIMD_BINOP_LIST_VRR_C(EMIT_SIMD_BINOP_VRR_C)
#undef EMIT_SIMD_BINOP_VRR_C
#undef SIMD_BINOP_LIST_VRR_C
-// Opcodes without a 1-1 match.
+#define SIMD_SHIFT_LIST(V) \
+ V(I64x2Shl, veslv, 3) \
+ V(I64x2ShrS, vesrav, 3) \
+ V(I64x2ShrU, vesrlv, 3) \
+ V(I32x4Shl, veslv, 2) \
+ V(I32x4ShrS, vesrav, 2) \
+ V(I32x4ShrU, vesrlv, 2) \
+ V(I16x8Shl, veslv, 1) \
+ V(I16x8ShrS, vesrav, 1) \
+ V(I16x8ShrU, vesrlv, 1) \
+ V(I8x16Shl, veslv, 0) \
+ V(I8x16ShrS, vesrav, 0) \
+ V(I8x16ShrU, vesrlv, 0)
+
+#define EMIT_SIMD_SHIFT(name, op, c1) \
+ void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ Register src2) { \
+ vlvg(kScratchDoubleReg, src2, MemOperand(r0, 0), Condition(c1)); \
+ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), Condition(c1)); \
+ op(dst, src1, kScratchDoubleReg, Condition(0), Condition(0), \
+ Condition(c1)); \
+ } \
+ void TurboAssembler::name(Simd128Register dst, Simd128Register src1, \
+ const Operand& src2) { \
+ mov(ip, src2); \
+ name(dst, src1, ip); \
+ }
+SIMD_SHIFT_LIST(EMIT_SIMD_SHIFT)
+#undef EMIT_SIMD_SHIFT
+#undef SIMD_SHIFT_LIST
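Each SIMD_SHIFT_LIST entry expands to a register-shift overload plus an immediate-shift overload that defers to it; hand-expanding V(I32x4Shl, veslv, 2) gives (for reference, not new code):

    void TurboAssembler::I32x4Shl(Simd128Register dst, Simd128Register src1,
                                  Register src2) {
      // Broadcast the scalar shift amount to every 32-bit lane, then shift.
      vlvg(kScratchDoubleReg, src2, MemOperand(r0, 0), Condition(2));
      vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), Condition(2));
      veslv(dst, src1, kScratchDoubleReg, Condition(0), Condition(0),
            Condition(2));
    }
    void TurboAssembler::I32x4Shl(Simd128Register dst, Simd128Register src1,
                                  const Operand& src2) {
      mov(ip, src2);
      I32x4Shl(dst, src1, ip);
    }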
+
void TurboAssembler::I64x2Mul(Simd128Register dst, Simd128Register src1,
Simd128Register src2) {
Register scratch_1 = r0;
diff --git a/deps/v8/src/codegen/s390/macro-assembler-s390.h b/deps/v8/src/codegen/s390/macro-assembler-s390.h
index 51cdb48326..b7123d5960 100644
--- a/deps/v8/src/codegen/s390/macro-assembler-s390.h
+++ b/deps/v8/src/codegen/s390/macro-assembler-s390.h
@@ -392,6 +392,27 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
Register scratch1);
void LoadF64LE(DoubleRegister dst, const MemOperand& opnd, Register scratch);
void LoadF32LE(DoubleRegister dst, const MemOperand& opnd, Register scratch);
+ // Vector LE Load and Transform instructions.
+ void LoadAndSplat64x2LE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndSplat32x4LE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndSplat16x8LE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndSplat8x16LE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndExtend8x8ULE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndExtend8x8SLE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndExtend16x4ULE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndExtend16x4SLE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndExtend32x2ULE(Simd128Register dst, const MemOperand& mem);
+ void LoadAndExtend32x2SLE(Simd128Register dst, const MemOperand& mem);
+ void LoadV32ZeroLE(Simd128Register dst, const MemOperand& mem);
+ void LoadV64ZeroLE(Simd128Register dst, const MemOperand& mem);
+ void LoadLane8LE(Simd128Register dst, const MemOperand& mem, int lane);
+ void LoadLane16LE(Simd128Register dst, const MemOperand& mem, int lane);
+ void LoadLane32LE(Simd128Register dst, const MemOperand& mem, int lane);
+ void LoadLane64LE(Simd128Register dst, const MemOperand& mem, int lane);
+ void StoreLane8LE(Simd128Register src, const MemOperand& mem, int lane);
+ void StoreLane16LE(Simd128Register src, const MemOperand& mem, int lane);
+ void StoreLane32LE(Simd128Register src, const MemOperand& mem, int lane);
+ void StoreLane64LE(Simd128Register src, const MemOperand& mem, int lane);
// Load And Test
void LoadAndTest32(Register dst, Register src);
@@ -1015,7 +1036,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
Label* condition_met);
- void ResetSpeculationPoisonRegister();
void ComputeCodeStartAddress(Register dst);
void LoadPC(Register dst);
@@ -1071,75 +1091,99 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void I8x16ReplaceLane(Simd128Register dst, Simd128Register src1,
Register src2, uint8_t imm_lane_idx);
-#define SIMD_BINOP_LIST(V) \
- V(F64x2Add) \
- V(F64x2Sub) \
- V(F64x2Mul) \
- V(F64x2Div) \
- V(F64x2Min) \
- V(F64x2Max) \
- V(F64x2Eq) \
- V(F64x2Ne) \
- V(F64x2Lt) \
- V(F64x2Le) \
- V(F32x4Add) \
- V(F32x4Sub) \
- V(F32x4Mul) \
- V(F32x4Div) \
- V(F32x4Min) \
- V(F32x4Max) \
- V(F32x4Eq) \
- V(F32x4Ne) \
- V(F32x4Lt) \
- V(F32x4Le) \
- V(I64x2Add) \
- V(I64x2Sub) \
- V(I64x2Mul) \
- V(I64x2Eq) \
- V(I64x2Ne) \
- V(I64x2GtS) \
- V(I64x2GeS) \
- V(I32x4Add) \
- V(I32x4Sub) \
- V(I32x4Mul) \
- V(I32x4Eq) \
- V(I32x4Ne) \
- V(I32x4GtS) \
- V(I32x4GeS) \
- V(I32x4GtU) \
- V(I32x4GeU) \
- V(I32x4MinS) \
- V(I32x4MinU) \
- V(I32x4MaxS) \
- V(I32x4MaxU) \
- V(I16x8Add) \
- V(I16x8Sub) \
- V(I16x8Mul) \
- V(I16x8Eq) \
- V(I16x8Ne) \
- V(I16x8GtS) \
- V(I16x8GeS) \
- V(I16x8GtU) \
- V(I16x8GeU) \
- V(I16x8MinS) \
- V(I16x8MinU) \
- V(I16x8MaxS) \
- V(I16x8MaxU) \
- V(I8x16Add) \
- V(I8x16Sub) \
- V(I8x16Eq) \
- V(I8x16Ne) \
- V(I8x16GtS) \
- V(I8x16GeS) \
- V(I8x16GtU) \
- V(I8x16GeU) \
- V(I8x16MinS) \
- V(I8x16MinU) \
- V(I8x16MaxS) \
- V(I8x16MaxU)
-
-#define PROTOTYPE_SIMD_BINOP(name) \
- void name(Simd128Register dst, Simd128Register src1, Simd128Register src2);
+#define SIMD_BINOP_LIST(V) \
+ V(F64x2Add, Simd128Register) \
+ V(F64x2Sub, Simd128Register) \
+ V(F64x2Mul, Simd128Register) \
+ V(F64x2Div, Simd128Register) \
+ V(F64x2Min, Simd128Register) \
+ V(F64x2Max, Simd128Register) \
+ V(F64x2Eq, Simd128Register) \
+ V(F64x2Ne, Simd128Register) \
+ V(F64x2Lt, Simd128Register) \
+ V(F64x2Le, Simd128Register) \
+ V(F32x4Add, Simd128Register) \
+ V(F32x4Sub, Simd128Register) \
+ V(F32x4Mul, Simd128Register) \
+ V(F32x4Div, Simd128Register) \
+ V(F32x4Min, Simd128Register) \
+ V(F32x4Max, Simd128Register) \
+ V(F32x4Eq, Simd128Register) \
+ V(F32x4Ne, Simd128Register) \
+ V(F32x4Lt, Simd128Register) \
+ V(F32x4Le, Simd128Register) \
+ V(I64x2Add, Simd128Register) \
+ V(I64x2Sub, Simd128Register) \
+ V(I64x2Mul, Simd128Register) \
+ V(I64x2Eq, Simd128Register) \
+ V(I64x2Ne, Simd128Register) \
+ V(I64x2GtS, Simd128Register) \
+ V(I64x2GeS, Simd128Register) \
+ V(I64x2Shl, Register) \
+ V(I64x2ShrS, Register) \
+ V(I64x2ShrU, Register) \
+ V(I64x2Shl, const Operand&) \
+ V(I64x2ShrS, const Operand&) \
+ V(I64x2ShrU, const Operand&) \
+ V(I32x4Add, Simd128Register) \
+ V(I32x4Sub, Simd128Register) \
+ V(I32x4Mul, Simd128Register) \
+ V(I32x4Eq, Simd128Register) \
+ V(I32x4Ne, Simd128Register) \
+ V(I32x4GtS, Simd128Register) \
+ V(I32x4GeS, Simd128Register) \
+ V(I32x4GtU, Simd128Register) \
+ V(I32x4GeU, Simd128Register) \
+ V(I32x4MinS, Simd128Register) \
+ V(I32x4MinU, Simd128Register) \
+ V(I32x4MaxS, Simd128Register) \
+ V(I32x4MaxU, Simd128Register) \
+ V(I32x4Shl, Register) \
+ V(I32x4ShrS, Register) \
+ V(I32x4ShrU, Register) \
+ V(I32x4Shl, const Operand&) \
+ V(I32x4ShrS, const Operand&) \
+ V(I32x4ShrU, const Operand&) \
+ V(I16x8Add, Simd128Register) \
+ V(I16x8Sub, Simd128Register) \
+ V(I16x8Mul, Simd128Register) \
+ V(I16x8Eq, Simd128Register) \
+ V(I16x8Ne, Simd128Register) \
+ V(I16x8GtS, Simd128Register) \
+ V(I16x8GeS, Simd128Register) \
+ V(I16x8GtU, Simd128Register) \
+ V(I16x8GeU, Simd128Register) \
+ V(I16x8MinS, Simd128Register) \
+ V(I16x8MinU, Simd128Register) \
+ V(I16x8MaxS, Simd128Register) \
+ V(I16x8MaxU, Simd128Register) \
+ V(I16x8Shl, Register) \
+ V(I16x8ShrS, Register) \
+ V(I16x8ShrU, Register) \
+ V(I16x8Shl, const Operand&) \
+ V(I16x8ShrS, const Operand&) \
+ V(I16x8ShrU, const Operand&) \
+ V(I8x16Add, Simd128Register) \
+ V(I8x16Sub, Simd128Register) \
+ V(I8x16Eq, Simd128Register) \
+ V(I8x16Ne, Simd128Register) \
+ V(I8x16GtS, Simd128Register) \
+ V(I8x16GeS, Simd128Register) \
+ V(I8x16GtU, Simd128Register) \
+ V(I8x16GeU, Simd128Register) \
+ V(I8x16MinS, Simd128Register) \
+ V(I8x16MinU, Simd128Register) \
+ V(I8x16MaxS, Simd128Register) \
+ V(I8x16MaxU, Simd128Register) \
+ V(I8x16Shl, Register) \
+ V(I8x16ShrS, Register) \
+ V(I8x16ShrU, Register) \
+ V(I8x16Shl, const Operand&) \
+ V(I8x16ShrS, const Operand&) \
+ V(I8x16ShrU, const Operand&)
+
+#define PROTOTYPE_SIMD_BINOP(name, stype) \
+ void name(Simd128Register dst, Simd128Register src1, stype src2);
SIMD_BINOP_LIST(PROTOTYPE_SIMD_BINOP)
#undef PROTOTYPE_SIMD_BINOP
#undef SIMD_BINOP_LIST
diff --git a/deps/v8/src/codegen/s390/register-s390.h b/deps/v8/src/codegen/s390/register-s390.h
index 48accf08c5..6e3b6a3e2b 100644
--- a/deps/v8/src/codegen/s390/register-s390.h
+++ b/deps/v8/src/codegen/s390/register-s390.h
@@ -253,7 +253,6 @@ constexpr Register kReturnRegister2 = r4;
constexpr Register kJSFunctionRegister = r3;
constexpr Register kContextRegister = r13;
constexpr Register kAllocateSizeRegister = r3;
-constexpr Register kSpeculationPoisonRegister = r9;
constexpr Register kInterpreterAccumulatorRegister = r2;
constexpr Register kInterpreterBytecodeOffsetRegister = r6;
constexpr Register kInterpreterBytecodeArrayRegister = r7;
diff --git a/deps/v8/src/codegen/script-details.h b/deps/v8/src/codegen/script-details.h
index a0a364c6b5..e342e132d7 100644
--- a/deps/v8/src/codegen/script-details.h
+++ b/deps/v8/src/codegen/script-details.h
@@ -5,6 +5,7 @@
#ifndef V8_CODEGEN_SCRIPT_DETAILS_H_
#define V8_CODEGEN_SCRIPT_DETAILS_H_
+#include "include/v8-script.h"
#include "src/common/globals.h"
#include "src/objects/fixed-array.h"
#include "src/objects/objects.h"
diff --git a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
index edd1a977e6..dc39be5b84 100644
--- a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
+++ b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.cc
@@ -6,6 +6,7 @@
#include "src/codegen/assembler.h"
#include "src/codegen/cpu-features.h"
+#include "src/codegen/register-arch.h"
#if V8_TARGET_ARCH_IA32
#include "src/codegen/ia32/register-ia32.h"
@@ -15,9 +16,28 @@
#error Unsupported target architecture.
#endif
+// On IA32, an Operand can be a wrapper for a single register; such callers
+// should use the I8x16Splat overload that takes a Register |src|.
+#if V8_TARGET_ARCH_IA32
+#define DCHECK_OPERAND_IS_NOT_REG(op) DCHECK(!op.is_reg_only());
+#else
+#define DCHECK_OPERAND_IS_NOT_REG(op)
+#endif
+
namespace v8 {
namespace internal {
+void SharedTurboAssembler::Move(Register dst, uint32_t src) {
+ // Helper to paper over the different assembler function names.
+#if V8_TARGET_ARCH_IA32
+ mov(dst, Immediate(src));
+#elif V8_TARGET_ARCH_X64
+ movl(dst, Immediate(src));
+#else
+#error Unsupported target architecture.
+#endif
+}
+
void SharedTurboAssembler::Move(Register dst, Register src) {
// Helper to paper over the different assembler function names.
if (dst != src) {
@@ -31,6 +51,17 @@ void SharedTurboAssembler::Move(Register dst, Register src) {
}
}
+void SharedTurboAssembler::Add(Register dst, Immediate src) {
+ // Helper to paper over the different assembler function names.
+#if V8_TARGET_ARCH_IA32
+ add(dst, src);
+#elif V8_TARGET_ARCH_X64
+ addq(dst, src);
+#else
+#error Unsupported target architecture.
+#endif
+}
+
void SharedTurboAssembler::And(Register dst, Immediate src) {
// Helper to paper over the different assembler function names.
#if V8_TARGET_ARCH_IA32
@@ -42,17 +73,6 @@ void SharedTurboAssembler::And(Register dst, Immediate src) {
#endif
}
-void SharedTurboAssembler::Movapd(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vmovapd(dst, src);
- } else {
- // On SSE, movaps is 1 byte shorter than movapd, and has the same
- // behavior.
- movaps(dst, src);
- }
-}
-
void SharedTurboAssembler::Shufps(XMMRegister dst, XMMRegister src1,
XMMRegister src2, uint8_t imm8) {
if (CpuFeatures::IsSupported(AVX)) {
@@ -89,7 +109,7 @@ void SharedTurboAssembler::F64x2ReplaceLane(XMMRegister dst, XMMRegister src,
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(this, AVX);
if (lane == 0) {
- vpblendw(dst, src, rep, 0b00001111);
+ vmovsd(dst, src, rep);
} else {
vmovlhps(dst, src, rep);
}
@@ -100,7 +120,7 @@ void SharedTurboAssembler::F64x2ReplaceLane(XMMRegister dst, XMMRegister src,
movaps(dst, src);
}
if (lane == 0) {
- pblendw(dst, rep, 0b00001111);
+ movsd(dst, rep);
} else {
movlhps(dst, rep);
}
@@ -231,6 +251,187 @@ void SharedTurboAssembler::S128Store32Lane(Operand dst, XMMRegister src,
}
}
+template <typename Op>
+void SharedTurboAssembler::I8x16SplatPreAvx2(XMMRegister dst, Op src,
+ XMMRegister scratch) {
+ DCHECK(!CpuFeatures::IsSupported(AVX2));
+ CpuFeatureScope ssse3_scope(this, SSSE3);
+ Movd(dst, src);
+ Xorps(scratch, scratch);
+ Pshufb(dst, scratch);
+}
+
+void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Register src,
+ XMMRegister scratch) {
+ if (CpuFeatures::IsSupported(AVX2)) {
+ CpuFeatureScope avx2_scope(this, AVX2);
+ Movd(scratch, src);
+ vpbroadcastb(dst, scratch);
+ } else {
+ I8x16SplatPreAvx2(dst, src, scratch);
+ }
+}
+
+void SharedTurboAssembler::I8x16Splat(XMMRegister dst, Operand src,
+ XMMRegister scratch) {
+ DCHECK_OPERAND_IS_NOT_REG(src);
+ if (CpuFeatures::IsSupported(AVX2)) {
+ CpuFeatureScope avx2_scope(this, AVX2);
+ vpbroadcastb(dst, src);
+ } else {
+ I8x16SplatPreAvx2(dst, src, scratch);
+ }
+}
+
+void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1,
+ uint8_t src2, Register tmp1,
+ XMMRegister tmp2) {
+ DCHECK_NE(dst, tmp2);
+ // Perform 16-bit shift, then mask away low bits.
+ if (!CpuFeatures::IsSupported(AVX) && (dst != src1)) {
+ movaps(dst, src1);
+ src1 = dst;
+ }
+
+ uint8_t shift = truncate_to_int3(src2);
+ Psllw(dst, src1, byte{shift});
+
+ uint8_t bmask = static_cast<uint8_t>(0xff << shift);
+ uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
+ Move(tmp1, mask);
+ Movd(tmp2, tmp1);
+ Pshufd(tmp2, tmp2, uint8_t{0});
+ Pand(dst, tmp2);
+}
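Since SSE has no 8-bit shift, the byte shift above is done as a 16-bit shift followed by a broadcast mask that clears the bits dragged in from the neighbouring byte. A quick check of the mask arithmetic for shift == 3:

    uint8_t shift = 3;
    uint8_t bmask = static_cast<uint8_t>(0xff << shift);            // 0xF8
    uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
    // mask == 0xF8F8F8F8: keeps bits [7:3] of every byte, clearing the low
    // bits that the 16-bit psllw leaked across each byte boundary.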
+
+void SharedTurboAssembler::I8x16Shl(XMMRegister dst, XMMRegister src1,
+ Register src2, Register tmp1,
+ XMMRegister tmp2, XMMRegister tmp3) {
+ DCHECK(!AreAliased(dst, tmp2, tmp3));
+ DCHECK(!AreAliased(src1, tmp2, tmp3));
+
+ // Take shift value modulo 8.
+ Move(tmp1, src2);
+ And(tmp1, Immediate(7));
+ Add(tmp1, Immediate(8));
+ // Create a mask to unset high bits.
+ Movd(tmp3, tmp1);
+ Pcmpeqd(tmp2, tmp2);
+ Psrlw(tmp2, tmp2, tmp3);
+ Packuswb(tmp2, tmp2);
+ if (!CpuFeatures::IsSupported(AVX) && (dst != src1)) {
+ movaps(dst, src1);
+ src1 = dst;
+ }
+ // Mask off the unwanted bits before word-shifting.
+ Pand(dst, src1, tmp2);
+ Add(tmp1, Immediate(-8));
+ Movd(tmp3, tmp1);
+ Psllw(dst, dst, tmp3);
+}
+
+void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1,
+ uint8_t src2, XMMRegister tmp) {
+ // Unpack bytes into words, do word (16-bit) shifts, and repack.
+ DCHECK_NE(dst, tmp);
+ uint8_t shift = truncate_to_int3(src2) + 8;
+
+ Punpckhbw(tmp, src1);
+ Punpcklbw(dst, src1);
+ Psraw(tmp, shift);
+ Psraw(dst, shift);
+ Packsswb(dst, tmp);
+}
+
+void SharedTurboAssembler::I8x16ShrS(XMMRegister dst, XMMRegister src1,
+ Register src2, Register tmp1,
+ XMMRegister tmp2, XMMRegister tmp3) {
+ DCHECK(!AreAliased(dst, tmp2, tmp3));
+ DCHECK_NE(src1, tmp2);
+
+ // Unpack the bytes into words, do arithmetic shifts, and repack.
+ Punpckhbw(tmp2, src1);
+ Punpcklbw(dst, src1);
+ // Prepare shift value
+ Move(tmp1, src2);
+ // Take shift value modulo 8.
+ And(tmp1, Immediate(7));
+ Add(tmp1, Immediate(8));
+ Movd(tmp3, tmp1);
+ Psraw(tmp2, tmp3);
+ Psraw(dst, tmp3);
+ Packsswb(dst, tmp2);
+}
+
+void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1,
+ uint8_t src2, Register tmp1,
+ XMMRegister tmp2) {
+ DCHECK_NE(dst, tmp2);
+ if (!CpuFeatures::IsSupported(AVX) && (dst != src1)) {
+ movaps(dst, src1);
+ src1 = dst;
+ }
+
+ // Perform 16-bit shift, then mask away high bits.
+ uint8_t shift = truncate_to_int3(src2);
+ Psrlw(dst, src1, shift);
+
+ uint8_t bmask = 0xff >> shift;
+ uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
+ Move(tmp1, mask);
+ Movd(tmp2, tmp1);
+ Pshufd(tmp2, tmp2, byte{0});
+ Pand(dst, tmp2);
+}
+
+void SharedTurboAssembler::I8x16ShrU(XMMRegister dst, XMMRegister src1,
+ Register src2, Register tmp1,
+ XMMRegister tmp2, XMMRegister tmp3) {
+ DCHECK(!AreAliased(dst, tmp2, tmp3));
+ DCHECK_NE(src1, tmp2);
+
+ // Unpack the bytes into words, do logical shifts, and repack.
+ Punpckhbw(tmp2, src1);
+ Punpcklbw(dst, src1);
+ // Prepare shift value.
+ Move(tmp1, src2);
+ // Take shift value modulo 8.
+ And(tmp1, Immediate(7));
+ Add(tmp1, Immediate(8));
+ Movd(tmp3, tmp1);
+ Psrlw(tmp2, tmp3);
+ Psrlw(dst, tmp3);
+ Packuswb(dst, tmp2);
+}
+
+template <typename Op>
+void SharedTurboAssembler::I16x8SplatPreAvx2(XMMRegister dst, Op src) {
+ DCHECK(!CpuFeatures::IsSupported(AVX2));
+ Movd(dst, src);
+ Pshuflw(dst, dst, uint8_t{0x0});
+ Punpcklqdq(dst, dst);
+}
+
+void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Register src) {
+ if (CpuFeatures::IsSupported(AVX2)) {
+ CpuFeatureScope avx2_scope(this, AVX2);
+ Movd(dst, src);
+ vpbroadcastw(dst, dst);
+ } else {
+ I16x8SplatPreAvx2(dst, src);
+ }
+}
+
+void SharedTurboAssembler::I16x8Splat(XMMRegister dst, Operand src) {
+ DCHECK_OPERAND_IS_NOT_REG(src);
+ if (CpuFeatures::IsSupported(AVX2)) {
+ CpuFeatureScope avx2_scope(this, AVX2);
+ vpbroadcastw(dst, src);
+ } else {
+ I16x8SplatPreAvx2(dst, src);
+ }
+}
+
void SharedTurboAssembler::I16x8ExtMulLow(XMMRegister dst, XMMRegister src1,
XMMRegister src2, XMMRegister scratch,
bool is_signed) {
@@ -358,6 +559,65 @@ void SharedTurboAssembler::I16x8UConvertI8x16High(XMMRegister dst,
}
}
+void SharedTurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2,
+ XMMRegister scratch) {
+ ASM_CODE_COMMENT(this);
+ // k = i16x8.splat(0x8000)
+ Pcmpeqd(scratch, scratch);
+ Psllw(scratch, scratch, byte{15});
+
+ if (!CpuFeatures::IsSupported(AVX) && (dst != src1)) {
+ movaps(dst, src1);
+ src1 = dst;
+ }
+
+ Pmulhrsw(dst, src1, src2);
+ Pcmpeqw(scratch, dst);
+ Pxor(dst, scratch);
+}
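pmulhrsw computes (((a * b) >> 14) + 1) >> 1 per 16-bit lane, i.e. a rounded Q15 product; the only input pair that overflows is 0x8000 * 0x8000, which yields 0x8000 instead of the saturated 0x7FFF, and the pcmpeqw/pxor pair flips exactly that lane. As scalar arithmetic (a sketch assuming two's-complement int16_t, not the emitted code):

    #include <cstdint>
    int16_t Q15MulRSat(int16_t a, int16_t b) {
      int32_t p = ((int32_t{a} * b) >> 14) + 1;   // pmulhrsw with pre-round
      int16_t r = static_cast<int16_t>(p >> 1);
      // Lone overflow case: -1.0 * -1.0 must saturate to 0x7FFF.
      return (a == INT16_MIN && b == INT16_MIN) ? INT16_MAX : r;
    }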
+
+void SharedTurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst,
+ XMMRegister src,
+ XMMRegister tmp) {
+ ASM_CODE_COMMENT(this);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ // src = |a|b|c|d|e|f|g|h| (low)
+ // scratch = |0|a|0|c|0|e|0|g|
+ vpsrld(tmp, src, 16);
+ // dst = |0|b|0|d|0|f|0|h|
+ vpblendw(dst, src, tmp, 0xAA);
+ // dst = |a+b|c+d|e+f|g+h|
+ vpaddd(dst, tmp, dst);
+ } else if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ // There is a potentially better lowering if we get rip-relative
+ // constants, see https://github.com/WebAssembly/simd/pull/380.
+ movaps(tmp, src);
+ psrld(tmp, 16);
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ pblendw(dst, tmp, 0xAA);
+ paddd(dst, tmp);
+ } else {
+ // src = |a|b|c|d|e|f|g|h|
+ // tmp = i32x4.splat(0x0000FFFF)
+ pcmpeqd(tmp, tmp);
+ psrld(tmp, byte{16});
+ // tmp =|0|b|0|d|0|f|0|h|
+ andps(tmp, src);
+ // dst = |0|a|0|c|0|e|0|g|
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ psrld(dst, byte{16});
+ // dst = |a+b|c+d|e+f|g+h|
+ paddd(dst, tmp);
+ }
+}
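All three paths compute the same result: each 32-bit output lane is the zero-extended sum of one pair of adjacent unsigned 16-bit input lanes. As a scalar sketch (the lane accessors are illustrative):

    // Per-lane semantics of I32x4ExtAddPairwiseI16x8U:
    for (int i = 0; i < 4; ++i) {
      dst.u32[i] = uint32_t{src.u16[2 * i]} + src.u16[2 * i + 1];
    }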
+
// 1. Multiply low word into scratch.
// 2. Multiply high word (can be signed or unsigned) into dst.
// 3. Unpack and interleave scratch and dst into dst.
@@ -539,7 +799,7 @@ void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
Psllq(xmm_tmp, byte{63});
if (!CpuFeatures::IsSupported(AVX) && (dst != src)) {
- Movapd(dst, src);
+ movaps(dst, src);
src = dst;
}
// Add a bias of 2^63 to convert signed to unsigned.
@@ -572,7 +832,7 @@ void SharedTurboAssembler::I64x2ShrS(XMMRegister dst, XMMRegister src,
Movd(xmm_shift, tmp_shift);
if (!CpuFeatures::IsSupported(AVX) && (dst != src)) {
- Movapd(dst, src);
+ movaps(dst, src);
src = dst;
}
Pxor(dst, src, xmm_tmp);
@@ -640,11 +900,16 @@ void SharedTurboAssembler::I64x2UConvertI32x4High(XMMRegister dst,
vpxor(scratch, scratch, scratch);
vpunpckhdq(dst, src, scratch);
} else {
- if (dst != src) {
- movaps(dst, src);
+ if (dst == src) {
+ // xorps can be executed on more ports than pshufd.
+ xorps(scratch, scratch);
+ punpckhdq(dst, scratch);
+ } else {
+ CpuFeatureScope sse_scope(this, SSE4_1);
+ // No dependency on dst.
+ pshufd(dst, src, 0xEE);
+ pmovzxdq(dst, dst);
}
- xorps(scratch, scratch);
- punpckhdq(dst, scratch);
}
}
@@ -679,5 +944,74 @@ void SharedTurboAssembler::S128Select(XMMRegister dst, XMMRegister mask,
}
}
+void SharedTurboAssembler::S128Load8Splat(XMMRegister dst, Operand src,
+ XMMRegister scratch) {
+ // The trap handler uses the current pc to create a landing pad, so that it
+ // can determine whether a trap occurred in Wasm code due to an OOB load.
+ // Make sure the first instruction in each case below is the one that loads.
+ if (CpuFeatures::IsSupported(AVX2)) {
+ CpuFeatureScope avx2_scope(this, AVX2);
+ vpbroadcastb(dst, src);
+ } else if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ // Avoid dependency on previous value of dst.
+ vpinsrb(dst, scratch, src, uint8_t{0});
+ vpxor(scratch, scratch, scratch);
+ vpshufb(dst, dst, scratch);
+ } else {
+ CpuFeatureScope ssse4_scope(this, SSE4_1);
+ CpuFeatureScope ssse3_scope(this, SSSE3);
+ pinsrb(dst, src, uint8_t{0});
+ xorps(scratch, scratch);
+ pshufb(dst, scratch);
+ }
+}
+
+void SharedTurboAssembler::S128Load16Splat(XMMRegister dst, Operand src,
+ XMMRegister scratch) {
+ // The trap handler uses the current pc to create a landing pad, so that it
+ // can determine whether a trap occurred in Wasm code due to an OOB load.
+ // Make sure the first instruction in each case below is the one that loads.
+ if (CpuFeatures::IsSupported(AVX2)) {
+ CpuFeatureScope avx2_scope(this, AVX2);
+ vpbroadcastw(dst, src);
+ } else if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ // Avoid dependency on previous value of dst.
+ vpinsrw(dst, scratch, src, uint8_t{0});
+ vpshuflw(dst, dst, uint8_t{0});
+ vpunpcklqdq(dst, dst, dst);
+ } else {
+ pinsrw(dst, src, uint8_t{0});
+ pshuflw(dst, dst, uint8_t{0});
+ movlhps(dst, dst);
+ }
+}
+
+void SharedTurboAssembler::S128Load32Splat(XMMRegister dst, Operand src) {
+ // The trap handler uses the current pc to create a landing pad, so that it
+ // can determine whether a trap occurred in Wasm code due to an OOB load.
+ // Make sure the first instruction in each case below is the one that loads.
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vbroadcastss(dst, src);
+ } else {
+ movss(dst, src);
+ shufps(dst, dst, byte{0});
+ }
+}
+
+void SharedTurboAssembler::S128Store64Lane(Operand dst, XMMRegister src,
+ uint8_t laneidx) {
+ if (laneidx == 0) {
+ Movlps(dst, src);
+ } else {
+ DCHECK_EQ(1, laneidx);
+ Movhps(dst, src);
+ }
+}
+
} // namespace internal
} // namespace v8
+
+#undef DCHECK_OPERAND_IS_NOT_REG
diff --git a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
index 7c6f7185b9..c2d07392ac 100644
--- a/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
+++ b/deps/v8/src/codegen/shared-ia32-x64/macro-assembler-shared-ia32-x64.h
@@ -29,28 +29,44 @@ constexpr int kStackSavedSavedFPSize = 2 * kDoubleSize;
constexpr int kStackSavedSavedFPSize = kDoubleSize;
#endif // V8_ENABLE_WEBASSEMBLY
+// Base class for SharedTurboAssemblerBase. This class contains macro-assembler
+// functions that can be shared across ia32 and x64 without any template
+// machinery, i.e. they do not require the CRTP pattern that
+// SharedTurboAssemblerBase exposes. This allows us to keep the bulk of the
+// definitions in a separate source file, rather than putting everything
+// inside this header.
class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
public:
using TurboAssemblerBase::TurboAssemblerBase;
+ void Move(Register dst, uint32_t src);
// Move if registers are not identical.
void Move(Register dst, Register src);
+ void Add(Register dst, Immediate src);
void And(Register dst, Immediate src);
- void Movapd(XMMRegister dst, XMMRegister src);
-
- template <typename Dst, typename Src>
- void Movdqu(Dst dst, Src src) {
+ // Supports both SSE and AVX. On SSE, moves src to dst if they are not equal.
+ template <typename Op>
+ void Pshufb(XMMRegister dst, XMMRegister src, Op mask) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope avx_scope(this, AVX);
- vmovdqu(dst, src);
+ vpshufb(dst, src, mask);
} else {
- // movups is 1 byte shorter than movdqu. On most SSE systems, this incurs
- // no delay moving between integer and floating-point domain.
- movups(dst, src);
+ // Make sure these are different so that we won't overwrite mask.
+ DCHECK_NE(mask, dst);
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ CpuFeatureScope sse_scope(this, SSSE3);
+ pshufb(dst, mask);
}
}
+ template <typename Op>
+ void Pshufb(XMMRegister dst, Op mask) {
+ Pshufb(dst, dst, mask);
+ }
+
// Shufps that will mov src1 into dst if AVX is not supported.
void Shufps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
uint8_t imm8);
@@ -128,6 +144,25 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
args...); \
}
+// Define a macro which uses |avx_name| when AVX is supported, and |sse_name|
+// when AVX is not supported. This is useful for bit-wise instructions like
+// andpd/andps, where the behavior is exactly the same, but the *ps
+// version is 1 byte shorter, and on SSE-only processors there is no
+// performance difference since those processors don't differentiate between
+// the integer and floating-point domains.
+// Note: we require |avx_name| to be the AVX instruction name without the "v"
+// prefix. If we required the full AVX instruction name and the caller
+// accidentally passed in an SSE instruction, the code would compile without
+// any issues and silently generate the SSE instruction. By appending "v"
+// here, we ensure that we always generate an AVX instruction.
+#define AVX_OP_WITH_DIFF_SSE_INSTR(macro_name, avx_name, sse_name) \
+ template <typename Dst, typename Arg, typename... Args> \
+ void macro_name(Dst dst, Arg arg, Args... args) { \
+ AvxHelper<Dst, Arg, Args...>{this} \
+ .template emit<&Assembler::v##avx_name, &Assembler::sse_name>( \
+ dst, arg, args...); \
+ }
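+// For example, AVX_OP_WITH_DIFF_SSE_INSTR(Pxor, pxor, xorps) below defines a
+// Pxor macro-instruction that emits vpxor when AVX is available and xorps
+// otherwise.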
+
#define AVX_OP_SSE3(macro_name, name) \
template <typename Dst, typename Arg, typename... Args> \
void macro_name(Dst dst, Arg arg, Args... args) { \
@@ -163,15 +198,20 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
// Keep this list sorted by required extension, then instruction name.
AVX_OP(Addpd, addpd)
AVX_OP(Addps, addps)
+ AVX_OP(Addsd, addsd)
+ AVX_OP(Addss, addss)
AVX_OP(Andnpd, andnpd)
AVX_OP(Andnps, andnps)
AVX_OP(Andpd, andpd)
AVX_OP(Andps, andps)
AVX_OP(Cmpeqpd, cmpeqpd)
+ AVX_OP(Cmpeqps, cmpeqps)
AVX_OP(Cmplepd, cmplepd)
AVX_OP(Cmpleps, cmpleps)
AVX_OP(Cmpltpd, cmpltpd)
+ AVX_OP(Cmpltps, cmpltps)
AVX_OP(Cmpneqpd, cmpneqpd)
+ AVX_OP(Cmpneqps, cmpneqps)
AVX_OP(Cmpunordpd, cmpunordpd)
AVX_OP(Cmpunordps, cmpunordps)
AVX_OP(Cvtdq2pd, cvtdq2pd)
@@ -181,6 +221,8 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
AVX_OP(Cvttps2dq, cvttps2dq)
AVX_OP(Divpd, divpd)
AVX_OP(Divps, divps)
+ AVX_OP(Divsd, divsd)
+ AVX_OP(Divss, divss)
AVX_OP(Maxpd, maxpd)
AVX_OP(Maxps, maxps)
AVX_OP(Minpd, minpd)
@@ -198,6 +240,8 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
AVX_OP(Movups, movups)
AVX_OP(Mulpd, mulpd)
AVX_OP(Mulps, mulps)
+ AVX_OP(Mulsd, mulsd)
+ AVX_OP(Mulss, mulss)
AVX_OP(Orpd, orpd)
AVX_OP(Orps, orps)
AVX_OP(Packssdw, packssdw)
@@ -207,20 +251,26 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
AVX_OP(Paddd, paddd)
AVX_OP(Paddq, paddq)
AVX_OP(Paddsb, paddsb)
+ AVX_OP(Paddsw, paddsw)
AVX_OP(Paddusb, paddusb)
AVX_OP(Paddusw, paddusw)
AVX_OP(Paddw, paddw)
- AVX_OP(Pand, pand)
AVX_OP(Pavgb, pavgb)
AVX_OP(Pavgw, pavgw)
AVX_OP(Pcmpgtb, pcmpgtb)
+ AVX_OP(Pcmpgtd, pcmpgtd)
+ AVX_OP(Pcmpgtw, pcmpgtw)
AVX_OP(Pcmpeqd, pcmpeqd)
+ AVX_OP(Pcmpeqw, pcmpeqw)
+ AVX_OP(Pinsrw, pinsrw)
+ AVX_OP(Pmaddwd, pmaddwd)
+ AVX_OP(Pmaxsw, pmaxsw)
AVX_OP(Pmaxub, pmaxub)
+ AVX_OP(Pminsw, pminsw)
AVX_OP(Pminub, pminub)
AVX_OP(Pmovmskb, pmovmskb)
AVX_OP(Pmullw, pmullw)
AVX_OP(Pmuludq, pmuludq)
- AVX_OP(Por, por)
AVX_OP(Pshufd, pshufd)
AVX_OP(Pshufhw, pshufhw)
AVX_OP(Pshuflw, pshuflw)
@@ -236,7 +286,9 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
AVX_OP(Psubd, psubd)
AVX_OP(Psubq, psubq)
AVX_OP(Psubsb, psubsb)
+ AVX_OP(Psubsw, psubsw)
AVX_OP(Psubusb, psubusb)
+ AVX_OP(Psubusw, psubusw)
AVX_OP(Psubw, psubw)
AVX_OP(Punpckhbw, punpckhbw)
AVX_OP(Punpckhdq, punpckhdq)
@@ -246,7 +298,6 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
AVX_OP(Punpckldq, punpckldq)
AVX_OP(Punpcklqdq, punpcklqdq)
AVX_OP(Punpcklwd, punpcklwd)
- AVX_OP(Pxor, pxor)
AVX_OP(Rcpps, rcpps)
AVX_OP(Rsqrtps, rsqrtps)
AVX_OP(Sqrtpd, sqrtpd)
@@ -255,10 +306,18 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
AVX_OP(Sqrtss, sqrtss)
AVX_OP(Subpd, subpd)
AVX_OP(Subps, subps)
+ AVX_OP(Subsd, subsd)
+ AVX_OP(Subss, subss)
AVX_OP(Unpcklps, unpcklps)
AVX_OP(Xorpd, xorpd)
AVX_OP(Xorps, xorps)
+ AVX_OP_WITH_DIFF_SSE_INSTR(Movapd, movapd, movaps)
+ AVX_OP_WITH_DIFF_SSE_INSTR(Movdqu, movdqu, movups)
+ AVX_OP_WITH_DIFF_SSE_INSTR(Pand, pand, andps)
+ AVX_OP_WITH_DIFF_SSE_INSTR(Por, por, orps)
+ AVX_OP_WITH_DIFF_SSE_INSTR(Pxor, pxor, xorps)
+
AVX_OP_SSE3(Haddps, haddps)
AVX_OP_SSE3(Movddup, movddup)
AVX_OP_SSE3(Movshdup, movshdup)
@@ -267,23 +326,32 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
AVX_OP_SSSE3(Pabsd, pabsd)
AVX_OP_SSSE3(Pabsw, pabsw)
AVX_OP_SSSE3(Palignr, palignr)
+ AVX_OP_SSSE3(Pmulhrsw, pmulhrsw)
AVX_OP_SSSE3(Psignb, psignb)
AVX_OP_SSSE3(Psignd, psignd)
AVX_OP_SSSE3(Psignw, psignw)
AVX_OP_SSE4_1(Extractps, extractps)
+ AVX_OP_SSE4_1(Packusdw, packusdw)
AVX_OP_SSE4_1(Pblendw, pblendw)
AVX_OP_SSE4_1(Pextrb, pextrb)
AVX_OP_SSE4_1(Pextrw, pextrw)
+ AVX_OP_SSE4_1(Pinsrb, pinsrb)
AVX_OP_SSE4_1(Pmaxsb, pmaxsb)
AVX_OP_SSE4_1(Pmaxsd, pmaxsd)
+ AVX_OP_SSE4_1(Pmaxud, pmaxud)
+ AVX_OP_SSE4_1(Pmaxuw, pmaxuw)
AVX_OP_SSE4_1(Pminsb, pminsb)
+ AVX_OP_SSE4_1(Pminsd, pminsd)
+ AVX_OP_SSE4_1(Pminud, pminud)
+ AVX_OP_SSE4_1(Pminuw, pminuw)
AVX_OP_SSE4_1(Pmovsxbw, pmovsxbw)
AVX_OP_SSE4_1(Pmovsxdq, pmovsxdq)
AVX_OP_SSE4_1(Pmovsxwd, pmovsxwd)
AVX_OP_SSE4_1(Pmovzxbw, pmovzxbw)
AVX_OP_SSE4_1(Pmovzxdq, pmovzxdq)
AVX_OP_SSE4_1(Pmovzxwd, pmovzxwd)
+ AVX_OP_SSE4_1(Pmulld, pmulld)
AVX_OP_SSE4_1(Ptest, ptest)
AVX_OP_SSE4_1(Roundpd, roundpd)
AVX_OP_SSE4_1(Roundps, roundps)
@@ -298,6 +366,22 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
void F32x4Splat(XMMRegister dst, DoubleRegister src);
void F32x4ExtractLane(FloatRegister dst, XMMRegister src, uint8_t lane);
void S128Store32Lane(Operand dst, XMMRegister src, uint8_t laneidx);
+ void I8x16Splat(XMMRegister dst, Register src, XMMRegister scratch);
+ void I8x16Splat(XMMRegister dst, Operand src, XMMRegister scratch);
+ void I8x16Shl(XMMRegister dst, XMMRegister src1, uint8_t src2, Register tmp1,
+ XMMRegister tmp2);
+ void I8x16Shl(XMMRegister dst, XMMRegister src1, Register src2, Register tmp1,
+ XMMRegister tmp2, XMMRegister tmp3);
+ void I8x16ShrS(XMMRegister dst, XMMRegister src1, uint8_t src2,
+ XMMRegister tmp);
+ void I8x16ShrS(XMMRegister dst, XMMRegister src1, Register src2,
+ Register tmp1, XMMRegister tmp2, XMMRegister tmp3);
+ void I8x16ShrU(XMMRegister dst, XMMRegister src1, uint8_t src2, Register tmp1,
+ XMMRegister tmp2);
+ void I8x16ShrU(XMMRegister dst, XMMRegister src1, Register src2,
+ Register tmp1, XMMRegister tmp2, XMMRegister tmp3);
+ void I16x8Splat(XMMRegister dst, Register src);
+ void I16x8Splat(XMMRegister dst, Operand src);
void I16x8ExtMulLow(XMMRegister dst, XMMRegister src1, XMMRegister src2,
XMMRegister scratch, bool is_signed);
void I16x8ExtMulHighS(XMMRegister dst, XMMRegister src1, XMMRegister src2,
@@ -307,6 +391,11 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
void I16x8SConvertI8x16High(XMMRegister dst, XMMRegister src);
void I16x8UConvertI8x16High(XMMRegister dst, XMMRegister src,
XMMRegister scratch);
+ // Will move src1 to dst if AVX is not supported.
+ void I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister scratch);
+ void I32x4ExtAddPairwiseI16x8U(XMMRegister dst, XMMRegister src,
+ XMMRegister tmp);
// Requires that dst == src1 if AVX is not supported.
void I32x4ExtMul(XMMRegister dst, XMMRegister src1, XMMRegister src2,
XMMRegister scratch, bool low, bool is_signed);
@@ -333,7 +422,338 @@ class V8_EXPORT_PRIVATE SharedTurboAssembler : public TurboAssemblerBase {
// Requires dst == mask when AVX is not supported.
void S128Select(XMMRegister dst, XMMRegister mask, XMMRegister src1,
XMMRegister src2, XMMRegister scratch);
+ void S128Load8Splat(XMMRegister dst, Operand src, XMMRegister scratch);
+ void S128Load16Splat(XMMRegister dst, Operand src, XMMRegister scratch);
+ void S128Load32Splat(XMMRegister dst, Operand src);
+ void S128Store64Lane(Operand dst, XMMRegister src, uint8_t laneidx);
+
+ private:
+ template <typename Op>
+ void I8x16SplatPreAvx2(XMMRegister dst, Op src, XMMRegister scratch);
+ template <typename Op>
+ void I16x8SplatPreAvx2(XMMRegister dst, Op src);
+};
+
+// Common base class template shared by ia32 and x64 TurboAssembler. This uses
+// the Curiously Recurring Template Pattern (CRTP), where Impl is the concrete
+// class (a subclass of SharedTurboAssemblerBase, instantiated with itself).
+// This allows static polymorphism: member functions can be moved
+// into SharedTurboAssembler, and we can also call into member functions
+// defined in the ia32- or x64-specific TurboAssembler from within this
+// template class, via Impl.
+//
+// Note: all member functions must be defined in this header file so that the
+// compiler can generate code for the function definitions. See
+// https://isocpp.org/wiki/faq/templates#templates-defn-vs-decl for rationale.
+// If a function does not need polymorphism, move it into SharedTurboAssembler,
+// and define it outside of this header.
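+//
+// A minimal sketch of the pattern (illustrative only; the actual subclass
+// declarations live in the ia32 and x64 ports):
+//
+//   class TurboAssembler : public SharedTurboAssemblerBase<TurboAssembler> {
+//     // Arch-specific; reached from the base class through impl().
+//     Operand ExternalReferenceAsOperand(ExternalReference reference,
+//                                        Register scratch);
+//   };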
+template <typename Impl>
+class V8_EXPORT_PRIVATE SharedTurboAssemblerBase : public SharedTurboAssembler {
+ using SharedTurboAssembler::SharedTurboAssembler;
+
+ public:
+ void F64x2ConvertLowI32x4U(XMMRegister dst, XMMRegister src,
+ Register scratch) {
+ ASM_CODE_COMMENT(this);
+ // dst = [ src_low, 0x43300000, src_high, 0x43300000 ];
+ // 0x43300000'00000000 is a special double whose significand bits
+ // precisely represent all uint32 numbers.
+ if (!CpuFeatures::IsSupported(AVX) && dst != src) {
+ movaps(dst, src);
+ src = dst;
+ }
+ Unpcklps(dst, src,
+ ExternalReferenceAsOperand(
+ ExternalReference::
+ address_of_wasm_f64x2_convert_low_i32x4_u_int_mask(),
+ scratch));
+ Subpd(dst,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_double_2_power_52(), scratch));
+ }
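+ // Worked example for the trick above: a lane holding the uint32 7,
+ // interleaved with 0x43300000, forms the double with bit pattern
+ // 0x43300000'00000007, i.e. the value 2^52 + 7; subtracting 2^52 leaves
+ // exactly 7.0.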
+
+ void I32x4SConvertF32x4(XMMRegister dst, XMMRegister src, XMMRegister tmp,
+ Register scratch) {
+ Operand op = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_int32_overflow_as_float(), scratch);
+
+ // This algorithm works by:
+ // 1. lanes with NaNs are zeroed
+ // 2. lanes >= 2147483648.0f (INT32_MAX + 1) are set to 0xffff'ffff
+ // 3. cvttps2dq sets all out-of-range lanes to 0x8000'0000
+ //    a. correct for underflows (< INT32_MIN)
+ //    b. wrong for overflows, and we know from 2. which lanes overflowed
+ // 4. adjust for 3b by xor-ing 2 and 3
+ //    a. 0x8000'0000 xor 0xffff'ffff = 0x7fff'ffff (INT32_MAX)
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope scope(this, AVX);
+ vcmpeqps(tmp, src, src);
+ vandps(dst, src, tmp);
+ vcmpgeps(tmp, src, op);
+ vcvttps2dq(dst, dst);
+ vpxor(dst, dst, tmp);
+ } else {
+ if (src == dst) {
+ movaps(tmp, src);
+ cmpeqps(tmp, tmp);
+ andps(dst, tmp);
+ movaps(tmp, op);
+ cmpleps(tmp, dst);
+ cvttps2dq(dst, dst);
+ xorps(dst, tmp);
+ } else {
+ movaps(tmp, op);
+ cmpleps(tmp, src);
+ cvttps2dq(dst, src);
+ xorps(dst, tmp);
+ movaps(tmp, src);
+ cmpeqps(tmp, tmp);
+ andps(dst, tmp);
+ }
+ }
+ }
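+ // Worked example for step 4 above: a lane holding 3.0e9f (> INT32_MAX) gets
+ // 0xffff'ffff from the compare in step 2, 0x8000'0000 from cvttps2dq, and
+ // the final xor yields 0x7fff'ffff (INT32_MAX).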
+
+ void I32x4TruncSatF64x2SZero(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch, Register tmp) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ XMMRegister original_dst = dst;
+ // Make sure we don't overwrite src.
+ if (dst == src) {
+ DCHECK_NE(src, scratch);
+ dst = scratch;
+ }
+ // dst = 0 if src == NaN, else all ones.
+ vcmpeqpd(dst, src, src);
+ // dst = 0 if src == NaN, else INT32_MAX as double.
+ vandpd(
+ dst, dst,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_int32_max_as_double(), tmp));
+ // dst = 0 if src == NaN, else src saturated to INT32_MAX as double.
+ vminpd(dst, src, dst);
+ // Values > INT32_MAX are already saturated; values < INT32_MIN raise an
+ // exception, which is masked and returns 0x80000000.
+ vcvttpd2dq(original_dst, dst);
+ } else {
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ movaps(scratch, dst);
+ cmpeqpd(scratch, dst);
+ andps(scratch,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_int32_max_as_double(), tmp));
+ minpd(dst, scratch);
+ cvttpd2dq(dst, dst);
+ }
+ }
+
+ void I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch, Register tmp) {
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vxorpd(scratch, scratch, scratch);
+ // Saturate to 0.
+ vmaxpd(dst, src, scratch);
+ // Saturate to UINT32_MAX.
+ vminpd(
+ dst, dst,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_uint32_max_as_double(), tmp));
+ // Truncate.
+ vroundpd(dst, dst, kRoundToZero);
+ // Add the special double whose significand bits hold the uint32 value.
+ vaddpd(dst, dst,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_double_2_power_52(), tmp));
+ // Extract low 32 bits of each double's significand, zero top lanes.
+ // dst = [dst[0], dst[2], 0, 0]
+ vshufps(dst, dst, scratch, 0x88);
+ } else {
+ CpuFeatureScope scope(this, SSE4_1);
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ xorps(scratch, scratch);
+ maxpd(dst, scratch);
+ minpd(dst, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_uint32_max_as_double(),
+ tmp));
+ roundpd(dst, dst, kRoundToZero);
+ addpd(dst,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_double_2_power_52(), tmp));
+ shufps(dst, scratch, 0x88);
+ }
+ }
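+ // The add of 2^52 above places each truncated, in-range double's integer
+ // value in the low 32 bits of its significand, so the final shufps only
+ // needs to gather those low dwords into the bottom two lanes (the top two
+ // are taken from the zeroed scratch register).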
+
+ void I32x4ExtAddPairwiseI16x8S(XMMRegister dst, XMMRegister src,
+ Register scratch) {
+ Operand op = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i16x8_splat_0x0001(), scratch);
+ // pmaddwd multiplies signed words in src and op, producing
+ // signed doublewords, then adds pairwise.
+ // src = |a|b|c|d|e|f|g|h|
+ // dst = | a*1 + b*1 | c*1 + d*1 | e*1 + f*1 | g*1 + h*1 |
+ if (!CpuFeatures::IsSupported(AVX) && (dst != src)) {
+ movaps(dst, src);
+ src = dst;
+ }
+
+ Pmaddwd(dst, src, op);
+ }
+
+ void I16x8ExtAddPairwiseI8x16S(XMMRegister dst, XMMRegister src,
+ XMMRegister scratch, Register tmp) {
+ ASM_CODE_COMMENT(this);
+ // pmaddubsw treats its first operand as unsigned, so pass the external
+ // reference (a splat of 0x01) as the first operand.
+ Operand op = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x01(), tmp);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vmovdqa(scratch, op);
+ vpmaddubsw(dst, scratch, src);
+ } else {
+ CpuFeatureScope sse_scope(this, SSSE3);
+ if (dst == src) {
+ movaps(scratch, op);
+ pmaddubsw(scratch, src);
+ movaps(dst, scratch);
+ } else {
+ movaps(dst, op);
+ pmaddubsw(dst, src);
+ }
+ }
+ }
+
+ void I16x8ExtAddPairwiseI8x16U(XMMRegister dst, XMMRegister src,
+ Register scratch) {
+ ASM_CODE_COMMENT(this);
+ Operand op = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x01(), scratch);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpmaddubsw(dst, src, op);
+ } else {
+ CpuFeatureScope sse_scope(this, SSSE3);
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ pmaddubsw(dst, op);
+ }
+ }
+
+ void I8x16Swizzle(XMMRegister dst, XMMRegister src, XMMRegister mask,
+ XMMRegister scratch, Register tmp, bool omit_add = false) {
+ ASM_CODE_COMMENT(this);
+ if (omit_add) {
+ // We have determined that the indices are immediates, and they are either
+ // within bounds, or the top bit is set, so we can omit the add.
+ Pshufb(dst, src, mask);
+ return;
+ }
+
+ // Out-of-range indices should return 0; add 112 so that any index > 15
+ // ends up with the top bit set (>= 128), which makes pshufb zero that lane.
+ Operand op = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_swizzle_mask(), tmp);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vpaddusb(scratch, mask, op);
+ vpshufb(dst, src, scratch);
+ } else {
+ CpuFeatureScope sse_scope(this, SSSE3);
+ movaps(scratch, op);
+ if (dst != src) {
+ DCHECK_NE(dst, mask);
+ movaps(dst, src);
+ }
+ paddusb(scratch, mask);
+ pshufb(dst, scratch);
+ }
+ }
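+ // Worked example for the bias above: an in-range index 5 becomes
+ // 5 + 112 = 117 (0x75, top bit clear), which pshufb reduces to lane 5,
+ // while index 16 becomes 128 (0x80, top bit set), so pshufb zeroes that
+ // lane.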
+
+ void I8x16Popcnt(XMMRegister dst, XMMRegister src, XMMRegister tmp1,
+ XMMRegister tmp2, Register scratch) {
+ ASM_CODE_COMMENT(this);
+ DCHECK_NE(dst, tmp1);
+ DCHECK_NE(src, tmp1);
+ DCHECK_NE(dst, tmp2);
+ DCHECK_NE(src, tmp2);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(this, AVX);
+ vmovdqa(tmp1, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x0f(),
+ scratch));
+ vpandn(tmp2, tmp1, src);
+ vpand(dst, tmp1, src);
+ vmovdqa(tmp1, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_popcnt_mask(),
+ scratch));
+ vpsrlw(tmp2, tmp2, 4);
+ vpshufb(dst, tmp1, dst);
+ vpshufb(tmp2, tmp1, tmp2);
+ vpaddb(dst, dst, tmp2);
+ } else if (CpuFeatures::IsSupported(ATOM)) {
+ // Pre-Goldmont low-power Intel microarchitectures have a very slow
+ // PSHUFB instruction, so use a PSHUFB-free divide-and-conquer
+ // algorithm on these processors. The ATOM CPU feature captures exactly
+ // the right set of processors.
+ movaps(tmp1, src);
+ psrlw(tmp1, 1);
+ if (dst != src) {
+ movaps(dst, src);
+ }
+ andps(tmp1, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x55(),
+ scratch));
+ psubb(dst, tmp1);
+ Operand splat_0x33 = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x33(), scratch);
+ movaps(tmp1, dst);
+ andps(dst, splat_0x33);
+ psrlw(tmp1, 2);
+ andps(tmp1, splat_0x33);
+ paddb(dst, tmp1);
+ movaps(tmp1, dst);
+ psrlw(dst, 4);
+ paddb(dst, tmp1);
+ andps(dst, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x0f(),
+ scratch));
+ } else {
+ CpuFeatureScope sse_scope(this, SSSE3);
+ movaps(tmp1, ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_splat_0x0f(),
+ scratch));
+ Operand mask = ExternalReferenceAsOperand(
+ ExternalReference::address_of_wasm_i8x16_popcnt_mask(), scratch);
+ if (tmp2 != tmp1) {
+ movaps(tmp2, tmp1);
+ }
+ andps(tmp1, src);
+ andnps(tmp2, src);
+ psrlw(tmp2, 4);
+ movaps(dst, mask);
+ pshufb(dst, tmp1);
+ movaps(tmp1, mask);
+ pshufb(tmp1, tmp2);
+ paddb(dst, tmp1);
+ }
+ }
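+ // The SSSE3 path above is a nibble-lookup popcount: tmp1 and tmp2 end up
+ // holding the low and high nibbles of each byte, pshufb maps each nibble
+ // through the 16-entry popcnt table, and paddb sums the two halves. E.g.
+ // for the byte 0xb7: popcnt(0x7) + popcnt(0xb) = 3 + 3 = 6.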
+
+ private:
+ // All implementation-specific methods must be called through this.
+ Impl* impl() { return static_cast<Impl*>(this); }
+
+ Operand ExternalReferenceAsOperand(ExternalReference reference,
+ Register scratch) {
+ return impl()->ExternalReferenceAsOperand(reference, scratch);
+ }
};
+
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_SHARED_IA32_X64_MACRO_ASSEMBLER_SHARED_IA32_X64_H_
diff --git a/deps/v8/src/codegen/x64/assembler-x64-inl.h b/deps/v8/src/codegen/x64/assembler-x64-inl.h
index 4d30f01c08..628f8b6eda 100644
--- a/deps/v8/src/codegen/x64/assembler-x64-inl.h
+++ b/deps/v8/src/codegen/x64/assembler-x64-inl.h
@@ -42,8 +42,7 @@ void Assembler::emit_runtime_entry(Address entry, RelocInfo::Mode rmode) {
RecordRelocInfo(rmode);
uint32_t offset = static_cast<uint32_t>(entry - options().code_range_start);
if (IsOnHeap()) {
- saved_offsets_for_runtime_entries_.push_back(
- std::make_pair(pc_offset(), offset));
+ saved_offsets_for_runtime_entries_.emplace_back(pc_offset(), offset);
emitl(relative_target_offset(entry, reinterpret_cast<Address>(pc_)));
// We must ensure that `emitl` is not growing the assembler buffer
// and falling back to off-heap compilation.
@@ -66,8 +65,7 @@ void Assembler::emit(Immediate64 x) {
if (x.rmode_ == RelocInfo::FULL_EMBEDDED_OBJECT && IsOnHeap()) {
int offset = pc_offset();
Handle<HeapObject> object(reinterpret_cast<Address*>(x.value_));
- saved_handles_for_raw_object_ptr_.push_back(
- std::make_pair(offset, x.value_));
+ saved_handles_for_raw_object_ptr_.emplace_back(offset, x.value_);
emitq(static_cast<uint64_t>(object->ptr()));
DCHECK(EmbeddedObjectMatches(offset, object));
return;
diff --git a/deps/v8/src/codegen/x64/assembler-x64.cc b/deps/v8/src/codegen/x64/assembler-x64.cc
index 1e66311d95..108f381ba7 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/assembler-x64.cc
@@ -3347,26 +3347,6 @@ void Assembler::cvtqsi2sd(XMMRegister dst, Register src) {
emit_sse_operand(dst, src);
}
-void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
- DCHECK(!IsEnabled(AVX));
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x5A);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::cvtss2sd(XMMRegister dst, Operand src) {
- DCHECK(!IsEnabled(AVX));
- EnsureSpace ensure_space(this);
- emit(0xF3);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x5A);
- emit_sse_operand(dst, src);
-}
-
void Assembler::cvtsd2si(Register dst, XMMRegister src) {
DCHECK(!IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3601,6 +3581,14 @@ void Assembler::vmovdqa(XMMRegister dst, XMMRegister src) {
emit_sse_operand(dst, src);
}
+void Assembler::vmovdqa(YMMRegister dst, YMMRegister src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, xmm0, src, kL256, k66, k0F, kWIG);
+ emit(0x6F);
+ emit_sse_operand(dst, src);
+}
+
void Assembler::vmovdqu(XMMRegister dst, Operand src) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3625,6 +3613,14 @@ void Assembler::vmovdqu(XMMRegister dst, XMMRegister src) {
emit_sse_operand(src, dst);
}
+void Assembler::vmovdqu(YMMRegister dst, YMMRegister src) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(src, xmm0, dst, kL256, kF3, k0F, kWIG);
+ emit(0x7F);
+ emit_sse_operand(src, dst);
+}
+
void Assembler::vmovlps(XMMRegister dst, XMMRegister src1, Operand src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3688,6 +3684,15 @@ void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
emit_sse_operand(dst, src2);
}
+void Assembler::vps(byte op, YMMRegister dst, YMMRegister src1,
+ YMMRegister src2) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL256, kNone, k0F, kWIG);
+ emit(op);
+ emit_sse_operand(dst, src2);
+}
+
void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
DCHECK(IsEnabled(AVX));
EnsureSpace ensure_space(this);
@@ -3696,6 +3701,14 @@ void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2) {
emit_sse_operand(dst, src2);
}
+void Assembler::vps(byte op, YMMRegister dst, YMMRegister src1, Operand src2) {
+ DCHECK(IsEnabled(AVX));
+ EnsureSpace ensure_space(this);
+ emit_vex_prefix(dst, src1, src2, kL256, kNone, k0F, kWIG);
+ emit(op);
+ emit_sse_operand(dst, src2);
+}
+
void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1,
XMMRegister src2, byte imm8) {
DCHECK(IsEnabled(AVX));
diff --git a/deps/v8/src/codegen/x64/assembler-x64.h b/deps/v8/src/codegen/x64/assembler-x64.h
index c3d3af100b..cd93c7f856 100644
--- a/deps/v8/src/codegen/x64/assembler-x64.h
+++ b/deps/v8/src/codegen/x64/assembler-x64.h
@@ -235,6 +235,7 @@ class V8_EXPORT_PRIVATE Operand {
}
Operand(const Operand&) V8_NOEXCEPT = default;
+ Operand& operator=(const Operand&) V8_NOEXCEPT = default;
const Data& data() const { return data_; }
@@ -1241,9 +1242,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void cvtqsi2sd(XMMRegister dst, Operand src);
void cvtqsi2sd(XMMRegister dst, Register src);
- void cvtss2sd(XMMRegister dst, XMMRegister src);
- void cvtss2sd(XMMRegister dst, Operand src);
-
void cvtsd2si(Register dst, XMMRegister src);
void cvtsd2siq(Register dst, XMMRegister src);
@@ -1256,14 +1254,15 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void pmovmskb(Register dst, XMMRegister src);
+ void pinsrw(XMMRegister dst, Register src, uint8_t imm8);
+ void pinsrw(XMMRegister dst, Operand src, uint8_t imm8);
+
// SSE 4.1 instruction
void insertps(XMMRegister dst, XMMRegister src, byte imm8);
void insertps(XMMRegister dst, Operand src, byte imm8);
void pextrq(Register dst, XMMRegister src, int8_t imm8);
void pinsrb(XMMRegister dst, Register src, uint8_t imm8);
void pinsrb(XMMRegister dst, Operand src, uint8_t imm8);
- void pinsrw(XMMRegister dst, Register src, uint8_t imm8);
- void pinsrw(XMMRegister dst, Operand src, uint8_t imm8);
void pinsrd(XMMRegister dst, Register src, uint8_t imm8);
void pinsrd(XMMRegister dst, Operand src, uint8_t imm8);
void pinsrq(XMMRegister dst, Register src, uint8_t imm8);
@@ -1351,9 +1350,11 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vmovsd(Operand dst, XMMRegister src) { vsd(0x11, src, xmm0, dst); }
void vmovdqa(XMMRegister dst, Operand src);
void vmovdqa(XMMRegister dst, XMMRegister src);
+ void vmovdqa(YMMRegister dst, YMMRegister src);
void vmovdqu(XMMRegister dst, Operand src);
void vmovdqu(Operand dst, XMMRegister src);
void vmovdqu(XMMRegister dst, XMMRegister src);
+ void vmovdqu(YMMRegister dst, YMMRegister src);
void vmovlps(XMMRegister dst, XMMRegister src1, Operand src2);
void vmovlps(Operand dst, XMMRegister src);
@@ -1367,6 +1368,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
} \
void v##instr(XMMRegister dst, Operand src2) { \
vps(0x##opcode, dst, xmm0, src2); \
+ } \
+ void v##instr(YMMRegister dst, YMMRegister src2) { \
+ vps(0x##opcode, dst, ymm0, src2); \
+ } \
+ void v##instr(YMMRegister dst, Operand src2) { \
+ vps(0x##opcode, dst, ymm0, src2); \
}
SSE_UNOP_INSTRUCTION_LIST(AVX_SSE_UNOP)
#undef AVX_SSE_UNOP
@@ -1377,6 +1384,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
} \
void v##instr(XMMRegister dst, XMMRegister src1, Operand src2) { \
vps(0x##opcode, dst, src1, src2); \
+ } \
+ void v##instr(YMMRegister dst, YMMRegister src1, YMMRegister src2) { \
+ vps(0x##opcode, dst, src1, src2); \
+ } \
+ void v##instr(YMMRegister dst, YMMRegister src1, Operand src2) { \
+ vps(0x##opcode, dst, src1, src2); \
}
SSE_BINOP_INSTRUCTION_LIST(AVX_SSE_BINOP)
#undef AVX_SSE_BINOP
@@ -1422,12 +1435,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void vcvtdq2pd(XMMRegister dst, XMMRegister src) {
vinstr(0xe6, dst, xmm0, src, kF3, k0F, kWIG);
}
- void vcvtss2sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
- vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
- }
- void vcvtss2sd(XMMRegister dst, XMMRegister src1, Operand src2) {
- vinstr(0x5a, dst, src1, src2, kF3, k0F, kWIG);
- }
void vcvttps2dq(XMMRegister dst, XMMRegister src) {
vinstr(0x5b, dst, xmm0, src, kF3, k0F, kWIG);
}
@@ -1590,6 +1597,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
AVX_CMP_P(vcmpneq, 0x4)
AVX_CMP_P(vcmpnlt, 0x5)
AVX_CMP_P(vcmpnle, 0x6)
+ AVX_CMP_P(vcmpge, 0xd)
#undef AVX_CMP_P
@@ -1693,7 +1701,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
}
void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
+ void vps(byte op, YMMRegister dst, YMMRegister src1, YMMRegister src2);
void vps(byte op, XMMRegister dst, XMMRegister src1, Operand src2);
+ void vps(byte op, YMMRegister dst, YMMRegister src1, Operand src2);
void vps(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2,
byte imm8);
void vpd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
diff --git a/deps/v8/src/codegen/x64/fma-instr.h b/deps/v8/src/codegen/x64/fma-instr.h
index f41c91ee51..c607429e33 100644
--- a/deps/v8/src/codegen/x64/fma-instr.h
+++ b/deps/v8/src/codegen/x64/fma-instr.h
@@ -30,9 +30,17 @@
V(vfnmsub132ss, LIG, 66, 0F, 38, W0, 9f) \
V(vfnmsub213ss, LIG, 66, 0F, 38, W0, af) \
V(vfnmsub231ss, LIG, 66, 0F, 38, W0, bf) \
+ V(vfmadd132ps, L128, 66, 0F, 38, W0, 98) \
+ V(vfmadd213ps, L128, 66, 0F, 38, W0, a8) \
V(vfmadd231ps, L128, 66, 0F, 38, W0, b8) \
+ V(vfnmadd132ps, L128, 66, 0F, 38, W0, 9c) \
+ V(vfnmadd213ps, L128, 66, 0F, 38, W0, ac) \
V(vfnmadd231ps, L128, 66, 0F, 38, W0, bc) \
+ V(vfmadd132pd, L128, 66, 0F, 38, W1, 98) \
+ V(vfmadd213pd, L128, 66, 0F, 38, W1, a8) \
V(vfmadd231pd, L128, 66, 0F, 38, W1, b8) \
+ V(vfnmadd132pd, L128, 66, 0F, 38, W1, 9c) \
+ V(vfnmadd213pd, L128, 66, 0F, 38, W1, ac) \
V(vfnmadd231pd, L128, 66, 0F, 38, W1, bc)
#endif // V8_CODEGEN_X64_FMA_INSTR_H_
diff --git a/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h b/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h
index 50ba12b836..fade1eda99 100644
--- a/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h
+++ b/deps/v8/src/codegen/x64/interface-descriptors-x64-inl.h
@@ -43,12 +43,12 @@ constexpr auto WriteBarrierDescriptor::registers() {
#ifdef V8_IS_TSAN
// static
-constexpr auto TSANRelaxedStoreDescriptor::registers() {
+constexpr auto TSANStoreDescriptor::registers() {
return RegisterArray(arg_reg_1, arg_reg_2, kReturnRegister0);
}
// static
-constexpr auto TSANRelaxedLoadDescriptor::registers() {
+constexpr auto TSANLoadDescriptor::registers() {
return RegisterArray(arg_reg_1, kReturnRegister0);
}
#endif // V8_IS_TSAN
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.cc b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
index 5a8dc356b8..f4c498dc10 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.cc
@@ -294,6 +294,17 @@ void TurboAssembler::StoreTaggedSignedField(Operand dst_field_operand,
}
}
+void TurboAssembler::AtomicStoreTaggedField(Operand dst_field_operand,
+ Register value) {
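+ // xchg with a memory operand carries an implicit lock prefix, so this
+ // store is sequentially consistent.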
+ if (COMPRESS_POINTERS_BOOL) {
+ movl(kScratchRegister, value);
+ xchgl(kScratchRegister, dst_field_operand);
+ } else {
+ movq(kScratchRegister, value);
+ xchgq(kScratchRegister, dst_field_operand);
+ }
+}
+
void TurboAssembler::DecompressTaggedSigned(Register destination,
Operand field_operand) {
ASM_CODE_COMMENT(this);
@@ -483,26 +494,27 @@ void TurboAssembler::CallRecordWriteStub(
}
#ifdef V8_IS_TSAN
-void TurboAssembler::CallTSANRelaxedStoreStub(Register address, Register value,
- SaveFPRegsMode fp_mode, int size,
- StubCallMode mode) {
+void TurboAssembler::CallTSANStoreStub(Register address, Register value,
+ SaveFPRegsMode fp_mode, int size,
+ StubCallMode mode,
+ std::memory_order order) {
ASM_CODE_COMMENT(this);
DCHECK(!AreAliased(address, value));
- TSANRelaxedStoreDescriptor descriptor;
+ TSANStoreDescriptor descriptor;
RegList registers = descriptor.allocatable_registers();
MaybeSaveRegisters(registers);
Register address_parameter(
- descriptor.GetRegisterParameter(TSANRelaxedStoreDescriptor::kAddress));
+ descriptor.GetRegisterParameter(TSANStoreDescriptor::kAddress));
Register value_parameter(
- descriptor.GetRegisterParameter(TSANRelaxedStoreDescriptor::kValue));
+ descriptor.GetRegisterParameter(TSANStoreDescriptor::kValue));
- // Prepare argument registers for calling GetTSANRelaxedStoreStub.
+ // Prepare argument registers for calling GetTSANStoreStub.
MovePair(address_parameter, address, value_parameter, value);
if (isolate()) {
- Builtin builtin = CodeFactory::GetTSANRelaxedStoreStub(fp_mode, size);
+ Builtin builtin = CodeFactory::GetTSANStoreStub(fp_mode, size, order);
Handle<Code> code_target = isolate()->builtins()->code_handle(builtin);
Call(code_target, RelocInfo::CODE_TARGET);
}
@@ -520,7 +532,7 @@ void TurboAssembler::CallTSANRelaxedStoreStub(Register address, Register value,
else {
DCHECK_EQ(mode, StubCallMode::kCallWasmRuntimeStub);
// Use {near_call} for direct Wasm call within a module.
- auto wasm_target = wasm::WasmCode::GetTSANRelaxedStoreStub(fp_mode, size);
+ auto wasm_target = wasm::WasmCode::GetTSANStoreStub(fp_mode, size, order);
near_call(wasm_target, RelocInfo::WASM_STUB_CALL);
}
#endif // V8_ENABLE_WEBASSEMBLY
@@ -531,13 +543,13 @@ void TurboAssembler::CallTSANRelaxedStoreStub(Register address, Register value,
void TurboAssembler::CallTSANRelaxedLoadStub(Register address,
SaveFPRegsMode fp_mode, int size,
StubCallMode mode) {
- TSANRelaxedLoadDescriptor descriptor;
+ TSANLoadDescriptor descriptor;
RegList registers = descriptor.allocatable_registers();
MaybeSaveRegisters(registers);
Register address_parameter(
- descriptor.GetRegisterParameter(TSANRelaxedLoadDescriptor::kAddress));
+ descriptor.GetRegisterParameter(TSANLoadDescriptor::kAddress));
// Prepare argument registers for calling TSANRelaxedLoad.
Move(address_parameter, address);
@@ -847,6 +859,99 @@ void TurboAssembler::Movq(Register dst, XMMRegister src) {
}
}
+// Helper macro to define the qfma macro-assembler function. It takes care of
+// every possible case of register aliasing to minimize the number of
+// instructions.
+#define QFMA(ps_or_pd) \
+ if (CpuFeatures::IsSupported(FMA3)) { \
+ CpuFeatureScope fma3_scope(this, FMA3); \
+ if (dst == src1) { \
+ vfmadd231##ps_or_pd(dst, src2, src3); \
+ } else if (dst == src2) { \
+ vfmadd132##ps_or_pd(dst, src1, src3); \
+ } else if (dst == src3) { \
+ vfmadd213##ps_or_pd(dst, src2, src1); \
+ } else { \
+ vmovups(dst, src1); \
+ vfmadd231##ps_or_pd(dst, src2, src3); \
+ } \
+ } else if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope avx_scope(this, AVX); \
+ vmul##ps_or_pd(tmp, src2, src3); \
+ vadd##ps_or_pd(dst, src1, tmp); \
+ } else { \
+ if (dst == src1) { \
+ movaps(tmp, src2); \
+ mul##ps_or_pd(tmp, src3); \
+ add##ps_or_pd(dst, tmp); \
+ } else if (dst == src2) { \
+ DCHECK_NE(src2, src1); \
+ mul##ps_or_pd(src2, src3); \
+ add##ps_or_pd(src2, src1); \
+ } else if (dst == src3) { \
+ DCHECK_NE(src3, src1); \
+ mul##ps_or_pd(src3, src2); \
+ add##ps_or_pd(src3, src1); \
+ } else { \
+ movaps(dst, src2); \
+ mul##ps_or_pd(dst, src3); \
+ add##ps_or_pd(dst, src1); \
+ } \
+ }
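+// The digits in the FMA3 mnemonics above name the operand order: for
+// vfmaddXYZ dst, a, b, operands X and Y are multiplied and operand Z is
+// added, with 1 = dst, 2 = a, 3 = b. Each aliasing case above therefore
+// computes dst = src1 + src2 * src3 while avoiding an extra move.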
+
+// Helper macro to define the qfms macro-assembler function. It takes care of
+// every possible case of register aliasing to minimize the number of
+// instructions.
+#define QFMS(ps_or_pd) \
+ if (CpuFeatures::IsSupported(FMA3)) { \
+ CpuFeatureScope fma3_scope(this, FMA3); \
+ if (dst == src1) { \
+ vfnmadd231##ps_or_pd(dst, src2, src3); \
+ } else if (dst == src2) { \
+ vfnmadd132##ps_or_pd(dst, src1, src3); \
+ } else if (dst == src3) { \
+ vfnmadd213##ps_or_pd(dst, src2, src1); \
+ } else { \
+ vmovups(dst, src1); \
+ vfnmadd231##ps_or_pd(dst, src2, src3); \
+ } \
+ } else if (CpuFeatures::IsSupported(AVX)) { \
+ CpuFeatureScope avx_scope(this, AVX); \
+ vmul##ps_or_pd(tmp, src2, src3); \
+ vsub##ps_or_pd(dst, src1, tmp); \
+ } else { \
+ movaps(tmp, src2); \
+ mul##ps_or_pd(tmp, src3); \
+ if (dst != src1) { \
+ movaps(dst, src1); \
+ } \
+ sub##ps_or_pd(dst, tmp); \
+ }
+
+void TurboAssembler::F32x4Qfma(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister src3,
+ XMMRegister tmp) {
+ QFMA(ps)
+}
+
+void TurboAssembler::F32x4Qfms(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister src3,
+ XMMRegister tmp) {
+ QFMS(ps)
+}
+
+void TurboAssembler::F64x2Qfma(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister src3,
+ XMMRegister tmp) {
+ QFMA(pd);
+}
+
+void TurboAssembler::F64x2Qfms(XMMRegister dst, XMMRegister src1,
+ XMMRegister src2, XMMRegister src3,
+ XMMRegister tmp) {
+ QFMS(pd);
+}
+
+#undef QFMA
+#undef QFMS
+
void TurboAssembler::Movdqa(XMMRegister dst, Operand src) {
// See comments in Movdqa(XMMRegister, XMMRegister).
if (CpuFeatures::IsSupported(AVX)) {
@@ -1551,16 +1656,6 @@ void TurboAssembler::Move(XMMRegister dst, uint64_t high, uint64_t low) {
// ----------------------------------------------------------------------------
-void MacroAssembler::Absps(XMMRegister dst) {
- Andps(dst, ExternalReferenceAsOperand(
- ExternalReference::address_of_float_abs_constant()));
-}
-
-void MacroAssembler::Negps(XMMRegister dst) {
- Xorps(dst, ExternalReferenceAsOperand(
- ExternalReference::address_of_float_neg_constant()));
-}
-
void MacroAssembler::Cmp(Register dst, Handle<Object> source) {
if (source->IsSmi()) {
Cmp(dst, Smi::cast(*source));
@@ -1993,100 +2088,6 @@ void TurboAssembler::JumpCodeTObject(Register code, JumpMode jump_mode) {
}
}
-void TurboAssembler::RetpolineCall(Register reg) {
- ASM_CODE_COMMENT(this);
- Label setup_return, setup_target, inner_indirect_branch, capture_spec;
-
- jmp(&setup_return); // Jump past the entire retpoline below.
-
- bind(&inner_indirect_branch);
- call(&setup_target);
-
- bind(&capture_spec);
- pause();
- jmp(&capture_spec);
-
- bind(&setup_target);
- movq(Operand(rsp, 0), reg);
- ret(0);
-
- bind(&setup_return);
- call(&inner_indirect_branch); // Callee will return after this instruction.
-}
-
-void TurboAssembler::RetpolineCall(Address destination, RelocInfo::Mode rmode) {
- Move(kScratchRegister, destination, rmode);
- RetpolineCall(kScratchRegister);
-}
-
-void TurboAssembler::RetpolineJump(Register reg) {
- ASM_CODE_COMMENT(this);
- Label setup_target, capture_spec;
-
- call(&setup_target);
-
- bind(&capture_spec);
- pause();
- jmp(&capture_spec);
-
- bind(&setup_target);
- movq(Operand(rsp, 0), reg);
- ret(0);
-}
-
-void TurboAssembler::Pmaddwd(XMMRegister dst, XMMRegister src1, Operand src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpmaddwd(dst, src1, src2);
- } else {
- if (dst != src1) {
- movaps(dst, src1);
- }
- pmaddwd(dst, src2);
- }
-}
-
-void TurboAssembler::Pmaddwd(XMMRegister dst, XMMRegister src1,
- XMMRegister src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpmaddwd(dst, src1, src2);
- } else {
- if (dst != src1) {
- movaps(dst, src1);
- }
- pmaddwd(dst, src2);
- }
-}
-
-void TurboAssembler::Pmaddubsw(XMMRegister dst, XMMRegister src1,
- Operand src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpmaddubsw(dst, src1, src2);
- } else {
- CpuFeatureScope ssse3_scope(this, SSSE3);
- if (dst != src1) {
- movaps(dst, src1);
- }
- pmaddubsw(dst, src2);
- }
-}
-
-void TurboAssembler::Pmaddubsw(XMMRegister dst, XMMRegister src1,
- XMMRegister src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpmaddubsw(dst, src1, src2);
- } else {
- CpuFeatureScope ssse3_scope(this, SSSE3);
- if (dst != src1) {
- movaps(dst, src1);
- }
- pmaddubsw(dst, src2);
- }
-}
-
void TurboAssembler::Pextrd(Register dst, XMMRegister src, uint8_t imm8) {
if (imm8 == 0) {
Movd(dst, src);
@@ -2116,16 +2117,17 @@ using NoAvxFn = void (Assembler::*)(XMMRegister, Src, uint8_t);
template <typename Src>
void PinsrHelper(Assembler* assm, AvxFn<Src> avx, NoAvxFn<Src> noavx,
XMMRegister dst, XMMRegister src1, Src src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr,
base::Optional<CpuFeature> feature = base::nullopt) {
if (CpuFeatures::IsSupported(AVX)) {
CpuFeatureScope scope(assm, AVX);
+ if (load_pc_offset) *load_pc_offset = assm->pc_offset();
(assm->*avx)(dst, src1, src2, imm8);
return;
}
- if (dst != src1) {
- assm->movaps(dst, src1);
- }
+ if (dst != src1) assm->movaps(dst, src1);
+ if (load_pc_offset) *load_pc_offset = assm->pc_offset();
if (feature.has_value()) {
DCHECK(CpuFeatures::IsSupported(*feature));
CpuFeatureScope scope(assm, *feature);
@@ -2137,40 +2139,41 @@ void PinsrHelper(Assembler* assm, AvxFn<Src> avx, NoAvxFn<Src> noavx,
} // namespace
void TurboAssembler::Pinsrb(XMMRegister dst, XMMRegister src1, Register src2,
- uint8_t imm8) {
+ uint8_t imm8, uint32_t* load_pc_offset) {
PinsrHelper(this, &Assembler::vpinsrb, &Assembler::pinsrb, dst, src1, src2,
- imm8, base::Optional<CpuFeature>(SSE4_1));
+ imm8, load_pc_offset, {SSE4_1});
}
void TurboAssembler::Pinsrb(XMMRegister dst, XMMRegister src1, Operand src2,
- uint8_t imm8) {
+ uint8_t imm8, uint32_t* load_pc_offset) {
PinsrHelper(this, &Assembler::vpinsrb, &Assembler::pinsrb, dst, src1, src2,
- imm8, base::Optional<CpuFeature>(SSE4_1));
+ imm8, load_pc_offset, {SSE4_1});
}
void TurboAssembler::Pinsrw(XMMRegister dst, XMMRegister src1, Register src2,
- uint8_t imm8) {
+ uint8_t imm8, uint32_t* load_pc_offset) {
PinsrHelper(this, &Assembler::vpinsrw, &Assembler::pinsrw, dst, src1, src2,
- imm8);
+ imm8, load_pc_offset);
}
void TurboAssembler::Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2,
- uint8_t imm8) {
+ uint8_t imm8, uint32_t* load_pc_offset) {
PinsrHelper(this, &Assembler::vpinsrw, &Assembler::pinsrw, dst, src1, src2,
- imm8);
+ imm8, load_pc_offset);
}
void TurboAssembler::Pinsrd(XMMRegister dst, XMMRegister src1, Register src2,
- uint8_t imm8) {
+ uint8_t imm8, uint32_t* load_pc_offset) {
// Need a fallback when SSE4_1 is unavailable. Pinsrb and Pinsrq are used
// only by Wasm SIMD, which requires SSE4_1 already.
if (CpuFeatures::IsSupported(SSE4_1)) {
PinsrHelper(this, &Assembler::vpinsrd, &Assembler::pinsrd, dst, src1, src2,
- imm8, base::Optional<CpuFeature>(SSE4_1));
+ imm8, load_pc_offset, {SSE4_1});
return;
}
Movd(kScratchDoubleReg, src2);
+ if (load_pc_offset) *load_pc_offset = pc_offset();
if (imm8 == 1) {
punpckldq(dst, kScratchDoubleReg);
} else {
@@ -2180,16 +2183,17 @@ void TurboAssembler::Pinsrd(XMMRegister dst, XMMRegister src1, Register src2,
}
void TurboAssembler::Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2,
- uint8_t imm8) {
+ uint8_t imm8, uint32_t* load_pc_offset) {
// Need a fallback when SSE4_1 is unavailable. Pinsrb and Pinsrq are used
// only by Wasm SIMD, which requires SSE4_1 already.
if (CpuFeatures::IsSupported(SSE4_1)) {
PinsrHelper(this, &Assembler::vpinsrd, &Assembler::pinsrd, dst, src1, src2,
- imm8, base::Optional<CpuFeature>(SSE4_1));
+ imm8, load_pc_offset, {SSE4_1});
return;
}
Movd(kScratchDoubleReg, src2);
+ if (load_pc_offset) *load_pc_offset = pc_offset();
if (imm8 == 1) {
punpckldq(dst, kScratchDoubleReg);
} else {
@@ -2198,361 +2202,66 @@ void TurboAssembler::Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2,
}
}
-void TurboAssembler::Pinsrd(XMMRegister dst, Register src2, uint8_t imm8) {
- Pinsrd(dst, dst, src2, imm8);
+void TurboAssembler::Pinsrd(XMMRegister dst, Register src2, uint8_t imm8,
+ uint32_t* load_pc_offset) {
+ Pinsrd(dst, dst, src2, imm8, load_pc_offset);
}
-void TurboAssembler::Pinsrd(XMMRegister dst, Operand src2, uint8_t imm8) {
- Pinsrd(dst, dst, src2, imm8);
+void TurboAssembler::Pinsrd(XMMRegister dst, Operand src2, uint8_t imm8,
+ uint32_t* load_pc_offset) {
+ Pinsrd(dst, dst, src2, imm8, load_pc_offset);
}
void TurboAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Register src2,
- uint8_t imm8) {
+ uint8_t imm8, uint32_t* load_pc_offset) {
PinsrHelper(this, &Assembler::vpinsrq, &Assembler::pinsrq, dst, src1, src2,
- imm8, base::Optional<CpuFeature>(SSE4_1));
+ imm8, load_pc_offset, {SSE4_1});
}
void TurboAssembler::Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2,
- uint8_t imm8) {
+ uint8_t imm8, uint32_t* load_pc_offset) {
PinsrHelper(this, &Assembler::vpinsrq, &Assembler::pinsrq, dst, src1, src2,
- imm8, base::Optional<CpuFeature>(SSE4_1));
+ imm8, load_pc_offset, {SSE4_1});
}
-void TurboAssembler::Pblendvb(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister mask) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpblendvb(dst, src1, src2, mask);
- } else {
- CpuFeatureScope scope(this, SSE4_1);
- DCHECK_EQ(dst, src1);
- DCHECK_EQ(xmm0, mask);
- pblendvb(dst, src2);
- }
-}
-
-void TurboAssembler::Blendvps(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister mask) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vblendvps(dst, src1, src2, mask);
- } else {
- CpuFeatureScope scope(this, SSE4_1);
- DCHECK_EQ(dst, src1);
- DCHECK_EQ(xmm0, mask);
- blendvps(dst, src2);
- }
-}
-
-void TurboAssembler::Blendvpd(XMMRegister dst, XMMRegister src1,
- XMMRegister src2, XMMRegister mask) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vblendvpd(dst, src1, src2, mask);
- } else {
- CpuFeatureScope scope(this, SSE4_1);
- DCHECK_EQ(dst, src1);
- DCHECK_EQ(xmm0, mask);
- blendvpd(dst, src2);
- }
-}
-
-void TurboAssembler::Pshufb(XMMRegister dst, XMMRegister src,
- XMMRegister mask) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpshufb(dst, src, mask);
- } else {
- // Make sure these are different so that we won't overwrite mask.
- DCHECK_NE(dst, mask);
- if (dst != src) {
- movaps(dst, src);
- }
- CpuFeatureScope sse_scope(this, SSSE3);
- pshufb(dst, mask);
- }
-}
-
-void TurboAssembler::Pmulhrsw(XMMRegister dst, XMMRegister src1,
- XMMRegister src2) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpmulhrsw(dst, src1, src2);
- } else {
- if (dst != src1) {
- Movdqa(dst, src1);
- }
- CpuFeatureScope sse_scope(this, SSSE3);
- pmulhrsw(dst, src2);
- }
-}
-
-void TurboAssembler::I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1,
- XMMRegister src2) {
- // k = i16x8.splat(0x8000)
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Psllw(kScratchDoubleReg, byte{15});
-
- Pmulhrsw(dst, src1, src2);
- Pcmpeqw(kScratchDoubleReg, dst);
- Pxor(dst, kScratchDoubleReg);
-}
-
-void TurboAssembler::S128Store64Lane(Operand dst, XMMRegister src,
- uint8_t laneidx) {
- if (laneidx == 0) {
- Movlps(dst, src);
- } else {
- DCHECK_EQ(1, laneidx);
- Movhps(dst, src);
- }
-}
-
-void TurboAssembler::I8x16Popcnt(XMMRegister dst, XMMRegister src,
- XMMRegister tmp) {
- DCHECK_NE(dst, tmp);
- DCHECK_NE(src, tmp);
- DCHECK_NE(kScratchDoubleReg, tmp);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vmovdqa(tmp, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x0f()));
- vpandn(kScratchDoubleReg, tmp, src);
- vpand(dst, tmp, src);
- vmovdqa(tmp, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_popcnt_mask()));
- vpsrlw(kScratchDoubleReg, kScratchDoubleReg, 4);
- vpshufb(dst, tmp, dst);
- vpshufb(kScratchDoubleReg, tmp, kScratchDoubleReg);
- vpaddb(dst, dst, kScratchDoubleReg);
- } else if (CpuFeatures::IsSupported(ATOM)) {
- // Pre-Goldmont low-power Intel microarchitectures have very slow
- // PSHUFB instruction, thus use PSHUFB-free divide-and-conquer
- // algorithm on these processors. ATOM CPU feature captures exactly
- // the right set of processors.
- movaps(tmp, src);
- psrlw(tmp, 1);
- if (dst != src) {
- movaps(dst, src);
- }
- andps(tmp, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x55()));
- psubb(dst, tmp);
- Operand splat_0x33 = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x33());
- movaps(tmp, dst);
- andps(dst, splat_0x33);
- psrlw(tmp, 2);
- andps(tmp, splat_0x33);
- paddb(dst, tmp);
- movaps(tmp, dst);
- psrlw(dst, 4);
- paddb(dst, tmp);
- andps(dst, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x0f()));
- } else {
- movaps(tmp, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x0f()));
- Operand mask = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_popcnt_mask());
- Move(kScratchDoubleReg, tmp);
- andps(tmp, src);
- andnps(kScratchDoubleReg, src);
- psrlw(kScratchDoubleReg, 4);
- movaps(dst, mask);
- pshufb(dst, tmp);
- movaps(tmp, mask);
- pshufb(tmp, kScratchDoubleReg);
- paddb(dst, tmp);
- }
-}
-
-void TurboAssembler::F64x2ConvertLowI32x4U(XMMRegister dst, XMMRegister src) {
- // dst = [ src_low, 0x43300000, src_high, 0x4330000 ];
- // 0x43300000'00000000 is a special double where the significand bits
- // precisely represents all uint32 numbers.
+void TurboAssembler::Absps(XMMRegister dst, XMMRegister src) {
if (!CpuFeatures::IsSupported(AVX) && dst != src) {
movaps(dst, src);
src = dst;
}
- Unpcklps(dst, src,
- ExternalReferenceAsOperand(
- ExternalReference::
- address_of_wasm_f64x2_convert_low_i32x4_u_int_mask()));
- Subpd(dst, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_double_2_power_52()));
-}
-
-void TurboAssembler::I32x4TruncSatF64x2SZero(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- XMMRegister original_dst = dst;
- // Make sure we don't overwrite src.
- if (dst == src) {
- DCHECK_NE(src, kScratchDoubleReg);
- dst = kScratchDoubleReg;
- }
- // dst = 0 if src == NaN, else all ones.
- vcmpeqpd(dst, src, src);
- // dst = 0 if src == NaN, else INT32_MAX as double.
- vandpd(dst, dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_int32_max_as_double()));
- // dst = 0 if src == NaN, src is saturated to INT32_MAX as double.
- vminpd(dst, src, dst);
- // Values > INT32_MAX already saturated, values < INT32_MIN raises an
- // exception, which is masked and returns 0x80000000.
- vcvttpd2dq(dst, dst);
- if (original_dst != dst) {
- Move(original_dst, dst);
- }
- } else {
- if (dst != src) {
- Move(dst, src);
- }
- Move(kScratchDoubleReg, dst);
- cmpeqpd(kScratchDoubleReg, dst);
- andps(kScratchDoubleReg,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_int32_max_as_double()));
- minpd(dst, kScratchDoubleReg);
- cvttpd2dq(dst, dst);
- }
+ Andps(dst, src,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_float_abs_constant()));
}
-void TurboAssembler::I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vxorpd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- // Saturate to 0.
- vmaxpd(dst, src, kScratchDoubleReg);
- // Saturate to UINT32_MAX.
- vminpd(dst, dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_uint32_max_as_double()));
- // Truncate.
- vroundpd(dst, dst, kRoundToZero);
- // Add to special double where significant bits == uint32.
- vaddpd(dst, dst,
- ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_double_2_power_52()));
- // Extract low 32 bits of each double's significand, zero top lanes.
- // dst = [dst[0], dst[2], 0, 0]
- vshufps(dst, dst, kScratchDoubleReg, 0x88);
- } else {
- CpuFeatureScope scope(this, SSE4_1);
- if (dst != src) {
- Move(dst, src);
- }
- xorps(kScratchDoubleReg, kScratchDoubleReg);
- maxpd(dst, kScratchDoubleReg);
- minpd(dst, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_uint32_max_as_double()));
- roundpd(dst, dst, kRoundToZero);
- addpd(dst, ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_double_2_power_52()));
- shufps(dst, kScratchDoubleReg, 0x88);
- }
-}
-
-void TurboAssembler::I16x8ExtAddPairwiseI8x16S(XMMRegister dst,
- XMMRegister src) {
- // pmaddubsw treats the first operand as unsigned, so the external reference
- // to be passed to it as the first operand.
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x01());
- if (dst == src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vmovdqa(kScratchDoubleReg, op);
- vpmaddubsw(dst, kScratchDoubleReg, src);
- } else {
- CpuFeatureScope sse_scope(this, SSSE3);
- movaps(kScratchDoubleReg, op);
- pmaddubsw(kScratchDoubleReg, src);
- movaps(dst, kScratchDoubleReg);
- }
- } else {
- Movdqa(dst, op);
- Pmaddubsw(dst, dst, src);
+void TurboAssembler::Negps(XMMRegister dst, XMMRegister src) {
+ if (!CpuFeatures::IsSupported(AVX) && dst != src) {
+ movaps(dst, src);
+ src = dst;
}
+ Xorps(dst, src,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_float_neg_constant()));
}
-void TurboAssembler::I32x4ExtAddPairwiseI16x8U(XMMRegister dst,
- XMMRegister src) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- // src = |a|b|c|d|e|f|g|h| (low)
- // scratch = |0|a|0|c|0|e|0|g|
- vpsrld(kScratchDoubleReg, src, 16);
- // dst = |0|b|0|d|0|f|0|h|
- vpblendw(dst, src, kScratchDoubleReg, 0xAA);
- // dst = |a+b|c+d|e+f|g+h|
- vpaddd(dst, kScratchDoubleReg, dst);
- } else if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope sse_scope(this, SSE4_1);
- // There is a potentially better lowering if we get rip-relative constants,
- // see https://github.com/WebAssembly/simd/pull/380.
- movaps(kScratchDoubleReg, src);
- psrld(kScratchDoubleReg, 16);
- if (dst != src) {
- movaps(dst, src);
- }
- pblendw(dst, kScratchDoubleReg, 0xAA);
- paddd(dst, kScratchDoubleReg);
- } else {
- // src = |a|b|c|d|e|f|g|h|
- // kScratchDoubleReg = i32x4.splat(0x0000FFFF)
- pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- psrld(kScratchDoubleReg, byte{16});
- // kScratchDoubleReg =|0|b|0|d|0|f|0|h|
- andps(kScratchDoubleReg, src);
- // dst = |0|a|0|c|0|e|0|g|
- if (dst != src) {
- movaps(dst, src);
- }
- psrld(dst, byte{16});
- // dst = |a+b|c+d|e+f|g+h|
- paddd(dst, kScratchDoubleReg);
+void TurboAssembler::Abspd(XMMRegister dst, XMMRegister src) {
+ if (!CpuFeatures::IsSupported(AVX) && dst != src) {
+ movaps(dst, src);
+ src = dst;
}
+ Andps(dst, src,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_double_abs_constant()));
}
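
Every lowering of the removed I32x4ExtAddPairwiseI16x8U above computes the same per-lane arithmetic: split each 32-bit lane into its two unsigned 16-bit halves and add them. A scalar model of one lane (illustrative only):

#include <cstdint>

// High u16 half (psrld by 16) plus low u16 half (isolated via pblendw or
// andps in the SIMD variants).
uint32_t ExtAddPairwiseU16(uint32_t lane) {
  return (lane >> 16) + (lane & 0xFFFFu);
}
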
-void TurboAssembler::I8x16Swizzle(XMMRegister dst, XMMRegister src,
- XMMRegister mask, bool omit_add) {
- if (omit_add) {
- // We have determined that the indices are immediates, and they are either
- // within bounds, or the top bit is set, so we can omit the add.
- Pshufb(dst, src, mask);
- return;
- }
-
-  // Out-of-range indices should return 0. Add 112 with unsigned saturation so
-  // that any index > 15 has its top bit set, making pshufb zero that lane.
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_swizzle_mask());
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vpaddusb(kScratchDoubleReg, mask, op);
- vpshufb(dst, src, kScratchDoubleReg);
- } else {
- CpuFeatureScope sse_scope(this, SSSE3);
- movaps(kScratchDoubleReg, op);
- if (dst != src) {
- movaps(dst, src);
- }
- paddusb(kScratchDoubleReg, mask);
- pshufb(dst, kScratchDoubleReg);
+void TurboAssembler::Negpd(XMMRegister dst, XMMRegister src) {
+ if (!CpuFeatures::IsSupported(AVX) && dst != src) {
+ movaps(dst, src);
+ src = dst;
}
-}
-
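
The index clamping in the removed I8x16Swizzle above is a single saturating add: 112 (0x70) pushes indices 0..15 to 0x70..0x7F (top bit clear) and anything larger to 0x80..0xFF (top bit set), which pshufb zeroes. A scalar model of the mask adjustment (illustrative only):

#include <algorithm>
#include <cstdint>

// paddusb with 0x70 per mask byte: unsigned add, saturating at 0xFF.
uint8_t AdjustSwizzleIndex(uint8_t index) {
  return static_cast<uint8_t>(std::min<uint32_t>(index + 0x70u, 0xFFu));
}
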
-void TurboAssembler::Abspd(XMMRegister dst) {
- Andps(dst, ExternalReferenceAsOperand(
- ExternalReference::address_of_double_abs_constant()));
-}
-
-void TurboAssembler::Negpd(XMMRegister dst) {
- Xorps(dst, ExternalReferenceAsOperand(
- ExternalReference::address_of_double_neg_constant()));
+ Xorps(dst, src,
+ ExternalReferenceAsOperand(
+ ExternalReference::address_of_double_neg_constant()));
}
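
The new Absps/Negps/Abspd/Negpd helpers share one pattern: abs clears the sign bit by ANDing with an all-lanes 0x7FFF... mask, neg flips it by XORing with an all-lanes 0x8000... mask (assumed to be the values behind the referenced external constants). A single-float sketch:

#include <cstdint>
#include <cstring>

float AbsLane(float f) {  // Absps per lane: clear bit 31
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  bits &= 0x7FFFFFFFu;
  std::memcpy(&f, &bits, sizeof(bits));
  return f;
}

float NegLane(float f) {  // Negps per lane: flip bit 31
  uint32_t bits;
  std::memcpy(&bits, &f, sizeof(bits));
  bits ^= 0x80000000u;
  std::memcpy(&f, &bits, sizeof(bits));
  return f;
}
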
void TurboAssembler::Lzcntl(Register dst, Register src) {
@@ -2794,8 +2503,7 @@ void MacroAssembler::AssertCodeT(Register object) {
Check(not_equal, AbortReason::kOperandIsNotACodeT);
Push(object);
LoadMap(object, object);
- CmpInstanceType(object, V8_EXTERNAL_CODE_SPACE_BOOL ? CODE_DATA_CONTAINER_TYPE
- : CODE_TYPE);
+ CmpInstanceType(object, CODET_TYPE);
Pop(object);
Check(equal, AbortReason::kOperandIsNotACodeT);
}
@@ -3067,8 +2775,12 @@ void MacroAssembler::InvokePrologue(Register expected_parameter_count,
leaq(kScratchRegister,
Operand(expected_parameter_count, times_system_pointer_size, 0));
AllocateStackSpace(kScratchRegister);
- // Extra words are the receiver and the return address (if a jump).
- int extra_words = type == InvokeType::kCall ? 1 : 2;
+ // Extra words are the receiver (if not already included in argc) and the
+ // return address (if a jump).
+ int extra_words =
+ type == InvokeType::kCall ? 0 : kReturnAddressStackSlotCount;
+ if (!kJSArgcIncludesReceiver) extra_words++;
+
leaq(num, Operand(rax, extra_words)); // Number of words to copy.
Move(current, 0);
// Fall-through to the loop body because there are non-zero words to copy.
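
A worked instance of the new count, assuming kReturnAddressStackSlotCount == 1 on x64:

// InvokeType::kJump, receiver not included in argc:
//   extra_words = 1 (return address) + 1 (receiver) = 2
// InvokeType::kJump, kJSArgcIncludesReceiver:
//   extra_words = 1 (return address only)
// InvokeType::kCall: no return-address slot, so extra_words is just the
//   receiver term (1 without kJSArgcIncludesReceiver, 0 with it).
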
@@ -3523,11 +3235,6 @@ void TurboAssembler::ComputeCodeStartAddress(Register dst) {
leaq(dst, Operand(&current, -pc));
}
-void TurboAssembler::ResetSpeculationPoisonRegister() {
- // TODO(turbofan): Perhaps, we want to put an lfence here.
- Move(kSpeculationPoisonRegister, -1);
-}
-
void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
DeoptimizeKind kind, Label* ret,
Label*) {
diff --git a/deps/v8/src/codegen/x64/macro-assembler-x64.h b/deps/v8/src/codegen/x64/macro-assembler-x64.h
index 02b9eb410e..ec35108aba 100644
--- a/deps/v8/src/codegen/x64/macro-assembler-x64.h
+++ b/deps/v8/src/codegen/x64/macro-assembler-x64.h
@@ -57,53 +57,23 @@ class StackArgumentsAccessor {
DISALLOW_IMPLICIT_CONSTRUCTORS(StackArgumentsAccessor);
};
-class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
+class V8_EXPORT_PRIVATE TurboAssembler
+ : public SharedTurboAssemblerBase<TurboAssembler> {
public:
- using SharedTurboAssembler::SharedTurboAssembler;
- AVX_OP(Subsd, subsd)
- AVX_OP(Divss, divss)
- AVX_OP(Divsd, divsd)
- AVX_OP(Pcmpgtw, pcmpgtw)
- AVX_OP(Pmaxsw, pmaxsw)
- AVX_OP(Pminsw, pminsw)
- AVX_OP(Addss, addss)
- AVX_OP(Addsd, addsd)
- AVX_OP(Mulsd, mulsd)
- AVX_OP(Cmpeqps, cmpeqps)
- AVX_OP(Cmpltps, cmpltps)
- AVX_OP(Cmpneqps, cmpneqps)
- AVX_OP(Cmpnltps, cmpnltps)
- AVX_OP(Cmpnleps, cmpnleps)
- AVX_OP(Cmpnltpd, cmpnltpd)
- AVX_OP(Cmpnlepd, cmpnlepd)
- AVX_OP(Cvttpd2dq, cvttpd2dq)
+ using SharedTurboAssemblerBase<TurboAssembler>::SharedTurboAssemblerBase;
AVX_OP(Ucomiss, ucomiss)
AVX_OP(Ucomisd, ucomisd)
- AVX_OP(Psubsw, psubsw)
- AVX_OP(Psubusw, psubusw)
- AVX_OP(Paddsw, paddsw)
- AVX_OP(Pcmpgtd, pcmpgtd)
AVX_OP(Pcmpeqb, pcmpeqb)
AVX_OP(Pcmpeqw, pcmpeqw)
AVX_OP(Pcmpeqd, pcmpeqd)
AVX_OP(Movlhps, movlhps)
- AVX_OP_SSSE3(Phaddd, phaddd)
- AVX_OP_SSSE3(Phaddw, phaddw)
- AVX_OP_SSSE3(Pshufb, pshufb)
AVX_OP_SSE4_1(Pcmpeqq, pcmpeqq)
AVX_OP_SSE4_1(Packusdw, packusdw)
- AVX_OP_SSE4_1(Pminsd, pminsd)
- AVX_OP_SSE4_1(Pminuw, pminuw)
- AVX_OP_SSE4_1(Pminud, pminud)
- AVX_OP_SSE4_1(Pmaxuw, pmaxuw)
- AVX_OP_SSE4_1(Pmaxud, pmaxud)
- AVX_OP_SSE4_1(Pmulld, pmulld)
AVX_OP_SSE4_1(Insertps, insertps)
AVX_OP_SSE4_1(Pinsrq, pinsrq)
AVX_OP_SSE4_1(Pextrq, pextrq)
AVX_OP_SSE4_1(Roundss, roundss)
AVX_OP_SSE4_1(Roundsd, roundsd)
- AVX_OP_SSE4_2(Pcmpgtq, pcmpgtq)
#undef AVX_OP
@@ -113,6 +83,15 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void Movq(XMMRegister dst, Register src);
void Movq(Register dst, XMMRegister src);
+ void F64x2Qfma(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister src3, XMMRegister tmp);
+ void F64x2Qfms(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister src3, XMMRegister tmp);
+ void F32x4Qfma(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister src3, XMMRegister tmp);
+ void F32x4Qfms(XMMRegister dst, XMMRegister src1, XMMRegister src2,
+ XMMRegister src3, XMMRegister tmp);
+
void PushReturnAddressFrom(Register src) { pushq(src); }
void PopReturnAddressTo(Register dst) { popq(dst); }
@@ -432,17 +411,12 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void CallCodeTObject(Register code);
void JumpCodeTObject(Register code, JumpMode jump_mode = JumpMode::kJump);
- void RetpolineCall(Register reg);
- void RetpolineCall(Address destination, RelocInfo::Mode rmode);
-
void Jump(Address destination, RelocInfo::Mode rmode);
void Jump(const ExternalReference& reference);
void Jump(Operand op);
void Jump(Handle<Code> code_object, RelocInfo::Mode rmode,
Condition cc = always);
- void RetpolineJump(Register reg);
-
void CallForDeoptimization(Builtin target, int deopt_id, Label* exit,
DeoptimizeKind kind, Label* ret,
Label* jump_deoptimization_entry_label);
@@ -450,58 +424,34 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void Trap();
void DebugBreak();
- // Will move src1 to dst if dst != src1.
- void Pmaddwd(XMMRegister dst, XMMRegister src1, Operand src2);
- void Pmaddwd(XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void Pmaddubsw(XMMRegister dst, XMMRegister src1, Operand src2);
- void Pmaddubsw(XMMRegister dst, XMMRegister src1, XMMRegister src2);
-
// Non-SSE2 instructions.
void Pextrd(Register dst, XMMRegister src, uint8_t imm8);
- void Pinsrb(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8);
- void Pinsrb(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
- void Pinsrw(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8);
- void Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
- void Pinsrd(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8);
- void Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
- void Pinsrd(XMMRegister dst, Register src2, uint8_t imm8);
- void Pinsrd(XMMRegister dst, Operand src2, uint8_t imm8);
- void Pinsrq(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8);
- void Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8);
-
- void Pblendvb(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister mask);
- void Blendvps(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister mask);
- void Blendvpd(XMMRegister dst, XMMRegister src1, XMMRegister src2,
- XMMRegister mask);
-
- // Supports both SSE and AVX. Move src1 to dst if they are not equal on SSE.
- void Pshufb(XMMRegister dst, XMMRegister src1, XMMRegister src2);
- void Pmulhrsw(XMMRegister dst, XMMRegister src1, XMMRegister src2);
-
- // These Wasm SIMD ops do not have direct lowerings on x64. These
- // helpers are optimized to produce the fastest and smallest codegen.
- // Defined here to allow usage on both TurboFan and Liftoff.
- void I16x8Q15MulRSatS(XMMRegister dst, XMMRegister src1, XMMRegister src2);
-
- void S128Store64Lane(Operand dst, XMMRegister src, uint8_t laneidx);
-
- void I8x16Popcnt(XMMRegister dst, XMMRegister src, XMMRegister tmp);
-
- void F64x2ConvertLowI32x4U(XMMRegister dst, XMMRegister src);
- void I32x4TruncSatF64x2SZero(XMMRegister dst, XMMRegister src);
- void I32x4TruncSatF64x2UZero(XMMRegister dst, XMMRegister src);
-
- void I16x8ExtAddPairwiseI8x16S(XMMRegister dst, XMMRegister src);
- void I32x4ExtAddPairwiseI16x8U(XMMRegister dst, XMMRegister src);
-
- void I8x16Swizzle(XMMRegister dst, XMMRegister src, XMMRegister mask,
- bool omit_add = false);
-
- void Abspd(XMMRegister dst);
- void Negpd(XMMRegister dst);
+ void Pinsrb(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+ void Pinsrb(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+ void Pinsrw(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+ void Pinsrw(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+ void Pinsrd(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+ void Pinsrd(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+ void Pinsrd(XMMRegister dst, Register src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+ void Pinsrd(XMMRegister dst, Operand src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+ void Pinsrq(XMMRegister dst, XMMRegister src1, Register src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+ void Pinsrq(XMMRegister dst, XMMRegister src1, Operand src2, uint8_t imm8,
+ uint32_t* load_pc_offset = nullptr);
+
+ void Absps(XMMRegister dst, XMMRegister src);
+ void Negps(XMMRegister dst, XMMRegister src);
+ void Abspd(XMMRegister dst, XMMRegister src);
+ void Negpd(XMMRegister dst, XMMRegister src);
void CompareRoot(Register with, RootIndex index);
void CompareRoot(Operand with, RootIndex index);
@@ -595,9 +545,9 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
StubCallMode mode = StubCallMode::kCallBuiltinPointer);
#ifdef V8_IS_TSAN
- void CallTSANRelaxedStoreStub(Register address, Register value,
- SaveFPRegsMode fp_mode, int size,
- StubCallMode mode);
+ void CallTSANStoreStub(Register address, Register value,
+ SaveFPRegsMode fp_mode, int size, StubCallMode mode,
+ std::memory_order order);
void CallTSANRelaxedLoadStub(Register address, SaveFPRegsMode fp_mode,
int size, StubCallMode mode);
#endif // V8_IS_TSAN
@@ -632,8 +582,6 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
// This is an alternative to embedding the {CodeObject} handle as a reference.
void ComputeCodeStartAddress(Register dst);
- void ResetSpeculationPoisonRegister();
-
// Control-flow integrity:
// Define a function entrypoint. This doesn't emit any code for this
@@ -676,6 +624,7 @@ class V8_EXPORT_PRIVATE TurboAssembler : public SharedTurboAssembler {
void StoreTaggedField(Operand dst_field_operand, Immediate immediate);
void StoreTaggedField(Operand dst_field_operand, Register value);
void StoreTaggedSignedField(Operand dst_field_operand, Smi value);
+ void AtomicStoreTaggedField(Operand dst_field_operand, Register value);
// The following macros work even when pointer compression is not enabled.
void DecompressTaggedSigned(Register destination, Operand field_operand);
@@ -851,10 +800,6 @@ class V8_EXPORT_PRIVATE MacroAssembler : public TurboAssembler {
void Pop(Operand dst);
void PopQuad(Operand dst);
- // ---------------------------------------------------------------------------
- // SIMD macros.
- void Absps(XMMRegister dst);
- void Negps(XMMRegister dst);
// Generates a trampoline to jump to the off-heap instruction stream.
void JumpToInstructionStream(Address entry);
diff --git a/deps/v8/src/codegen/x64/register-x64.h b/deps/v8/src/codegen/x64/register-x64.h
index 61e7ccf396..f36763f2e4 100644
--- a/deps/v8/src/codegen/x64/register-x64.h
+++ b/deps/v8/src/codegen/x64/register-x64.h
@@ -155,6 +155,24 @@ constexpr Register arg_reg_4 = rcx;
V(xmm13) \
V(xmm14)
+#define YMM_REGISTERS(V) \
+ V(ymm0) \
+ V(ymm1) \
+ V(ymm2) \
+ V(ymm3) \
+ V(ymm4) \
+ V(ymm5) \
+ V(ymm6) \
+ V(ymm7) \
+ V(ymm8) \
+ V(ymm9) \
+ V(ymm10) \
+ V(ymm11) \
+ V(ymm12) \
+ V(ymm13) \
+ V(ymm14) \
+ V(ymm15)
+
// Returns the number of padding slots needed for stack pointer alignment.
constexpr int ArgumentPaddingSlots(int argument_count) {
// No argument padding required.
@@ -171,6 +189,17 @@ enum DoubleRegisterCode {
kDoubleAfterLast
};
+enum YMMRegisterCode {
+#define REGISTER_CODE(R) kYMMCode_##R,
+ YMM_REGISTERS(REGISTER_CODE)
+#undef REGISTER_CODE
+ kYMMAfterLast
+};
+static_assert(static_cast<int>(kDoubleAfterLast) ==
+ static_cast<int>(kYMMAfterLast),
+ "The number of XMM register codes must match the number of YMM "
+ "register codes");
+
class XMMRegister : public RegisterBase<XMMRegister, kDoubleAfterLast> {
public:
// Return the high bit of the register code as a 0 or 1. Used often
@@ -180,7 +209,7 @@ class XMMRegister : public RegisterBase<XMMRegister, kDoubleAfterLast> {
// in modR/M, SIB, and opcode bytes.
int low_bits() const { return code() & 0x7; }
- private:
+ protected:
friend class RegisterBase<XMMRegister, kDoubleAfterLast>;
explicit constexpr XMMRegister(int code) : RegisterBase(code) {}
};
@@ -189,6 +218,22 @@ ASSERT_TRIVIALLY_COPYABLE(XMMRegister);
static_assert(sizeof(XMMRegister) == sizeof(int),
"XMMRegister can efficiently be passed by value");
+class YMMRegister : public XMMRegister {
+ public:
+ static constexpr YMMRegister from_code(int code) {
+ DCHECK(base::IsInRange(code, 0, XMMRegister::kNumRegisters - 1));
+ return YMMRegister(code);
+ }
+
+ private:
+ friend class XMMRegister;
+ explicit constexpr YMMRegister(int code) : XMMRegister(code) {}
+};
+
+ASSERT_TRIVIALLY_COPYABLE(YMMRegister);
+static_assert(sizeof(YMMRegister) == sizeof(int),
+ "YMMRegister can efficiently be passed by value");
+
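
Since ymm registers are the 256-bit views of the xmm register file and reuse its codes (which the static_assert on the register-code counts enforces), a YMMRegister is built from the same small integers. A short sketch using the declarations above:

// ymm3 aliases xmm3: both carry register code 3; only the operand width the
// assembler encodes differs.
constexpr YMMRegister y3 = YMMRegister::from_code(3);
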
using FloatRegister = XMMRegister;
using DoubleRegister = XMMRegister;
@@ -201,9 +246,15 @@ DOUBLE_REGISTERS(DECLARE_REGISTER)
#undef DECLARE_REGISTER
constexpr DoubleRegister no_dreg = DoubleRegister::no_reg();
+#define DECLARE_REGISTER(R) \
+ constexpr YMMRegister R = YMMRegister::from_code(kYMMCode_##R);
+YMM_REGISTERS(DECLARE_REGISTER)
+#undef DECLARE_REGISTER
+
// Define {RegisterName} methods for the register types.
DEFINE_REGISTER_NAMES(Register, GENERAL_REGISTERS)
DEFINE_REGISTER_NAMES(XMMRegister, DOUBLE_REGISTERS)
+DEFINE_REGISTER_NAMES(YMMRegister, YMM_REGISTERS)
// Give alias names to registers for calling conventions.
constexpr Register kReturnRegister0 = rax;
@@ -212,7 +263,6 @@ constexpr Register kReturnRegister2 = r8;
constexpr Register kJSFunctionRegister = rdi;
constexpr Register kContextRegister = rsi;
constexpr Register kAllocateSizeRegister = rdx;
-constexpr Register kSpeculationPoisonRegister = r11;
constexpr Register kInterpreterAccumulatorRegister = rax;
constexpr Register kInterpreterBytecodeOffsetRegister = r9;
constexpr Register kInterpreterBytecodeArrayRegister = r12;
diff --git a/deps/v8/src/codegen/x64/sse-instr.h b/deps/v8/src/codegen/x64/sse-instr.h
index 452cc0f690..d1223b69a1 100644
--- a/deps/v8/src/codegen/x64/sse-instr.h
+++ b/deps/v8/src/codegen/x64/sse-instr.h
@@ -32,6 +32,7 @@
V(sqrtss, F3, 0F, 51) \
V(addss, F3, 0F, 58) \
V(mulss, F3, 0F, 59) \
+ V(cvtss2sd, F3, 0F, 5A) \
V(subss, F3, 0F, 5C) \
V(minss, F3, 0F, 5D) \
V(divss, F3, 0F, 5E) \
diff --git a/deps/v8/src/common/globals.h b/deps/v8/src/common/globals.h
index 6aee59eb83..6df1da88ae 100644
--- a/deps/v8/src/common/globals.h
+++ b/deps/v8/src/common/globals.h
@@ -62,6 +62,9 @@ constexpr int GB = MB * 1024;
#if (V8_TARGET_ARCH_RISCV64 && !V8_HOST_ARCH_RISCV64)
#define USE_SIMULATOR 1
#endif
+#if (V8_TARGET_ARCH_LOONG64 && !V8_HOST_ARCH_LOONG64)
+#define USE_SIMULATOR 1
+#endif
#endif
// Determine whether the architecture uses an embedded constant pool
@@ -587,9 +590,14 @@ constexpr intptr_t kPointerAlignmentMask = kPointerAlignment - 1;
constexpr intptr_t kDoubleAlignment = 8;
constexpr intptr_t kDoubleAlignmentMask = kDoubleAlignment - 1;
-// Desired alignment for generated code is 32 bytes (to improve cache line
-// utilization).
+// Desired alignment for generated code is 64 bytes on x64 (to allow 64-byte
+// loop header alignment) and 32 bytes (to improve cache line utilization) on
+// other architectures.
+#if V8_TARGET_ARCH_X64
+constexpr int kCodeAlignmentBits = 6;
+#else
constexpr int kCodeAlignmentBits = 5;
+#endif
constexpr intptr_t kCodeAlignment = 1 << kCodeAlignmentBits;
constexpr intptr_t kCodeAlignmentMask = kCodeAlignment - 1;
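
With kCodeAlignmentBits == 6 on x64, kCodeAlignment is 64 and kCodeAlignmentMask is 0x3F. A generic round-up using these constants (a sketch, not a V8 call site):

// Round an address up to the next code-alignment boundary (64 bytes on x64).
constexpr intptr_t AlignCodeAddress(intptr_t addr) {
  return (addr + kCodeAlignmentMask) & ~kCodeAlignmentMask;
}
static_assert(AlignCodeAddress(1) == kCodeAlignment, "rounds up to one unit");
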
@@ -1701,20 +1709,6 @@ enum IsolateAddressId {
kIsolateAddressCount
};
-enum class PoisoningMitigationLevel {
- kPoisonAll,
- kDontPoison,
- kPoisonCriticalOnly
-};
-
-enum class LoadSensitivity {
- kCritical, // Critical loads are poisoned whenever we can run untrusted
- // code (i.e., when --untrusted-code-mitigations is on).
- kUnsafe, // Unsafe loads are poisoned when full poisoning is on
- // (--branch-load-poisoning).
- kSafe // Safe loads are never poisoned.
-};
-
// The reason for a WebAssembly trap.
#define FOREACH_WASM_TRAPREASON(V) \
V(TrapUnreachable) \
@@ -1785,7 +1779,20 @@ constexpr int kSwissNameDictionaryInitialCapacity = 4;
constexpr int kSmallOrderedHashSetMinCapacity = 4;
constexpr int kSmallOrderedHashMapMinCapacity = 4;
-static const uint16_t kDontAdaptArgumentsSentinel = static_cast<uint16_t>(-1);
+#ifdef V8_INCLUDE_RECEIVER_IN_ARGC
+constexpr bool kJSArgcIncludesReceiver = true;
+constexpr int kJSArgcReceiverSlots = 1;
+constexpr uint16_t kDontAdaptArgumentsSentinel = 0;
+#else
+constexpr bool kJSArgcIncludesReceiver = false;
+constexpr int kJSArgcReceiverSlots = 0;
+constexpr uint16_t kDontAdaptArgumentsSentinel = static_cast<uint16_t>(-1);
+#endif
+
+// Helper to get the parameter count for functions with JS linkage.
+inline constexpr int JSParameterCount(int param_count_without_receiver) {
+ return param_count_without_receiver + kJSArgcReceiverSlots;
+}
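
A small sketch of the helper under both configurations (kJSArgcReceiverSlots is 1 exactly when argc includes the receiver):

// f(a, b): two declared parameters with JS linkage occupy two or three
// argument slots depending on the build flag.
constexpr int kSlots = JSParameterCount(2);
static_assert(kSlots == 2 + (kJSArgcIncludesReceiver ? 1 : 0),
              "receiver slot is counted exactly when argc includes it");
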
// Opaque data type for identifying stack frames. Used extensively
// by the debugger.
diff --git a/deps/v8/src/common/message-template.h b/deps/v8/src/common/message-template.h
index 89ef319db1..a925300c5c 100644
--- a/deps/v8/src/common/message-template.h
+++ b/deps/v8/src/common/message-template.h
@@ -380,6 +380,7 @@ namespace internal {
T(TypedArrayTooLargeToSort, \
"Custom comparefn not supported for huge TypedArrays") \
T(ValueOutOfRange, "Value % out of range for % options property %") \
+ T(CollectionGrowFailed, "% maximum size exceeded") \
/* SyntaxError */ \
T(AmbiguousExport, \
"The requested module '%' contains conflicting star exports for name '%'") \
@@ -439,6 +440,10 @@ namespace internal {
T(InvalidRegExpFlags, "Invalid flags supplied to RegExp constructor '%'") \
T(InvalidOrUnexpectedToken, "Invalid or unexpected token") \
T(InvalidPrivateBrand, "Object must be an instance of class %") \
+ T(InvalidPrivateBrandReinitialization, \
+ "Cannot initialize private methods of class % twice on the same object") \
+ T(InvalidPrivateFieldReitialization, \
+ "Cannot initialize % twice on the same object") \
T(InvalidPrivateFieldResolution, \
"Private field '%' must be declared in an enclosing class") \
T(InvalidPrivateMemberRead, \
diff --git a/deps/v8/src/compiler-dispatcher/OWNERS b/deps/v8/src/compiler-dispatcher/OWNERS
index f08a549385..84cd0368eb 100644
--- a/deps/v8/src/compiler-dispatcher/OWNERS
+++ b/deps/v8/src/compiler-dispatcher/OWNERS
@@ -1,4 +1,3 @@
jkummerow@chromium.org
leszeks@chromium.org
-rmcilroy@chromium.org
victorgomes@chromium.org
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
index f8a7fa8814..45f3684fb6 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.cc
@@ -173,7 +173,6 @@ void OptimizingCompileDispatcher::AwaitCompileTasks() {
void OptimizingCompileDispatcher::FlushQueues(
BlockingBehavior blocking_behavior, bool restore_function_code) {
- if (FLAG_block_concurrent_recompilation) Unblock();
FlushInputQueue();
if (blocking_behavior == BlockingBehavior::kBlock) {
base::MutexGuard lock_guard(&ref_count_mutex_);
@@ -231,7 +230,7 @@ bool OptimizingCompileDispatcher::HasJobs() {
// Note: This relies on {output_queue_} being mutated by a background thread
// only when {ref_count_} is not zero. Also, {ref_count_} is never incremented
// by a background thread.
- return ref_count_ != 0 || !output_queue_.empty() || blocked_jobs_ != 0;
+ return ref_count_ != 0 || !output_queue_.empty();
}
void OptimizingCompileDispatcher::QueueForOptimization(
@@ -244,20 +243,8 @@ void OptimizingCompileDispatcher::QueueForOptimization(
input_queue_[InputQueueIndex(input_queue_length_)] = job;
input_queue_length_++;
}
- if (FLAG_block_concurrent_recompilation) {
- blocked_jobs_++;
- } else {
- V8::GetCurrentPlatform()->CallOnWorkerThread(
- std::make_unique<CompileTask>(isolate_, this));
- }
-}
-
-void OptimizingCompileDispatcher::Unblock() {
- while (blocked_jobs_ > 0) {
- V8::GetCurrentPlatform()->CallOnWorkerThread(
- std::make_unique<CompileTask>(isolate_, this));
- blocked_jobs_--;
- }
+ V8::GetCurrentPlatform()->CallOnWorkerThread(
+ std::make_unique<CompileTask>(isolate_, this));
}
} // namespace internal
diff --git a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
index 56592ed9b4..ccfb4f2a4a 100644
--- a/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
+++ b/deps/v8/src/compiler-dispatcher/optimizing-compile-dispatcher.h
@@ -30,7 +30,6 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
input_queue_capacity_(FLAG_concurrent_recompilation_queue_length),
input_queue_length_(0),
input_queue_shift_(0),
- blocked_jobs_(0),
ref_count_(0),
recompilation_delay_(FLAG_concurrent_recompilation_delay) {
input_queue_ = NewArray<OptimizedCompilationJob*>(input_queue_capacity_);
@@ -42,7 +41,6 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
void Flush(BlockingBehavior blocking_behavior);
// Takes ownership of |job|.
void QueueForOptimization(OptimizedCompilationJob* job);
- void Unblock();
void AwaitCompileTasks();
void InstallOptimizedFunctions();
@@ -99,8 +97,6 @@ class V8_EXPORT_PRIVATE OptimizingCompileDispatcher {
// different threads.
base::Mutex output_queue_mutex_;
- int blocked_jobs_;
-
std::atomic<int> ref_count_;
base::Mutex ref_count_mutex_;
base::ConditionVariable ref_count_zero_;
diff --git a/deps/v8/src/compiler/OWNERS b/deps/v8/src/compiler/OWNERS
index 1626bc5487..a415cbfa66 100644
--- a/deps/v8/src/compiler/OWNERS
+++ b/deps/v8/src/compiler/OWNERS
@@ -4,7 +4,6 @@ mvstanton@chromium.org
neis@chromium.org
nicohartmann@chromium.org
sigurds@chromium.org
-solanes@chromium.org
per-file wasm-*=ahaas@chromium.org
per-file wasm-*=bbudge@chromium.org
diff --git a/deps/v8/src/compiler/access-builder.cc b/deps/v8/src/compiler/access-builder.cc
index 675371df57..fda0727dd1 100644
--- a/deps/v8/src/compiler/access-builder.cc
+++ b/deps/v8/src/compiler/access-builder.cc
@@ -82,25 +82,25 @@ FieldAccess AccessBuilder::ForJSObjectPropertiesOrHash() {
FieldAccess access = {kTaggedBase, JSObject::kPropertiesOrHashOffset,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::Any(), MachineType::AnyTagged(),
- kFullWriteBarrier, LoadSensitivity::kCritical};
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSObjectPropertiesOrHashKnownPointer() {
- FieldAccess access = {kTaggedBase, JSObject::kPropertiesOrHashOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::TaggedPointer(),
- kPointerWriteBarrier, LoadSensitivity::kCritical};
+ FieldAccess access = {kTaggedBase, JSObject::kPropertiesOrHashOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSObjectElements() {
- FieldAccess access = {kTaggedBase, JSObject::kElementsOffset,
- MaybeHandle<Name>(), MaybeHandle<Map>(),
- Type::Internal(), MachineType::TaggedPointer(),
- kPointerWriteBarrier, LoadSensitivity::kCritical};
+ FieldAccess access = {kTaggedBase, JSObject::kElementsOffset,
+ MaybeHandle<Name>(), MaybeHandle<Map>(),
+ Type::Internal(), MachineType::TaggedPointer(),
+ kPointerWriteBarrier};
return access;
}
@@ -410,26 +410,22 @@ FieldAccess AccessBuilder::ForJSTypedArrayBasePointer() {
FieldAccess access = {kTaggedBase, JSTypedArray::kBasePointerOffset,
MaybeHandle<Name>(), MaybeHandle<Map>(),
Type::OtherInternal(), MachineType::AnyTagged(),
- kFullWriteBarrier, LoadSensitivity::kCritical};
+ kFullWriteBarrier};
return access;
}
// static
FieldAccess AccessBuilder::ForJSTypedArrayExternalPointer() {
- FieldAccess access = {kTaggedBase,
- JSTypedArray::kExternalPointerOffset,
- MaybeHandle<Name>(),
- MaybeHandle<Map>(),
- V8_HEAP_SANDBOX_BOOL ? Type::SandboxedExternalPointer()
- : Type::ExternalPointer(),
- MachineType::Pointer(),
- kNoWriteBarrier,
- LoadSensitivity::kCritical,
- ConstFieldInfo::None(),
- false,
-#ifdef V8_HEAP_SANDBOX
- kTypedArrayExternalPointerTag
-#endif
+ FieldAccess access = {
+ kTaggedBase,
+ JSTypedArray::kExternalPointerOffset,
+ MaybeHandle<Name>(),
+ MaybeHandle<Map>(),
+ Type::ExternalPointer(),
+ MachineType::Pointer(),
+ kNoWriteBarrier,
+ ConstFieldInfo::None(),
+ false,
};
return access;
}
@@ -441,16 +437,11 @@ FieldAccess AccessBuilder::ForJSDataViewDataPointer() {
JSDataView::kDataPointerOffset,
MaybeHandle<Name>(),
MaybeHandle<Map>(),
- V8_HEAP_SANDBOX_BOOL ? Type::SandboxedExternalPointer()
- : Type::ExternalPointer(),
+ Type::ExternalPointer(),
MachineType::Pointer(),
kNoWriteBarrier,
- LoadSensitivity::kUnsafe,
ConstFieldInfo::None(),
false,
-#ifdef V8_HEAP_SANDBOX
- kDataViewDataPointerTag,
-#endif
};
return access;
}
@@ -756,7 +747,6 @@ FieldAccess AccessBuilder::ForExternalStringResourceData() {
: Type::ExternalPointer(),
MachineType::Pointer(),
kNoWriteBarrier,
- LoadSensitivity::kUnsafe,
ConstFieldInfo::None(),
false,
#ifdef V8_HEAP_SANDBOX
@@ -902,10 +892,10 @@ FieldAccess AccessBuilder::ForWeakFixedArraySlot(int index) {
}
// static
FieldAccess AccessBuilder::ForCellValue() {
- FieldAccess access = {kTaggedBase, Cell::kValueOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::AnyTagged(),
- kFullWriteBarrier, LoadSensitivity::kCritical};
+ FieldAccess access = {kTaggedBase, Cell::kValueOffset,
+ Handle<Name>(), MaybeHandle<Map>(),
+ Type::Any(), MachineType::AnyTagged(),
+ kFullWriteBarrier};
return access;
}
@@ -966,11 +956,9 @@ ElementAccess AccessBuilder::ForSloppyArgumentsElementsMappedEntry() {
}
 // static
-ElementAccess AccessBuilder::ForFixedArrayElement(
- ElementsKind kind, LoadSensitivity load_sensitivity) {
- ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize,
- Type::Any(), MachineType::AnyTagged(),
- kFullWriteBarrier, load_sensitivity};
+ElementAccess AccessBuilder::ForFixedArrayElement(ElementsKind kind) {
+ ElementAccess access = {kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
+ MachineType::AnyTagged(), kFullWriteBarrier};
switch (kind) {
case PACKED_SMI_ELEMENTS:
access.type = Type::SignedSmall();
@@ -1038,59 +1026,50 @@ FieldAccess AccessBuilder::ForEnumCacheIndices() {
}
// static
-ElementAccess AccessBuilder::ForTypedArrayElement(
- ExternalArrayType type, bool is_external,
- LoadSensitivity load_sensitivity) {
+ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type,
+ bool is_external) {
BaseTaggedness taggedness = is_external ? kUntaggedBase : kTaggedBase;
int header_size = is_external ? 0 : ByteArray::kHeaderSize;
switch (type) {
case kExternalInt8Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Signed32(), MachineType::Int8(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Signed32(),
+ MachineType::Int8(), kNoWriteBarrier};
return access;
}
case kExternalUint8Array:
case kExternalUint8ClampedArray: {
- ElementAccess access = {taggedness, header_size,
- Type::Unsigned32(), MachineType::Uint8(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
+ MachineType::Uint8(), kNoWriteBarrier};
return access;
}
case kExternalInt16Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Signed32(), MachineType::Int16(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Signed32(),
+ MachineType::Int16(), kNoWriteBarrier};
return access;
}
case kExternalUint16Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Unsigned32(), MachineType::Uint16(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
+ MachineType::Uint16(), kNoWriteBarrier};
return access;
}
case kExternalInt32Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Signed32(), MachineType::Int32(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Signed32(),
+ MachineType::Int32(), kNoWriteBarrier};
return access;
}
case kExternalUint32Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Unsigned32(), MachineType::Uint32(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Unsigned32(),
+ MachineType::Uint32(), kNoWriteBarrier};
return access;
}
case kExternalFloat32Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Number(), MachineType::Float32(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Number(),
+ MachineType::Float32(), kNoWriteBarrier};
return access;
}
case kExternalFloat64Array: {
- ElementAccess access = {taggedness, header_size,
- Type::Number(), MachineType::Float64(),
- kNoWriteBarrier, load_sensitivity};
+ ElementAccess access = {taggedness, header_size, Type::Number(),
+ MachineType::Float64(), kNoWriteBarrier};
return access;
}
case kExternalBigInt64Array:
@@ -1239,15 +1218,6 @@ FieldAccess AccessBuilder::ForDictionaryObjectHashIndex() {
}
// static
-FieldAccess AccessBuilder::ForFeedbackCellValue() {
- FieldAccess access = {kTaggedBase, FeedbackCell::kValueOffset,
- Handle<Name>(), MaybeHandle<Map>(),
- Type::Any(), MachineType::TaggedPointer(),
- kFullWriteBarrier};
- return access;
-}
-
-// static
FieldAccess AccessBuilder::ForFeedbackCellInterruptBudget() {
FieldAccess access = {kTaggedBase,
FeedbackCell::kInterruptBudgetOffset,
diff --git a/deps/v8/src/compiler/access-builder.h b/deps/v8/src/compiler/access-builder.h
index fa68628cf8..99ffde19c4 100644
--- a/deps/v8/src/compiler/access-builder.h
+++ b/deps/v8/src/compiler/access-builder.h
@@ -299,9 +299,7 @@ class V8_EXPORT_PRIVATE AccessBuilder final
// Provides access to FixedArray elements.
static ElementAccess ForFixedArrayElement();
- static ElementAccess ForFixedArrayElement(
- ElementsKind kind,
- LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe);
+ static ElementAccess ForFixedArrayElement(ElementsKind kind);
// Provides access to SloppyArgumentsElements elements.
static ElementAccess ForSloppyArgumentsElementsMappedEntry();
@@ -319,9 +317,8 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static FieldAccess ForEnumCacheIndices();
// Provides access to Fixed{type}TypedArray and External{type}Array elements.
- static ElementAccess ForTypedArrayElement(
- ExternalArrayType type, bool is_external,
- LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe);
+ static ElementAccess ForTypedArrayElement(ExternalArrayType type,
+ bool is_external);
// Provides access to HashTable fields.
static FieldAccess ForHashTableBaseNumberOfElements();
@@ -342,7 +339,6 @@ class V8_EXPORT_PRIVATE AccessBuilder final
static FieldAccess ForDictionaryObjectHashIndex();
// Provides access to FeedbackCell fields.
- static FieldAccess ForFeedbackCellValue();
static FieldAccess ForFeedbackCellInterruptBudget();
// Provides access to a FeedbackVector fields.
diff --git a/deps/v8/src/compiler/access-info.cc b/deps/v8/src/compiler/access-info.cc
index 21f453f4d8..e68ced7460 100644
--- a/deps/v8/src/compiler/access-info.cc
+++ b/deps/v8/src/compiler/access-info.cc
@@ -8,7 +8,6 @@
#include "src/builtins/accessors.h"
#include "src/compiler/compilation-dependencies.h"
-#include "src/compiler/compilation-dependency.h"
#include "src/compiler/simplified-operator.h"
#include "src/compiler/type-cache.h"
#include "src/ic/call-optimization.h"
@@ -57,7 +56,8 @@ bool HasFieldRepresentationDependenciesOnMap(
ZoneVector<CompilationDependency const*>& dependencies,
Handle<Map> const& field_owner_map) {
for (auto dep : dependencies) {
- if (dep->IsFieldRepresentationDependencyOnMap(field_owner_map)) {
+ if (CompilationDependencies::IsFieldRepresentationDependencyOnMap(
+ dep, field_owner_map)) {
return true;
}
}
@@ -109,6 +109,7 @@ PropertyAccessInfo PropertyAccessInfo::DataField(
FieldIndex field_index, Representation field_representation,
Type field_type, MapRef field_owner_map, base::Optional<MapRef> field_map,
base::Optional<JSObjectRef> holder, base::Optional<MapRef> transition_map) {
+ DCHECK(!field_representation.IsNone());
DCHECK_IMPLIES(
field_representation.IsDouble(),
HasFieldRepresentationDependenciesOnMap(
@@ -129,6 +130,7 @@ PropertyAccessInfo PropertyAccessInfo::FastDataConstant(
FieldIndex field_index, Representation field_representation,
Type field_type, MapRef field_owner_map, base::Optional<MapRef> field_map,
base::Optional<JSObjectRef> holder, base::Optional<MapRef> transition_map) {
+ DCHECK(!field_representation.IsNone());
return PropertyAccessInfo(kFastDataConstant, holder, transition_map,
field_index, field_representation, field_type,
field_owner_map, field_map, {{receiver_map}, zone},
@@ -384,7 +386,7 @@ AccessInfoFactory::AccessInfoFactory(JSHeapBroker* broker,
base::Optional<ElementAccessInfo> AccessInfoFactory::ComputeElementAccessInfo(
MapRef map, AccessMode access_mode) const {
- if (!CanInlineElementAccess(map)) return {};
+ if (!map.CanInlineElementAccess()) return {};
return ElementAccessInfo({{map}, zone()}, map.elements_kind(), zone());
}
@@ -542,7 +544,7 @@ PropertyAccessInfo AccessorAccessInfoHelper(
Handle<Cell> cell = broker->CanonicalPersistentHandle(
Cell::cast(module_namespace->module().exports().Lookup(
isolate, name.object(), Smi::ToInt(name.object()->GetHash()))));
- if (cell->value().IsTheHole(isolate)) {
+ if (cell->value(kRelaxedLoad).IsTheHole(isolate)) {
// This module has not been fully initialized yet.
return PropertyAccessInfo::Invalid(zone);
}
@@ -1050,7 +1052,7 @@ base::Optional<ElementAccessInfo> AccessInfoFactory::ConsolidateElementLoad(
base::Optional<MapRef> map = TryMakeRef(broker(), map_handle);
if (!map.has_value()) return {};
if (map->instance_type() != instance_type ||
- !CanInlineElementAccess(*map)) {
+ !map->CanInlineElementAccess()) {
return {};
}
if (!GeneralizeElementsKind(elements_kind, map->elements_kind())
@@ -1132,6 +1134,8 @@ PropertyAccessInfo AccessInfoFactory::LookupTransition(
int const index = details.field_index();
Representation details_representation = details.representation();
+ if (details_representation.IsNone()) return Invalid();
+
FieldIndex field_index = FieldIndex::ForPropertyIndex(
*transition_map.object(), index, details_representation);
Type field_type = Type::NonInternal();
diff --git a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
index 29c7897ec9..7bc90fd822 100644
--- a/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/code-generator-arm.cc
@@ -36,9 +36,7 @@ class ArmOperandConverter final : public InstructionOperandConverter {
SBit OutputSBit() const {
switch (instr_->flags_mode()) {
case kFlags_branch:
- case kFlags_branch_and_poison:
case kFlags_deoptimize:
- case kFlags_deoptimize_and_poison:
case kFlags_set:
case kFlags_trap:
case kFlags_select:
@@ -322,35 +320,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition) {
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode,
- ArmOperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->and_(value, value, Operand(kSpeculationPoisonRegister));
- }
-}
-
-void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
- InstructionCode opcode,
- ArmOperandConverter const& i,
- Register address) {
- DCHECK_EQ(kMemoryAccessPoisoned, AccessModeField::decode(opcode));
- switch (AddressingModeField::decode(opcode)) {
- case kMode_Offset_RI:
- codegen->tasm()->mov(address, i.InputImmediate(1));
- codegen->tasm()->add(address, address, i.InputRegister(0));
- break;
- case kMode_Offset_RR:
- codegen->tasm()->add(address, i.InputRegister(0), i.InputRegister(1));
- break;
- default:
- UNREACHABLE();
- }
- codegen->tasm()->and_(address, address, Operand(kSpeculationPoisonRegister));
-}
-
} // namespace
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
@@ -360,12 +329,11 @@ void ComputePoisonedAddressForLoad(CodeGenerator* codegen,
__ dmb(ISH); \
} while (0)
-#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
- do { \
- __ dmb(ISH); \
- __ asm_instr(i.InputRegister(2), \
- MemOperand(i.InputRegister(0), i.InputRegister(1))); \
- __ dmb(ISH); \
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr, order) \
+ do { \
+ __ dmb(ISH); \
+ __ asm_instr(i.InputRegister(0), i.InputOffset(1)); \
+ if (order == AtomicMemoryOrder::kSeqCst) __ dmb(ISH); \
} while (0)
#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(load_instr, store_instr) \
@@ -691,25 +659,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne);
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- UseScratchRegisterScope temps(tasm());
- Register scratch = temps.Acquire();
-
- // Set a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- __ ComputeCodeStartAddress(scratch);
- __ cmp(kJavaScriptCallCodeStartRegister, scratch);
- __ mov(kSpeculationPoisonRegister, Operand(-1), SBit::LeaveCC, eq);
- __ mov(kSpeculationPoisonRegister, Operand(0), SBit::LeaveCC, ne);
- __ csdb();
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ and_(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ and_(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ and_(sp, sp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -977,15 +926,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputDoubleRegister(0), DetermineStubCallMode());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
- case kArchStoreWithWriteBarrier: {
- RecordWriteMode mode =
- static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ case kArchStoreWithWriteBarrier: // Fall through.
+ case kArchAtomicStoreWithWriteBarrier: {
+ RecordWriteMode mode;
+ if (arch_opcode == kArchStoreWithWriteBarrier) {
+ mode = static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ } else {
+ mode = AtomicStoreRecordWriteModeField::decode(instr->opcode());
+ }
Register object = i.InputRegister(0);
Register value = i.InputRegister(2);
AddressingMode addressing_mode =
AddressingModeField::decode(instr->opcode());
Operand offset(0);
+
+ if (arch_opcode == kArchAtomicStoreWithWriteBarrier) {
+ __ dmb(ISH);
+ }
if (addressing_mode == kMode_Offset_RI) {
int32_t immediate = i.InputInt32(1);
offset = Operand(immediate);
@@ -996,6 +954,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
offset = Operand(reg);
__ str(value, MemOperand(object, reg));
}
+ if (arch_opcode == kArchAtomicStoreWithWriteBarrier &&
+ AtomicMemoryOrderField::decode(instr->opcode()) ==
+ AtomicMemoryOrder::kSeqCst) {
+ __ dmb(ISH);
+ }
+
auto ool = zone()->New<OutOfLineRecordWrite>(
this, object, offset, value, mode, DetermineStubCallMode(),
&unwinding_info_writer_);
@@ -1619,12 +1583,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArmLdrb:
__ ldrb(i.OutputRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
- EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmLdrsb:
__ ldrsb(i.OutputRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
- EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmStrb:
__ strb(i.InputRegister(0), i.InputOffset(1));
@@ -1632,11 +1594,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArmLdrh:
__ ldrh(i.OutputRegister(), i.InputOffset());
- EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmLdrsh:
__ ldrsh(i.OutputRegister(), i.InputOffset());
- EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmStrh:
__ strh(i.InputRegister(0), i.InputOffset(1));
@@ -1644,22 +1604,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArmLdr:
__ ldr(i.OutputRegister(), i.InputOffset());
- EmitWordLoadPoisoningIfNeeded(this, opcode, i);
break;
case kArmStr:
__ str(i.InputRegister(0), i.InputOffset(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVldrF32: {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- UseScratchRegisterScope temps(tasm());
- Register address = temps.Acquire();
- ComputePoisonedAddressForLoad(this, opcode, i, address);
- __ vldr(i.OutputFloatRegister(), address, 0);
- } else {
- __ vldr(i.OutputFloatRegister(), i.InputOffset());
- }
+ __ vldr(i.OutputFloatRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
@@ -1688,15 +1639,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVldrF64: {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- UseScratchRegisterScope temps(tasm());
- Register address = temps.Acquire();
- ComputePoisonedAddressForLoad(this, opcode, i, address);
- __ vldr(i.OutputDoubleRegister(), address, 0);
- } else {
- __ vldr(i.OutputDoubleRegister(), i.InputOffset());
- }
+ __ vldr(i.OutputDoubleRegister(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
@@ -1832,10 +1775,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ isb(SY);
break;
}
- case kArchWordPoisonOnSpeculation:
- __ and_(i.OutputRegister(0), i.InputRegister(0),
- Operand(kSpeculationPoisonRegister));
- break;
case kArmVmullLow: {
auto dt = static_cast<NeonDataType>(MiscField::decode(instr->opcode()));
__ vmull(dt, i.OutputSimd128Register(), i.InputSimd128Register(0).low(),
@@ -3373,94 +3312,97 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ StoreLane(sz, src_list, i.InputUint8(1), i.NeonInputOperand(2));
break;
}
- case kWord32AtomicLoadInt8:
+ case kAtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsb);
break;
- case kWord32AtomicLoadUint8:
+ case kAtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrb);
break;
- case kWord32AtomicLoadInt16:
+ case kAtomicLoadInt16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrsh);
break;
- case kWord32AtomicLoadUint16:
+ case kAtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldrh);
break;
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(ldr);
break;
- case kWord32AtomicStoreWord8:
- ASSEMBLE_ATOMIC_STORE_INTEGER(strb);
+ case kAtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(strb,
+ AtomicMemoryOrderField::decode(opcode));
break;
- case kWord32AtomicStoreWord16:
- ASSEMBLE_ATOMIC_STORE_INTEGER(strh);
+ case kAtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(strh,
+ AtomicMemoryOrderField::decode(opcode));
break;
- case kWord32AtomicStoreWord32:
- ASSEMBLE_ATOMIC_STORE_INTEGER(str);
+ case kAtomicStoreWord32:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(str,
+ AtomicMemoryOrderField::decode(opcode));
break;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexb, strexb);
__ sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicExchangeUint8:
+ case kAtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexb, strexb);
break;
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexh, strexh);
__ sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicExchangeUint16:
+ case kAtomicExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrexh, strexh);
break;
- case kWord32AtomicExchangeWord32:
+ case kAtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldrex, strex);
break;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxtb(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb,
i.TempRegister(2));
__ sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxtb(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexb, strexb,
i.TempRegister(2));
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxth(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh,
i.TempRegister(2));
__ sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
__ uxth(i.TempRegister(2), i.InputRegister(2));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrexh, strexh,
i.TempRegister(2));
break;
- case kWord32AtomicCompareExchangeWord32:
+ case kAtomicCompareExchangeWord32:
__ add(i.TempRegister(1), i.InputRegister(0), i.InputRegister(1));
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldrex, strex,
i.InputRegister(2));
break;
#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
+ case kAtomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP(ldrexb, strexb, inst); \
__ sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
- case kWord32Atomic##op##Uint8: \
+ case kAtomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP(ldrexb, strexb, inst); \
break; \
- case kWord32Atomic##op##Int16: \
+ case kAtomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst); \
__ sxth(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
- case kWord32Atomic##op##Uint16: \
+ case kAtomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP(ldrexh, strexh, inst); \
break; \
- case kWord32Atomic##op##Word32: \
+ case kAtomic##op##Word32: \
ASSEMBLE_ATOMIC_BINOP(ldrex, strex, inst); \
break;
ATOMIC_BINOP_CASE(Add, add)
@@ -3597,20 +3539,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
- return;
- }
-
- condition = NegateFlagsCondition(condition);
- __ eor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- Operand(kSpeculationPoisonRegister), SBit::LeaveCC,
- FlagsConditionToCondition(condition));
- __ csdb();
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -3805,7 +3733,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
- ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -3955,12 +3882,20 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// DropArguments().
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
if (parameter_slots > 1) {
- const int parameter_slots_without_receiver = parameter_slots - 1;
- __ cmp(argc_reg, Operand(parameter_slots_without_receiver));
- __ mov(argc_reg, Operand(parameter_slots_without_receiver), LeaveCC, lt);
+ if (kJSArgcIncludesReceiver) {
+ __ cmp(argc_reg, Operand(parameter_slots));
+ __ mov(argc_reg, Operand(parameter_slots), LeaveCC, lt);
+ } else {
+ const int parameter_slots_without_receiver = parameter_slots - 1;
+ __ cmp(argc_reg, Operand(parameter_slots_without_receiver));
+ __ mov(argc_reg, Operand(parameter_slots_without_receiver), LeaveCC,
+ lt);
+ }
}
__ DropArguments(argc_reg, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
} else if (additional_pop_count->IsImmediate()) {
DCHECK_EQ(Constant::kInt32, g.ToConstant(additional_pop_count).type());
int additional_count = g.ToConstant(additional_pop_count).ToInt32();
diff --git a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
index 2698d45ae7..3de9b2aab6 100644
--- a/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
+++ b/deps/v8/src/compiler/backend/arm/instruction-selector-arm.cc
@@ -430,17 +430,18 @@ void EmitLoad(InstructionSelector* selector, InstructionCode opcode,
void EmitStore(InstructionSelector* selector, InstructionCode opcode,
size_t input_count, InstructionOperand* inputs, Node* index) {
ArmOperandGenerator g(selector);
+ ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
if (g.CanBeImmediate(index, opcode)) {
inputs[input_count++] = g.UseImmediate(index);
opcode |= AddressingModeField::encode(kMode_Offset_RI);
- } else if ((opcode == kArmStr) &&
+ } else if ((arch_opcode == kArmStr || arch_opcode == kAtomicStoreWord32) &&
TryMatchLSLImmediate(selector, &opcode, index, &inputs[2],
&inputs[3])) {
input_count = 4;
} else {
inputs[input_count++] = g.UseRegister(index);
- if (opcode == kArmVst1S128) {
+ if (arch_opcode == kArmVst1S128) {
// Inputs are value, base, index, only care about base and index.
EmitAddBeforeS128LoadStore(selector, &opcode, &input_count, &inputs[1]);
} else {
@@ -630,29 +631,69 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kNone:
UNREACHABLE();
}
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
- }
InstructionOperand output = g.DefineAsRegister(node);
EmitLoad(this, opcode, &output, base, index);
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
}
-void InstructionSelector::VisitStore(Node* node) {
- ArmOperandGenerator g(this);
+namespace {
+
+ArchOpcode GetStoreOpcode(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ return kArmVstrF32;
+ case MachineRepresentation::kFloat64:
+ return kArmVstrF64;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ return kArmStrb;
+ case MachineRepresentation::kWord16:
+ return kArmStrh;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ return kArmStr;
+ case MachineRepresentation::kSimd128:
+ return kArmVst1S128;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ }
+}
+
+ArchOpcode GetAtomicStoreOpcode(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ return kAtomicStoreWord8;
+ case MachineRepresentation::kWord16:
+ return kAtomicStoreWord16;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ return kAtomicStoreWord32;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void VisitStoreCommon(InstructionSelector* selector, Node* node,
+ StoreRepresentation store_rep,
+ base::Optional<AtomicMemoryOrder> atomic_order) {
+ ArmOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = StoreRepresentationOf(node->op());
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineRepresentation rep = store_rep.representation();
@@ -678,58 +719,44 @@ void InstructionSelector::VisitStore(Node* node) {
inputs[input_count++] = g.UseUniqueRegister(value);
RecordWriteMode record_write_mode =
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
- InstructionCode code = kArchStoreWithWriteBarrier;
+ InstructionCode code;
+ if (!atomic_order) {
+ code = kArchStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ } else {
+ code = kArchAtomicStoreWithWriteBarrier;
+ code |= AtomicMemoryOrderField::encode(*atomic_order);
+ code |= AtomicStoreRecordWriteModeField::encode(record_write_mode);
+ }
code |= AddressingModeField::encode(addressing_mode);
- code |= MiscField::encode(static_cast<int>(record_write_mode));
- Emit(code, 0, nullptr, input_count, inputs);
+ selector->Emit(code, 0, nullptr, input_count, inputs);
} else {
InstructionCode opcode = kArchNop;
- switch (rep) {
- case MachineRepresentation::kFloat32:
- opcode = kArmVstrF32;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kArmVstrF64;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kWord8:
- opcode = kArmStrb;
- break;
- case MachineRepresentation::kWord16:
- opcode = kArmStrh;
- break;
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord32:
- opcode = kArmStr;
- break;
- case MachineRepresentation::kSimd128:
- opcode = kArmVst1S128;
- break;
- case MachineRepresentation::kCompressedPointer: // Fall through.
- case MachineRepresentation::kCompressed: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kMapWord: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
+ if (!atomic_order) {
+ opcode = GetStoreOpcode(rep);
+ } else {
+      // Release stores emit DMB ISH; STR, while sequentially consistent
+      // stores emit DMB ISH; STR; DMB ISH.
+      // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ opcode = GetAtomicStoreOpcode(rep);
+ opcode |= AtomicMemoryOrderField::encode(*atomic_order);
}
ExternalReferenceMatcher m(base);
if (m.HasResolvedValue() &&
- CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
+ selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
Int32Matcher int_matcher(index);
if (int_matcher.HasResolvedValue()) {
ptrdiff_t const delta =
int_matcher.ResolvedValue() +
TurboAssemblerBase::RootRegisterOffsetForExternalReference(
- isolate(), m.ResolvedValue());
+ selector->isolate(), m.ResolvedValue());
int input_count = 2;
InstructionOperand inputs[2];
inputs[0] = g.UseRegister(value);
inputs[1] = g.UseImmediate(static_cast<int32_t>(delta));
opcode |= AddressingModeField::encode(kMode_Root);
- Emit(opcode, 0, nullptr, input_count, inputs);
+ selector->Emit(opcode, 0, nullptr, input_count, inputs);
return;
}
}
@@ -738,10 +765,17 @@ void InstructionSelector::VisitStore(Node* node) {
size_t input_count = 0;
inputs[input_count++] = g.UseRegister(value);
inputs[input_count++] = g.UseRegister(base);
- EmitStore(this, opcode, input_count, inputs, index);
+ EmitStore(selector, opcode, input_count, inputs, index);
}
}
+} // namespace
+
+void InstructionSelector::VisitStore(Node* node) {
+ VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
+ base::nullopt);
+}
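
The barrier sequences named in the comments above can be reproduced with plain std::atomic, no V8 APIs involved. A minimal sketch, assuming an ARMv7 target (compilers typically lower these stores as described; ARMv8 uses STLR/LDAR instead):

#include <atomic>

std::atomic<int> g_flag{0};

// Release store: typically DMB ISH; STR on ARMv7.
void ReleaseStore(int v) { g_flag.store(v, std::memory_order_release); }

// Sequentially consistent store: typically DMB ISH; STR; DMB ISH on ARMv7,
// i.e. the release sequence plus a trailing barrier.
void SeqCstStore(int v) { g_flag.store(v, std::memory_order_seq_cst); }

// Acquire and seq_cst loads both lower to LDR; DMB ISH on ARMv7, which is
// why VisitWord32AtomicLoad below ignores the memory order entirely.
int AcquireLoad() { return g_flag.load(std::memory_order_acquire); }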
+
void InstructionSelector::VisitProtectedStore(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -2236,22 +2270,27 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ // The memory order is ignored, as both acquire and sequentially
+ // consistent loads can be emitted as LDR; DMB ISH.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
ArmOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode =
- load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
- : kWord32AtomicLoadUint16;
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
- opcode = kWord32AtomicLoadWord32;
+ opcode = kAtomicLoadWord32;
break;
default:
UNREACHABLE();
@@ -2261,34 +2300,9 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArmOperandGenerator g(this);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kWord32AtomicStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kWord32AtomicStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicStoreWord32;
- break;
- default:
- UNREACHABLE();
- }
-
- AddressingMode addressing_mode = kMode_Offset_RR;
- InstructionOperand inputs[4];
- size_t input_count = 0;
- inputs[input_count++] = g.UseUniqueRegister(base);
- inputs[input_count++] = g.UseUniqueRegister(index);
- inputs[input_count++] = g.UseUniqueRegister(value);
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 0, nullptr, input_count, inputs);
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ VisitStoreCommon(this, node, store_params.store_representation(),
+ store_params.order());
}
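
The `code |= SomeField::encode(...)` lines above pack operand metadata into spare bits of the instruction word. A self-contained sketch of that bit-field idiom; the field position, width, and base opcode value below are made up for illustration and do not match V8's actual layout:

#include <cstdint>

template <typename T, int kShift, int kBits>
struct BitField {
  static constexpr uint32_t kMask = ((uint32_t{1} << kBits) - 1) << kShift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
};

enum class MemOrder : uint32_t { kAcqRel = 0, kSeqCst = 1 };
using MemOrderField = BitField<MemOrder, 24, 2>;  // illustrative position

constexpr uint32_t kAtomicStoreWord32Sketch = 0x40;  // arbitrary base opcode
constexpr uint32_t kCode =
    kAtomicStoreWord32Sketch | MemOrderField::encode(MemOrder::kSeqCst);
static_assert(MemOrderField::decode(kCode) == MemOrder::kSeqCst,
              "fields round-trip through the packed word");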
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
@@ -2299,15 +2313,15 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
@@ -2334,15 +2348,15 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
@@ -2399,12 +2413,11 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Emit(code, 1, outputs, input_count, inputs, arraysize(temps), temps);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
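
VISIT_ATOMIC_BINOP is the usual macro-stamping pattern: one body instantiated per operation keeps every visitor structurally identical. A tiny runnable version, where the printf stands in for the real instruction selection:

#include <cstdio>

#define VISIT_ATOMIC_BINOP(op) \
  void VisitWord32Atomic##op() { std::printf("select kAtomic" #op "\n"); }
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
#undef VISIT_ATOMIC_BINOP

int main() {
  VisitWord32AtomicAdd();  // prints "select kAtomicAdd"
  VisitWord32AtomicSub();  // prints "select kAtomicSub"
}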
diff --git a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
index c121383426..fcab0a739b 100644
--- a/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/code-generator-arm64.cc
@@ -235,7 +235,6 @@ class Arm64OperandConverter final : public InstructionOperandConverter {
constant.ToDelayedStringConstant());
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): RPO immediates on arm64.
- break;
}
UNREACHABLE();
}
@@ -460,47 +459,6 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
}
#endif // V8_ENABLE_WEBASSEMBLY
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- Arm64OperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- Register poison = value.Is64Bits() ? kSpeculationPoisonRegister
- : kSpeculationPoisonRegister.W();
- codegen->tasm()->And(value, value, Operand(poison));
- }
-}
-
-void EmitMaybePoisonedFPLoad(CodeGenerator* codegen, InstructionCode opcode,
- Arm64OperandConverter* i, VRegister output_reg) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- AddressingMode address_mode = AddressingModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned && address_mode != kMode_Root) {
- UseScratchRegisterScope temps(codegen->tasm());
- Register address = temps.AcquireX();
- switch (address_mode) {
- case kMode_MRI: // Fall through.
- case kMode_MRR:
- codegen->tasm()->Add(address, i->InputRegister(0), i->InputOperand(1));
- break;
- case kMode_Operand2_R_LSL_I:
- codegen->tasm()->Add(address, i->InputRegister(0),
- i->InputOperand2_64(1));
- break;
- default:
- // Note: we don't need poisoning for kMode_Root loads as those loads
- // target a fixed offset from root register which is set once when
- // initializing the vm.
- UNREACHABLE();
- }
- codegen->tasm()->And(address, address, Operand(kSpeculationPoisonRegister));
- codegen->tasm()->Ldr(output_reg, MemOperand(address));
- } else {
- codegen->tasm()->Ldr(output_reg, i->MemoryOperand());
- }
-}
-
// Handles unary ops that work for float (scalar), double (scalar), or NEON.
template <typename Fn>
void EmitFpOrNeonUnop(TurboAssembler* tasm, Fn fn, Instruction* instr,
@@ -714,29 +672,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ Bind(&not_deoptimized);
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- UseScratchRegisterScope temps(tasm());
- Register scratch = temps.AcquireX();
-
- // Set a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- __ ComputeCodeStartAddress(scratch);
- __ Cmp(kJavaScriptCallCodeStartRegister, scratch);
- __ Csetm(kSpeculationPoisonRegister, eq);
- __ Csdb();
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- UseScratchRegisterScope temps(tasm());
- Register scratch = temps.AcquireX();
-
- __ Mov(scratch, sp);
- __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ And(scratch, scratch, kSpeculationPoisonRegister);
- __ Mov(sp, scratch);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -1034,6 +969,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Bind(ool->exit());
break;
}
+ case kArchAtomicStoreWithWriteBarrier: {
+ DCHECK_EQ(AddressingModeField::decode(instr->opcode()), kMode_MRR);
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ Register object = i.InputRegister(0);
+ Register offset = i.InputRegister(1);
+ Register value = i.InputRegister(2);
+ auto ool = zone()->New<OutOfLineRecordWrite>(
+ this, object, offset, value, mode, DetermineStubCallMode(),
+ &unwinding_info_writer_);
+ __ AtomicStoreTaggedField(value, object, offset, i.TempRegister(0));
+ if (mode > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value, ool->exit());
+ }
+ __ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask,
+ eq, ool->entry());
+ __ Bind(ool->exit());
+ break;
+ }
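
The sequence above is the standard generational write-barrier fast path: a Smi can never be a heap pointer, so the barrier is skipped for it, and the out-of-line record-write stub runs only when the object's page flags mark pointers from that page as interesting. A rough sketch of that filtering, with illustrative tag and flag values rather than V8's real memory layout:

#include <cstdint>

// V8-style pointer tagging: Smis have a clear low bit, heap objects set it.
inline bool IsSmiSketch(uintptr_t tagged) { return (tagged & 1) == 0; }

constexpr uint32_t kPointersInterestingSketch = 1u << 0;  // hypothetical flag

inline bool NeedsRecordWriteSketch(uint32_t object_page_flags,
                                   uintptr_t stored_value) {
  if (IsSmiSketch(stored_value)) return false;  // Smis are never pointers.
  return (object_page_flags & kPointersInterestingSketch) != 0;
}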
case kArchStackSlot: {
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
@@ -1232,6 +1186,39 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0).Format(src_f));
break;
}
+ case kArm64ISplat: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ Register src = LaneSizeField::decode(opcode) == 64 ? i.InputRegister64(0)
+ : i.InputRegister32(0);
+ __ Dup(i.OutputSimd128Register().Format(f), src);
+ break;
+ }
+ case kArm64FSplat: {
+ VectorFormat src_f =
+ ScalarFormatFromLaneSize(LaneSizeField::decode(opcode));
+ VectorFormat dst_f = VectorFormatFillQ(src_f);
+ __ Dup(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(0).Format(src_f), 0);
+ break;
+ }
+ case kArm64Smlal: {
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidth(dst_f);
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Smlal(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(1).Format(src_f),
+ i.InputSimd128Register(2).Format(src_f));
+ break;
+ }
+ case kArm64Smlal2: {
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidthDoubleLanes(dst_f);
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Smlal2(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(1).Format(src_f),
+ i.InputSimd128Register(2).Format(src_f));
+ break;
+ }
case kArm64Smull: {
if (instr->InputAt(0)->IsRegister()) {
__ Smull(i.OutputRegister(), i.InputRegister32(0),
@@ -1254,6 +1241,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(1).Format(src_f));
break;
}
+ case kArm64Umlal: {
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidth(dst_f);
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Umlal(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(1).Format(src_f),
+ i.InputSimd128Register(2).Format(src_f));
+ break;
+ }
+ case kArm64Umlal2: {
+ VectorFormat dst_f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VectorFormat src_f = VectorFormatHalfWidthDoubleLanes(dst_f);
+ DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ Umlal2(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(1).Format(src_f),
+ i.InputSimd128Register(2).Format(src_f));
+ break;
+ }
case kArm64Umull: {
if (instr->InputAt(0)->IsRegister()) {
__ Umull(i.OutputRegister(), i.InputRegister32(0),
@@ -1551,6 +1556,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Cmn32:
__ Cmn(i.InputOrZeroRegister32(0), i.InputOperand2_32(1));
break;
+ case kArm64Cnt32: {
+ __ PopcntHelper(i.OutputRegister32(), i.InputRegister32(0));
+ break;
+ }
+ case kArm64Cnt64: {
+ __ PopcntHelper(i.OutputRegister64(), i.InputRegister64(0));
+ break;
+ }
case kArm64Cnt: {
VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
__ Cnt(i.OutputSimd128Register().Format(f),
@@ -1814,12 +1827,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Ldrb:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrb(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Ldrsb:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrsb(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrsbW:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -1832,12 +1843,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Ldrh:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64Ldrsh:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrsh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrshW:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -1850,12 +1859,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Ldrsw:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldrsw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrW:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputRegister32(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64StrW:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -1864,19 +1871,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64Ldr:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
__ Ldr(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrDecompressTaggedSigned:
__ DecompressTaggedSigned(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrDecompressTaggedPointer:
__ DecompressTaggedPointer(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kArm64LdrDecompressAnyTagged:
__ DecompressAnyTagged(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ break;
+ case kArm64LdarDecompressTaggedSigned:
+ __ AtomicDecompressTaggedSigned(i.OutputRegister(), i.InputRegister(0),
+ i.InputRegister(1), i.TempRegister(0));
+ break;
+ case kArm64LdarDecompressTaggedPointer:
+ __ AtomicDecompressTaggedPointer(i.OutputRegister(), i.InputRegister(0),
+ i.InputRegister(1), i.TempRegister(0));
+ break;
+ case kArm64LdarDecompressAnyTagged:
+ __ AtomicDecompressAnyTagged(i.OutputRegister(), i.InputRegister(0),
+ i.InputRegister(1), i.TempRegister(0));
break;
case kArm64Str:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -1885,9 +1900,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArm64StrCompressTagged:
__ StoreTaggedField(i.InputOrZeroRegister64(0), i.MemoryOperand(1));
break;
+ case kArm64StlrCompressTagged:
+ // To be consistent with other STLR instructions, the value is taken from
+ // the 3rd input register rather than the 1st.
+ __ AtomicStoreTaggedField(i.InputRegister(2), i.InputRegister(0),
+ i.InputRegister(1), i.TempRegister(0));
+ break;
case kArm64LdrS:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- EmitMaybePoisonedFPLoad(this, opcode, &i, i.OutputDoubleRegister().S());
+ __ Ldr(i.OutputDoubleRegister().S(), i.MemoryOperand());
break;
case kArm64StrS:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -1895,7 +1916,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArm64LdrD:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- EmitMaybePoisonedFPLoad(this, opcode, &i, i.OutputDoubleRegister());
+ __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
break;
case kArm64StrD:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -1916,117 +1937,100 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Dsb(FullSystem, BarrierAll);
__ Isb();
break;
- case kArchWordPoisonOnSpeculation:
- __ And(i.OutputRegister(0), i.InputRegister(0),
- Operand(kSpeculationPoisonRegister));
- break;
- case kWord32AtomicLoadInt8:
+ case kAtomicLoadInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb, Register32);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicLoadUint8:
- case kArm64Word64AtomicLoadUint8:
+ case kAtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarb, Register32);
break;
- case kWord32AtomicLoadInt16:
+ case kAtomicLoadInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh, Register32);
__ Sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicLoadUint16:
- case kArm64Word64AtomicLoadUint16:
+ case kAtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldarh, Register32);
break;
- case kWord32AtomicLoadWord32:
- case kArm64Word64AtomicLoadUint32:
+ case kAtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldar, Register32);
break;
case kArm64Word64AtomicLoadUint64:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ldar, Register);
break;
- case kWord32AtomicStoreWord8:
- case kArm64Word64AtomicStoreWord8:
+ case kAtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrb, Register32);
break;
- case kWord32AtomicStoreWord16:
- case kArm64Word64AtomicStoreWord16:
+ case kAtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(Stlrh, Register32);
break;
- case kWord32AtomicStoreWord32:
- case kArm64Word64AtomicStoreWord32:
+ case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(Stlr, Register32);
break;
case kArm64Word64AtomicStoreWord64:
ASSEMBLE_ATOMIC_STORE_INTEGER(Stlr, Register);
break;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb, Register32);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicExchangeUint8:
- case kArm64Word64AtomicExchangeUint8:
+ case kAtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrb, stlxrb, Register32);
break;
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh, Register32);
__ Sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicExchangeUint16:
- case kArm64Word64AtomicExchangeUint16:
+ case kAtomicExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxrh, stlxrh, Register32);
break;
- case kWord32AtomicExchangeWord32:
- case kArm64Word64AtomicExchangeUint32:
+ case kAtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr, Register32);
break;
case kArm64Word64AtomicExchangeUint64:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldaxr, stlxr, Register);
break;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB,
Register32);
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicCompareExchangeUint8:
- case kArm64Word64AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrb, stlxrb, UXTB,
Register32);
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH,
Register32);
__ Sxth(i.OutputRegister(0), i.OutputRegister(0));
break;
- case kWord32AtomicCompareExchangeUint16:
- case kArm64Word64AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxrh, stlxrh, UXTH,
Register32);
break;
- case kWord32AtomicCompareExchangeWord32:
- case kArm64Word64AtomicCompareExchangeUint32:
+ case kAtomicCompareExchangeWord32:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr, UXTW, Register32);
break;
case kArm64Word64AtomicCompareExchangeUint64:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(ldaxr, stlxr, UXTX, Register);
break;
#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
+ case kAtomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst, Register32); \
__ Sxtb(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
- case kWord32Atomic##op##Uint8: \
- case kArm64Word64Atomic##op##Uint8: \
+ case kAtomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP(ldaxrb, stlxrb, inst, Register32); \
break; \
- case kWord32Atomic##op##Int16: \
+ case kAtomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst, Register32); \
__ Sxth(i.OutputRegister(0), i.OutputRegister(0)); \
break; \
- case kWord32Atomic##op##Uint16: \
- case kArm64Word64Atomic##op##Uint16: \
+ case kAtomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP(ldaxrh, stlxrh, inst, Register32); \
break; \
- case kWord32Atomic##op##Word32: \
- case kArm64Word64Atomic##op##Uint32: \
+ case kAtomic##op##Word32: \
ASSEMBLE_ATOMIC_BINOP(ldaxr, stlxr, inst, Register32); \
break; \
case kArm64Word64Atomic##op##Uint64: \
@@ -2052,12 +2056,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Instr(i.OutputSimd128Register().V##FORMAT(), \
i.InputSimd128Register(0).V##FORMAT()); \
break;
+#define SIMD_UNOP_LANE_SIZE_CASE(Op, Instr) \
+ case Op: { \
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
+ __ Instr(i.OutputSimd128Register().Format(f), \
+ i.InputSimd128Register(0).Format(f)); \
+ break; \
+ }
#define SIMD_BINOP_CASE(Op, Instr, FORMAT) \
case Op: \
__ Instr(i.OutputSimd128Register().V##FORMAT(), \
i.InputSimd128Register(0).V##FORMAT(), \
i.InputSimd128Register(1).V##FORMAT()); \
break;
+#define SIMD_BINOP_LANE_SIZE_CASE(Op, Instr) \
+ case Op: { \
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
+ __ Instr(i.OutputSimd128Register().Format(f), \
+ i.InputSimd128Register(0).Format(f), \
+ i.InputSimd128Register(1).Format(f)); \
+ break; \
+ }
#define SIMD_DESTRUCTIVE_BINOP_CASE(Op, Instr, FORMAT) \
case Op: { \
VRegister dst = i.OutputSimd128Register().V##FORMAT(); \
@@ -2066,7 +2085,33 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(2).V##FORMAT()); \
break; \
}
-
+#define SIMD_DESTRUCTIVE_BINOP_LANE_SIZE_CASE(Op, Instr) \
+ case Op: { \
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode)); \
+ VRegister dst = i.OutputSimd128Register().Format(f); \
+ DCHECK_EQ(dst, i.InputSimd128Register(0).Format(f)); \
+ __ Instr(dst, i.InputSimd128Register(1).Format(f), \
+ i.InputSimd128Register(2).Format(f)); \
+ break; \
+ }
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FMin, Fmin);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FMax, Fmax);
+ SIMD_UNOP_LANE_SIZE_CASE(kArm64FAbs, Fabs);
+ SIMD_UNOP_LANE_SIZE_CASE(kArm64FSqrt, Fsqrt);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FAdd, Fadd);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FSub, Fsub);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FMul, Fmul);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FDiv, Fdiv);
+ SIMD_UNOP_LANE_SIZE_CASE(kArm64FNeg, Fneg);
+ SIMD_UNOP_LANE_SIZE_CASE(kArm64IAbs, Abs);
+ SIMD_UNOP_LANE_SIZE_CASE(kArm64INeg, Neg);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64RoundingAverageU, Urhadd);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IMinS, Smin);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IMaxS, Smax);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IMinU, Umin);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IMaxU, Umax);
+ SIMD_DESTRUCTIVE_BINOP_LANE_SIZE_CASE(kArm64Mla, Mla);
+ SIMD_DESTRUCTIVE_BINOP_LANE_SIZE_CASE(kArm64Mls, Mls);
case kArm64Sxtl: {
VectorFormat wide = VectorFormatFillQ(LaneSizeField::decode(opcode));
VectorFormat narrow = VectorFormatHalfWidth(wide);
@@ -2129,49 +2174,45 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
i.InputSimd128Register(0).V2S());
break;
}
- case kArm64F64x2Splat: {
- __ Dup(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).D(), 0);
+ case kArm64FExtractLane: {
+ VectorFormat dst_f =
+ ScalarFormatFromLaneSize(LaneSizeField::decode(opcode));
+ VectorFormat src_f = VectorFormatFillQ(dst_f);
+ __ Mov(i.OutputSimd128Register().Format(dst_f),
+ i.InputSimd128Register(0).Format(src_f), i.InputInt8(1));
break;
}
- case kArm64F64x2ExtractLane: {
- __ Mov(i.OutputSimd128Register().D(), i.InputSimd128Register(0).V2D(),
- i.InputInt8(1));
- break;
- }
- case kArm64F64x2ReplaceLane: {
- VRegister dst = i.OutputSimd128Register().V2D(),
- src1 = i.InputSimd128Register(0).V2D();
+ case kArm64FReplaceLane: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VRegister dst = i.OutputSimd128Register().Format(f),
+ src1 = i.InputSimd128Register(0).Format(f);
if (dst != src1) {
__ Mov(dst, src1);
}
- __ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).V2D(), 0);
- break;
- }
- SIMD_UNOP_CASE(kArm64F64x2Abs, Fabs, 2D);
- SIMD_UNOP_CASE(kArm64F64x2Neg, Fneg, 2D);
- SIMD_UNOP_CASE(kArm64F64x2Sqrt, Fsqrt, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Add, Fadd, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Sub, Fsub, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Mul, Fmul, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Div, Fdiv, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Min, Fmin, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Max, Fmax, 2D);
- SIMD_BINOP_CASE(kArm64F64x2Eq, Fcmeq, 2D);
- case kArm64F64x2Ne: {
- VRegister dst = i.OutputSimd128Register().V2D();
- __ Fcmeq(dst, i.InputSimd128Register(0).V2D(),
- i.InputSimd128Register(1).V2D());
+ __ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).Format(f), 0);
+ break;
+ }
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64FEq, Fcmeq);
+ case kArm64FNe: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VRegister dst = i.OutputSimd128Register().Format(f);
+ __ Fcmeq(dst, i.InputSimd128Register(0).Format(f),
+ i.InputSimd128Register(1).Format(f));
__ Mvn(dst, dst);
break;
}
- case kArm64F64x2Lt: {
- __ Fcmgt(i.OutputSimd128Register().V2D(), i.InputSimd128Register(1).V2D(),
- i.InputSimd128Register(0).V2D());
+ case kArm64FLt: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ __ Fcmgt(i.OutputSimd128Register().Format(f),
+ i.InputSimd128Register(1).Format(f),
+ i.InputSimd128Register(0).Format(f));
break;
}
- case kArm64F64x2Le: {
- __ Fcmge(i.OutputSimd128Register().V2D(), i.InputSimd128Register(1).V2D(),
- i.InputSimd128Register(0).V2D());
+ case kArm64FLe: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ __ Fcmge(i.OutputSimd128Register().Format(f),
+ i.InputSimd128Register(1).Format(f),
+ i.InputSimd128Register(0).Format(f));
break;
}
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F64x2Qfma, Fmla, 2D);
@@ -2197,63 +2238,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
break;
}
- case kArm64F32x4Splat: {
- __ Dup(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).S(), 0);
- break;
- }
- case kArm64F32x4ExtractLane: {
- __ Mov(i.OutputSimd128Register().S(), i.InputSimd128Register(0).V4S(),
- i.InputInt8(1));
- break;
- }
- case kArm64F32x4ReplaceLane: {
- VRegister dst = i.OutputSimd128Register().V4S(),
- src1 = i.InputSimd128Register(0).V4S();
- if (dst != src1) {
- __ Mov(dst, src1);
- }
- __ Mov(dst, i.InputInt8(1), i.InputSimd128Register(2).V4S(), 0);
- break;
- }
SIMD_UNOP_CASE(kArm64F32x4SConvertI32x4, Scvtf, 4S);
SIMD_UNOP_CASE(kArm64F32x4UConvertI32x4, Ucvtf, 4S);
- SIMD_UNOP_CASE(kArm64F32x4Abs, Fabs, 4S);
- SIMD_UNOP_CASE(kArm64F32x4Neg, Fneg, 4S);
- SIMD_UNOP_CASE(kArm64F32x4Sqrt, Fsqrt, 4S);
SIMD_UNOP_CASE(kArm64F32x4RecipApprox, Frecpe, 4S);
SIMD_UNOP_CASE(kArm64F32x4RecipSqrtApprox, Frsqrte, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Add, Fadd, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Sub, Fsub, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Mul, Fmul, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Div, Fdiv, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Min, Fmin, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Max, Fmax, 4S);
- SIMD_BINOP_CASE(kArm64F32x4Eq, Fcmeq, 4S);
- case kArm64F32x4MulElement: {
- __ Fmul(i.OutputSimd128Register().V4S(), i.InputSimd128Register(0).V4S(),
- i.InputSimd128Register(1).S(), i.InputInt8(2));
- break;
- }
- case kArm64F64x2MulElement: {
- __ Fmul(i.OutputSimd128Register().V2D(), i.InputSimd128Register(0).V2D(),
- i.InputSimd128Register(1).D(), i.InputInt8(2));
- break;
- }
- case kArm64F32x4Ne: {
- VRegister dst = i.OutputSimd128Register().V4S();
- __ Fcmeq(dst, i.InputSimd128Register(0).V4S(),
- i.InputSimd128Register(1).V4S());
- __ Mvn(dst, dst);
- break;
- }
- case kArm64F32x4Lt: {
- __ Fcmgt(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(),
- i.InputSimd128Register(0).V4S());
- break;
- }
- case kArm64F32x4Le: {
- __ Fcmge(i.OutputSimd128Register().V4S(), i.InputSimd128Register(1).V4S(),
- i.InputSimd128Register(0).V4S());
+ case kArm64FMulElement: {
+ VectorFormat s_f =
+ ScalarFormatFromLaneSize(LaneSizeField::decode(opcode));
+ VectorFormat v_f = VectorFormatFillQ(s_f);
+ __ Fmul(i.OutputSimd128Register().Format(v_f),
+ i.InputSimd128Register(0).Format(v_f),
+ i.InputSimd128Register(1).Format(s_f), i.InputInt8(2));
break;
}
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64F32x4Qfma, Fmla, 4S);
@@ -2279,26 +2274,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Bsl(dst.V16B(), rhs.V16B(), lhs.V16B());
break;
}
- case kArm64I64x2Splat: {
- __ Dup(i.OutputSimd128Register().V2D(), i.InputRegister64(0));
- break;
- }
- case kArm64I64x2ExtractLane: {
- __ Mov(i.OutputRegister64(), i.InputSimd128Register(0).V2D(),
- i.InputInt8(1));
+ case kArm64IExtractLane: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ Register dst =
+ f == kFormat2D ? i.OutputRegister64() : i.OutputRegister32();
+ __ Mov(dst, i.InputSimd128Register(0).Format(f), i.InputInt8(1));
break;
}
- case kArm64I64x2ReplaceLane: {
- VRegister dst = i.OutputSimd128Register().V2D(),
- src1 = i.InputSimd128Register(0).V2D();
+ case kArm64IReplaceLane: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VRegister dst = i.OutputSimd128Register().Format(f),
+ src1 = i.InputSimd128Register(0).Format(f);
+ Register src2 =
+ f == kFormat2D ? i.InputRegister64(2) : i.InputRegister32(2);
if (dst != src1) {
__ Mov(dst, src1);
}
- __ Mov(dst, i.InputInt8(1), i.InputRegister64(2));
+ __ Mov(dst, i.InputInt8(1), src2);
break;
}
- SIMD_UNOP_CASE(kArm64I64x2Abs, Abs, 2D);
- SIMD_UNOP_CASE(kArm64I64x2Neg, Neg, 2D);
case kArm64I64x2Shl: {
ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 6, V2D, Sshl, X);
break;
@@ -2307,8 +2301,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_SHIFT_RIGHT(Sshr, 6, V2D, Sshl, X);
break;
}
- SIMD_BINOP_CASE(kArm64I64x2Add, Add, 2D);
- SIMD_BINOP_CASE(kArm64I64x2Sub, Sub, 2D);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IAdd, Add);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64ISub, Sub);
case kArm64I64x2Mul: {
UseScratchRegisterScope scope(tasm());
VRegister dst = i.OutputSimd128Register();
@@ -2368,16 +2362,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
- SIMD_BINOP_CASE(kArm64I64x2Eq, Cmeq, 2D);
- case kArm64I64x2Ne: {
- VRegister dst = i.OutputSimd128Register().V2D();
- __ Cmeq(dst, i.InputSimd128Register(0).V2D(),
- i.InputSimd128Register(1).V2D());
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IEq, Cmeq);
+ case kArm64INe: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ VRegister dst = i.OutputSimd128Register().Format(f);
+ __ Cmeq(dst, i.InputSimd128Register(0).Format(f),
+ i.InputSimd128Register(1).Format(f));
__ Mvn(dst, dst);
break;
}
- SIMD_BINOP_CASE(kArm64I64x2GtS, Cmgt, 2D);
- SIMD_BINOP_CASE(kArm64I64x2GeS, Cmge, 2D);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IGtS, Cmgt);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IGeS, Cmge);
case kArm64I64x2ShrU: {
ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 6, V2D, Ushl, X);
break;
@@ -2386,26 +2381,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ I64x2BitMask(i.OutputRegister32(), i.InputSimd128Register(0));
break;
}
- case kArm64I32x4Splat: {
- __ Dup(i.OutputSimd128Register().V4S(), i.InputRegister32(0));
- break;
- }
- case kArm64I32x4ExtractLane: {
- __ Mov(i.OutputRegister32(), i.InputSimd128Register(0).V4S(),
- i.InputInt8(1));
- break;
- }
- case kArm64I32x4ReplaceLane: {
- VRegister dst = i.OutputSimd128Register().V4S(),
- src1 = i.InputSimd128Register(0).V4S();
- if (dst != src1) {
- __ Mov(dst, src1);
- }
- __ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
- break;
- }
SIMD_UNOP_CASE(kArm64I32x4SConvertF32x4, Fcvtzs, 4S);
- SIMD_UNOP_CASE(kArm64I32x4Neg, Neg, 4S);
case kArm64I32x4Shl: {
ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 5, V4S, Sshl, W);
break;
@@ -2414,33 +2390,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_SHIFT_RIGHT(Sshr, 5, V4S, Sshl, W);
break;
}
- SIMD_BINOP_CASE(kArm64I32x4Add, Add, 4S);
- SIMD_BINOP_CASE(kArm64I32x4Sub, Sub, 4S);
SIMD_BINOP_CASE(kArm64I32x4Mul, Mul, 4S);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I32x4Mla, Mla, 4S);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I32x4Mls, Mls, 4S);
- SIMD_BINOP_CASE(kArm64I32x4MinS, Smin, 4S);
- SIMD_BINOP_CASE(kArm64I32x4MaxS, Smax, 4S);
- SIMD_BINOP_CASE(kArm64I32x4Eq, Cmeq, 4S);
- case kArm64I32x4Ne: {
- VRegister dst = i.OutputSimd128Register().V4S();
- __ Cmeq(dst, i.InputSimd128Register(0).V4S(),
- i.InputSimd128Register(1).V4S());
- __ Mvn(dst, dst);
- break;
- }
- SIMD_BINOP_CASE(kArm64I32x4GtS, Cmgt, 4S);
- SIMD_BINOP_CASE(kArm64I32x4GeS, Cmge, 4S);
SIMD_UNOP_CASE(kArm64I32x4UConvertF32x4, Fcvtzu, 4S);
case kArm64I32x4ShrU: {
ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 5, V4S, Ushl, W);
break;
}
- SIMD_BINOP_CASE(kArm64I32x4MinU, Umin, 4S);
- SIMD_BINOP_CASE(kArm64I32x4MaxU, Umax, 4S);
- SIMD_BINOP_CASE(kArm64I32x4GtU, Cmhi, 4S);
- SIMD_BINOP_CASE(kArm64I32x4GeU, Cmhs, 4S);
- SIMD_UNOP_CASE(kArm64I32x4Abs, Abs, 4S);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IGtU, Cmhi);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IGeU, Cmhs);
case kArm64I32x4BitMask: {
UseScratchRegisterScope scope(tasm());
Register dst = i.OutputRegister32();
@@ -2468,30 +2425,18 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Addp(i.OutputSimd128Register().V4S(), tmp1, tmp2);
break;
}
- case kArm64I16x8Splat: {
- __ Dup(i.OutputSimd128Register().V8H(), i.InputRegister32(0));
- break;
- }
- case kArm64I16x8ExtractLaneU: {
- __ Umov(i.OutputRegister32(), i.InputSimd128Register(0).V8H(),
+ case kArm64IExtractLaneU: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ __ Umov(i.OutputRegister32(), i.InputSimd128Register(0).Format(f),
i.InputInt8(1));
break;
}
- case kArm64I16x8ExtractLaneS: {
- __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).V8H(),
+ case kArm64IExtractLaneS: {
+ VectorFormat f = VectorFormatFillQ(LaneSizeField::decode(opcode));
+ __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).Format(f),
i.InputInt8(1));
break;
}
- case kArm64I16x8ReplaceLane: {
- VRegister dst = i.OutputSimd128Register().V8H(),
- src1 = i.InputSimd128Register(0).V8H();
- if (dst != src1) {
- __ Mov(dst, src1);
- }
- __ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
- break;
- }
- SIMD_UNOP_CASE(kArm64I16x8Neg, Neg, 8H);
case kArm64I16x8Shl: {
ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 4, V8H, Sshl, W);
break;
@@ -2514,25 +2459,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sqxtn2(dst.V8H(), src1.V4S());
break;
}
- SIMD_BINOP_CASE(kArm64I16x8Add, Add, 8H);
- SIMD_BINOP_CASE(kArm64I16x8AddSatS, Sqadd, 8H);
- SIMD_BINOP_CASE(kArm64I16x8Sub, Sub, 8H);
- SIMD_BINOP_CASE(kArm64I16x8SubSatS, Sqsub, 8H);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IAddSatS, Sqadd);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64ISubSatS, Sqsub);
SIMD_BINOP_CASE(kArm64I16x8Mul, Mul, 8H);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I16x8Mla, Mla, 8H);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I16x8Mls, Mls, 8H);
- SIMD_BINOP_CASE(kArm64I16x8MinS, Smin, 8H);
- SIMD_BINOP_CASE(kArm64I16x8MaxS, Smax, 8H);
- SIMD_BINOP_CASE(kArm64I16x8Eq, Cmeq, 8H);
- case kArm64I16x8Ne: {
- VRegister dst = i.OutputSimd128Register().V8H();
- __ Cmeq(dst, i.InputSimd128Register(0).V8H(),
- i.InputSimd128Register(1).V8H());
- __ Mvn(dst, dst);
- break;
- }
- SIMD_BINOP_CASE(kArm64I16x8GtS, Cmgt, 8H);
- SIMD_BINOP_CASE(kArm64I16x8GeS, Cmge, 8H);
case kArm64I16x8ShrU: {
ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 4, V8H, Ushl, W);
break;
@@ -2551,15 +2480,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sqxtun2(dst.V8H(), src1.V4S());
break;
}
- SIMD_BINOP_CASE(kArm64I16x8AddSatU, Uqadd, 8H);
- SIMD_BINOP_CASE(kArm64I16x8SubSatU, Uqsub, 8H);
- SIMD_BINOP_CASE(kArm64I16x8MinU, Umin, 8H);
- SIMD_BINOP_CASE(kArm64I16x8MaxU, Umax, 8H);
- SIMD_BINOP_CASE(kArm64I16x8GtU, Cmhi, 8H);
- SIMD_BINOP_CASE(kArm64I16x8GeU, Cmhs, 8H);
- SIMD_BINOP_CASE(kArm64I16x8RoundingAverageU, Urhadd, 8H);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64IAddSatU, Uqadd);
+ SIMD_BINOP_LANE_SIZE_CASE(kArm64ISubSatU, Uqsub);
SIMD_BINOP_CASE(kArm64I16x8Q15MulRSatS, Sqrdmulh, 8H);
- SIMD_UNOP_CASE(kArm64I16x8Abs, Abs, 8H);
case kArm64I16x8BitMask: {
UseScratchRegisterScope scope(tasm());
Register dst = i.OutputRegister32();
@@ -2576,30 +2499,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Mov(dst.W(), tmp.V8H(), 0);
break;
}
- case kArm64I8x16Splat: {
- __ Dup(i.OutputSimd128Register().V16B(), i.InputRegister32(0));
- break;
- }
- case kArm64I8x16ExtractLaneU: {
- __ Umov(i.OutputRegister32(), i.InputSimd128Register(0).V16B(),
- i.InputInt8(1));
- break;
- }
- case kArm64I8x16ExtractLaneS: {
- __ Smov(i.OutputRegister32(), i.InputSimd128Register(0).V16B(),
- i.InputInt8(1));
- break;
- }
- case kArm64I8x16ReplaceLane: {
- VRegister dst = i.OutputSimd128Register().V16B(),
- src1 = i.InputSimd128Register(0).V16B();
- if (dst != src1) {
- __ Mov(dst, src1);
- }
- __ Mov(dst, i.InputInt8(1), i.InputRegister32(2));
- break;
- }
- SIMD_UNOP_CASE(kArm64I8x16Neg, Neg, 16B);
case kArm64I8x16Shl: {
ASSEMBLE_SIMD_SHIFT_LEFT(Shl, 3, V16B, Sshl, W);
break;
@@ -2622,24 +2521,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sqxtn2(dst.V16B(), src1.V8H());
break;
}
- SIMD_BINOP_CASE(kArm64I8x16Add, Add, 16B);
- SIMD_BINOP_CASE(kArm64I8x16AddSatS, Sqadd, 16B);
- SIMD_BINOP_CASE(kArm64I8x16Sub, Sub, 16B);
- SIMD_BINOP_CASE(kArm64I8x16SubSatS, Sqsub, 16B);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I8x16Mla, Mla, 16B);
- SIMD_DESTRUCTIVE_BINOP_CASE(kArm64I8x16Mls, Mls, 16B);
- SIMD_BINOP_CASE(kArm64I8x16MinS, Smin, 16B);
- SIMD_BINOP_CASE(kArm64I8x16MaxS, Smax, 16B);
- SIMD_BINOP_CASE(kArm64I8x16Eq, Cmeq, 16B);
- case kArm64I8x16Ne: {
- VRegister dst = i.OutputSimd128Register().V16B();
- __ Cmeq(dst, i.InputSimd128Register(0).V16B(),
- i.InputSimd128Register(1).V16B());
- __ Mvn(dst, dst);
- break;
- }
- SIMD_BINOP_CASE(kArm64I8x16GtS, Cmgt, 16B);
- SIMD_BINOP_CASE(kArm64I8x16GeS, Cmge, 16B);
case kArm64I8x16ShrU: {
ASSEMBLE_SIMD_SHIFT_RIGHT(Ushr, 3, V16B, Ushl, W);
break;
@@ -2658,14 +2539,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ Sqxtun2(dst.V16B(), src1.V8H());
break;
}
- SIMD_BINOP_CASE(kArm64I8x16AddSatU, Uqadd, 16B);
- SIMD_BINOP_CASE(kArm64I8x16SubSatU, Uqsub, 16B);
- SIMD_BINOP_CASE(kArm64I8x16MinU, Umin, 16B);
- SIMD_BINOP_CASE(kArm64I8x16MaxU, Umax, 16B);
- SIMD_BINOP_CASE(kArm64I8x16GtU, Cmhi, 16B);
- SIMD_BINOP_CASE(kArm64I8x16GeU, Cmhs, 16B);
- SIMD_BINOP_CASE(kArm64I8x16RoundingAverageU, Urhadd, 16B);
- SIMD_UNOP_CASE(kArm64I8x16Abs, Abs, 16B);
case kArm64I8x16BitMask: {
UseScratchRegisterScope scope(tasm());
Register dst = i.OutputRegister32();
@@ -2716,12 +2589,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
default:
UNREACHABLE();
- break;
}
break;
}
SIMD_DESTRUCTIVE_BINOP_CASE(kArm64S128Select, Bsl, 16B);
SIMD_BINOP_CASE(kArm64S128AndNot, Bic, 16B);
+ case kArm64Ssra: {
+ int8_t laneSize = LaneSizeField::decode(opcode);
+ VectorFormat f = VectorFormatFillQ(laneSize);
+ int8_t mask = laneSize - 1;
+ VRegister dst = i.OutputSimd128Register().Format(f);
+ DCHECK_EQ(dst, i.InputSimd128Register(0).Format(f));
+ __ Ssra(dst, i.InputSimd128Register(1).Format(f), i.InputInt8(2) & mask);
+ break;
+ }
+ case kArm64Usra: {
+ int8_t laneSize = LaneSizeField::decode(opcode);
+ VectorFormat f = VectorFormatFillQ(laneSize);
+ int8_t mask = laneSize - 1;
+ VRegister dst = i.OutputSimd128Register().Format(f);
+ DCHECK_EQ(dst, i.InputSimd128Register(0).Format(f));
+ __ Usra(dst, i.InputSimd128Register(1).Format(f), i.InputUint8(2) & mask);
+ break;
+ }
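
Both Ssra and Usra mask the shift immediate with laneSize - 1, consistent with WebAssembly taking SIMD shift counts modulo the lane width. A one-function sketch of that normalization, assuming lane widths are powers of two:

#include <cstdint>

// Reduce a shift amount to [0, lane_bits) for lane_bits in {8, 16, 32, 64}.
constexpr uint32_t MaskShiftSketch(uint32_t shift, uint32_t lane_bits) {
  return shift & (lane_bits - 1);
}

static_assert(MaskShiftSketch(9, 8) == 1, "9 mod 8 == 1");
static_assert(MaskShiftSketch(63, 64) == 63, "already in range");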
case kArm64S32x4Shuffle: {
Simd128Register dst = i.OutputSimd128Register().V4S(),
src0 = i.InputSimd128Register(0).V4S(),
@@ -2892,8 +2782,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
#undef SIMD_UNOP_CASE
+#undef SIMD_UNOP_LANE_SIZE_CASE
#undef SIMD_BINOP_CASE
+#undef SIMD_BINOP_LANE_SIZE_CASE
#undef SIMD_DESTRUCTIVE_BINOP_CASE
+#undef SIMD_DESTRUCTIVE_BINOP_LANE_SIZE_CASE
#undef SIMD_REDUCE_OP_CASE
#undef ASSEMBLE_SIMD_SHIFT_LEFT
#undef ASSEMBLE_SIMD_SHIFT_RIGHT
@@ -2907,7 +2800,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
ArchOpcode opcode = instr->arch_opcode();
if (opcode == kArm64CompareAndBranch32) {
- DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Cbz(i.InputRegister32(0), tlabel);
@@ -2919,7 +2811,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNREACHABLE();
}
} else if (opcode == kArm64CompareAndBranch) {
- DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Cbz(i.InputRegister64(0), tlabel);
@@ -2931,7 +2822,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNREACHABLE();
}
} else if (opcode == kArm64TestAndBranch32) {
- DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Tbz(i.InputRegister32(0), i.InputInt5(1), tlabel);
@@ -2943,7 +2833,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
UNREACHABLE();
}
} else if (opcode == kArm64TestAndBranch) {
- DCHECK(FlagsModeField::decode(instr->opcode()) != kFlags_branch_and_poison);
switch (condition) {
case kEqual:
__ Tbz(i.InputRegister64(0), i.InputInt6(1), tlabel);
@@ -2961,19 +2850,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ B(flabel); // no fallthru to flabel.
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
- return;
- }
-
- condition = NegateFlagsCondition(condition);
- __ CmovX(kSpeculationPoisonRegister, xzr,
- FlagsConditionToCondition(condition));
- __ Csdb();
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -3143,7 +3019,6 @@ void CodeGenerator::AssembleConstructFrame() {
// arguments count was pushed.
required_slots -=
unoptimized_frame_slots - TurboAssembler::kExtraSlotClaimedByPrologue;
- ResetSpeculationPoison();
}
#if V8_ENABLE_WEBASSEMBLY
@@ -3343,7 +3218,9 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// number of arguments is given by max(1 + argc_reg, parameter_slots).
Label argc_reg_has_final_count;
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
- __ Add(argc_reg, argc_reg, 1); // Consider the receiver.
+ if (!kJSArgcIncludesReceiver) {
+ __ Add(argc_reg, argc_reg, 1); // Consider the receiver.
+ }
if (parameter_slots > 1) {
__ Cmp(argc_reg, Operand(parameter_slots));
__ B(&argc_reg_has_final_count, ge);
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
index 3f2e6151b6..d57203639e 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
+++ b/deps/v8/src/compiler/backend/arm64/instruction-codes-arm64.h
@@ -11,423 +11,337 @@ namespace compiler {
// ARM64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(Arm64Add) \
- V(Arm64Add32) \
- V(Arm64And) \
- V(Arm64And32) \
- V(Arm64Bic) \
- V(Arm64Bic32) \
- V(Arm64Clz) \
- V(Arm64Clz32) \
- V(Arm64Cmp) \
- V(Arm64Cmp32) \
- V(Arm64Cmn) \
- V(Arm64Cmn32) \
- V(Arm64Cnt) \
- V(Arm64Tst) \
- V(Arm64Tst32) \
- V(Arm64Or) \
- V(Arm64Or32) \
- V(Arm64Orn) \
- V(Arm64Orn32) \
- V(Arm64Eor) \
- V(Arm64Eor32) \
- V(Arm64Eon) \
- V(Arm64Eon32) \
- V(Arm64Sadalp) \
- V(Arm64Saddlp) \
- V(Arm64Sub) \
- V(Arm64Sub32) \
- V(Arm64Mul) \
- V(Arm64Mul32) \
- V(Arm64Smull) \
- V(Arm64Smull2) \
- V(Arm64Uadalp) \
- V(Arm64Uaddlp) \
- V(Arm64Umull) \
- V(Arm64Umull2) \
- V(Arm64Madd) \
- V(Arm64Madd32) \
- V(Arm64Msub) \
- V(Arm64Msub32) \
- V(Arm64Mneg) \
- V(Arm64Mneg32) \
- V(Arm64Idiv) \
- V(Arm64Idiv32) \
- V(Arm64Udiv) \
- V(Arm64Udiv32) \
- V(Arm64Imod) \
- V(Arm64Imod32) \
- V(Arm64Umod) \
- V(Arm64Umod32) \
- V(Arm64Not) \
- V(Arm64Not32) \
- V(Arm64Lsl) \
- V(Arm64Lsl32) \
- V(Arm64Lsr) \
- V(Arm64Lsr32) \
- V(Arm64Asr) \
- V(Arm64Asr32) \
- V(Arm64Ror) \
- V(Arm64Ror32) \
- V(Arm64Mov32) \
- V(Arm64Sxtb32) \
- V(Arm64Sxth32) \
- V(Arm64Sxtb) \
- V(Arm64Sxth) \
- V(Arm64Sxtw) \
- V(Arm64Sbfx) \
- V(Arm64Sbfx32) \
- V(Arm64Ubfx) \
- V(Arm64Ubfx32) \
- V(Arm64Ubfiz32) \
- V(Arm64Bfi) \
- V(Arm64Rbit) \
- V(Arm64Rbit32) \
- V(Arm64Rev) \
- V(Arm64Rev32) \
- V(Arm64TestAndBranch32) \
- V(Arm64TestAndBranch) \
- V(Arm64CompareAndBranch32) \
- V(Arm64CompareAndBranch) \
- V(Arm64Claim) \
- V(Arm64Poke) \
- V(Arm64PokePair) \
- V(Arm64Peek) \
- V(Arm64Float32Cmp) \
- V(Arm64Float32Add) \
- V(Arm64Float32Sub) \
- V(Arm64Float32Mul) \
- V(Arm64Float32Div) \
- V(Arm64Float32Abs) \
- V(Arm64Float32Abd) \
- V(Arm64Float32Neg) \
- V(Arm64Float32Sqrt) \
- V(Arm64Float32Fnmul) \
- V(Arm64Float32RoundDown) \
- V(Arm64Float32Max) \
- V(Arm64Float32Min) \
- V(Arm64Float64Cmp) \
- V(Arm64Float64Add) \
- V(Arm64Float64Sub) \
- V(Arm64Float64Mul) \
- V(Arm64Float64Div) \
- V(Arm64Float64Mod) \
- V(Arm64Float64Max) \
- V(Arm64Float64Min) \
- V(Arm64Float64Abs) \
- V(Arm64Float64Abd) \
- V(Arm64Float64Neg) \
- V(Arm64Float64Sqrt) \
- V(Arm64Float64Fnmul) \
- V(Arm64Float64RoundDown) \
- V(Arm64Float32RoundUp) \
- V(Arm64Float64RoundUp) \
- V(Arm64Float64RoundTiesAway) \
- V(Arm64Float32RoundTruncate) \
- V(Arm64Float64RoundTruncate) \
- V(Arm64Float32RoundTiesEven) \
- V(Arm64Float64RoundTiesEven) \
- V(Arm64Float64SilenceNaN) \
- V(Arm64Float32ToFloat64) \
- V(Arm64Float64ToFloat32) \
- V(Arm64Float32ToInt32) \
- V(Arm64Float64ToInt32) \
- V(Arm64Float32ToUint32) \
- V(Arm64Float64ToUint32) \
- V(Arm64Float32ToInt64) \
- V(Arm64Float64ToInt64) \
- V(Arm64Float32ToUint64) \
- V(Arm64Float64ToUint64) \
- V(Arm64Int32ToFloat32) \
- V(Arm64Int32ToFloat64) \
- V(Arm64Int64ToFloat32) \
- V(Arm64Int64ToFloat64) \
- V(Arm64Uint32ToFloat32) \
- V(Arm64Uint32ToFloat64) \
- V(Arm64Uint64ToFloat32) \
- V(Arm64Uint64ToFloat64) \
- V(Arm64Float64ExtractLowWord32) \
- V(Arm64Float64ExtractHighWord32) \
- V(Arm64Float64InsertLowWord32) \
- V(Arm64Float64InsertHighWord32) \
- V(Arm64Float64MoveU64) \
- V(Arm64U64MoveFloat64) \
- V(Arm64LdrS) \
- V(Arm64StrS) \
- V(Arm64LdrD) \
- V(Arm64StrD) \
- V(Arm64LdrQ) \
- V(Arm64StrQ) \
- V(Arm64Ldrb) \
- V(Arm64Ldrsb) \
- V(Arm64LdrsbW) \
- V(Arm64Strb) \
- V(Arm64Ldrh) \
- V(Arm64Ldrsh) \
- V(Arm64LdrshW) \
- V(Arm64Strh) \
- V(Arm64Ldrsw) \
- V(Arm64LdrW) \
- V(Arm64StrW) \
- V(Arm64Ldr) \
- V(Arm64LdrDecompressTaggedSigned) \
- V(Arm64LdrDecompressTaggedPointer) \
- V(Arm64LdrDecompressAnyTagged) \
- V(Arm64Str) \
- V(Arm64StrCompressTagged) \
- V(Arm64DmbIsh) \
- V(Arm64DsbIsb) \
- V(Arm64Sxtl) \
- V(Arm64Sxtl2) \
- V(Arm64Uxtl) \
- V(Arm64Uxtl2) \
- V(Arm64F64x2Splat) \
- V(Arm64F64x2ExtractLane) \
- V(Arm64F64x2ReplaceLane) \
- V(Arm64F64x2Abs) \
- V(Arm64F64x2Neg) \
- V(Arm64F64x2Sqrt) \
- V(Arm64F64x2Add) \
- V(Arm64F64x2Sub) \
- V(Arm64F64x2Mul) \
- V(Arm64F64x2MulElement) \
- V(Arm64F64x2Div) \
- V(Arm64F64x2Min) \
- V(Arm64F64x2Max) \
- V(Arm64F64x2Eq) \
- V(Arm64F64x2Ne) \
- V(Arm64F64x2Lt) \
- V(Arm64F64x2Le) \
- V(Arm64F64x2Qfma) \
- V(Arm64F64x2Qfms) \
- V(Arm64F64x2Pmin) \
- V(Arm64F64x2Pmax) \
- V(Arm64F64x2ConvertLowI32x4S) \
- V(Arm64F64x2ConvertLowI32x4U) \
- V(Arm64F64x2PromoteLowF32x4) \
- V(Arm64F32x4Splat) \
- V(Arm64F32x4ExtractLane) \
- V(Arm64F32x4ReplaceLane) \
- V(Arm64F32x4SConvertI32x4) \
- V(Arm64F32x4UConvertI32x4) \
- V(Arm64F32x4Abs) \
- V(Arm64F32x4Neg) \
- V(Arm64F32x4Sqrt) \
- V(Arm64F32x4RecipApprox) \
- V(Arm64F32x4RecipSqrtApprox) \
- V(Arm64F32x4Add) \
- V(Arm64F32x4Sub) \
- V(Arm64F32x4Mul) \
- V(Arm64F32x4MulElement) \
- V(Arm64F32x4Div) \
- V(Arm64F32x4Min) \
- V(Arm64F32x4Max) \
- V(Arm64F32x4Eq) \
- V(Arm64F32x4Ne) \
- V(Arm64F32x4Lt) \
- V(Arm64F32x4Le) \
- V(Arm64F32x4Qfma) \
- V(Arm64F32x4Qfms) \
- V(Arm64F32x4Pmin) \
- V(Arm64F32x4Pmax) \
- V(Arm64F32x4DemoteF64x2Zero) \
- V(Arm64I64x2Splat) \
- V(Arm64I64x2ExtractLane) \
- V(Arm64I64x2ReplaceLane) \
- V(Arm64I64x2Abs) \
- V(Arm64I64x2Neg) \
- V(Arm64I64x2Shl) \
- V(Arm64I64x2ShrS) \
- V(Arm64I64x2Add) \
- V(Arm64I64x2Sub) \
- V(Arm64I64x2Mul) \
- V(Arm64I64x2Eq) \
- V(Arm64I64x2Ne) \
- V(Arm64I64x2GtS) \
- V(Arm64I64x2GeS) \
- V(Arm64I64x2ShrU) \
- V(Arm64I64x2BitMask) \
- V(Arm64I32x4Splat) \
- V(Arm64I32x4ExtractLane) \
- V(Arm64I32x4ReplaceLane) \
- V(Arm64I32x4SConvertF32x4) \
- V(Arm64I32x4Neg) \
- V(Arm64I32x4Shl) \
- V(Arm64I32x4ShrS) \
- V(Arm64I32x4Add) \
- V(Arm64I32x4Sub) \
- V(Arm64I32x4Mul) \
- V(Arm64I32x4Mla) \
- V(Arm64I32x4Mls) \
- V(Arm64I32x4MinS) \
- V(Arm64I32x4MaxS) \
- V(Arm64I32x4Eq) \
- V(Arm64I32x4Ne) \
- V(Arm64I32x4GtS) \
- V(Arm64I32x4GeS) \
- V(Arm64I32x4UConvertF32x4) \
- V(Arm64I32x4ShrU) \
- V(Arm64I32x4MinU) \
- V(Arm64I32x4MaxU) \
- V(Arm64I32x4GtU) \
- V(Arm64I32x4GeU) \
- V(Arm64I32x4Abs) \
- V(Arm64I32x4BitMask) \
- V(Arm64I32x4DotI16x8S) \
- V(Arm64I32x4TruncSatF64x2SZero) \
- V(Arm64I32x4TruncSatF64x2UZero) \
- V(Arm64I16x8Splat) \
- V(Arm64I16x8ExtractLaneU) \
- V(Arm64I16x8ExtractLaneS) \
- V(Arm64I16x8ReplaceLane) \
- V(Arm64I16x8Neg) \
- V(Arm64I16x8Shl) \
- V(Arm64I16x8ShrS) \
- V(Arm64I16x8SConvertI32x4) \
- V(Arm64I16x8Add) \
- V(Arm64I16x8AddSatS) \
- V(Arm64I16x8Sub) \
- V(Arm64I16x8SubSatS) \
- V(Arm64I16x8Mul) \
- V(Arm64I16x8Mla) \
- V(Arm64I16x8Mls) \
- V(Arm64I16x8MinS) \
- V(Arm64I16x8MaxS) \
- V(Arm64I16x8Eq) \
- V(Arm64I16x8Ne) \
- V(Arm64I16x8GtS) \
- V(Arm64I16x8GeS) \
- V(Arm64I16x8ShrU) \
- V(Arm64I16x8UConvertI32x4) \
- V(Arm64I16x8AddSatU) \
- V(Arm64I16x8SubSatU) \
- V(Arm64I16x8MinU) \
- V(Arm64I16x8MaxU) \
- V(Arm64I16x8GtU) \
- V(Arm64I16x8GeU) \
- V(Arm64I16x8RoundingAverageU) \
- V(Arm64I16x8Q15MulRSatS) \
- V(Arm64I16x8Abs) \
- V(Arm64I16x8BitMask) \
- V(Arm64I8x16Splat) \
- V(Arm64I8x16ExtractLaneU) \
- V(Arm64I8x16ExtractLaneS) \
- V(Arm64I8x16ReplaceLane) \
- V(Arm64I8x16Neg) \
- V(Arm64I8x16Shl) \
- V(Arm64I8x16ShrS) \
- V(Arm64I8x16SConvertI16x8) \
- V(Arm64I8x16Add) \
- V(Arm64I8x16AddSatS) \
- V(Arm64I8x16Sub) \
- V(Arm64I8x16SubSatS) \
- V(Arm64I8x16Mla) \
- V(Arm64I8x16Mls) \
- V(Arm64I8x16MinS) \
- V(Arm64I8x16MaxS) \
- V(Arm64I8x16Eq) \
- V(Arm64I8x16Ne) \
- V(Arm64I8x16GtS) \
- V(Arm64I8x16GeS) \
- V(Arm64I8x16ShrU) \
- V(Arm64I8x16UConvertI16x8) \
- V(Arm64I8x16AddSatU) \
- V(Arm64I8x16SubSatU) \
- V(Arm64I8x16MinU) \
- V(Arm64I8x16MaxU) \
- V(Arm64I8x16GtU) \
- V(Arm64I8x16GeU) \
- V(Arm64I8x16RoundingAverageU) \
- V(Arm64I8x16Abs) \
- V(Arm64I8x16BitMask) \
- V(Arm64S128Const) \
- V(Arm64S128Zero) \
- V(Arm64S128Dup) \
- V(Arm64S128And) \
- V(Arm64S128Or) \
- V(Arm64S128Xor) \
- V(Arm64S128Not) \
- V(Arm64S128Select) \
- V(Arm64S128AndNot) \
- V(Arm64S32x4ZipLeft) \
- V(Arm64S32x4ZipRight) \
- V(Arm64S32x4UnzipLeft) \
- V(Arm64S32x4UnzipRight) \
- V(Arm64S32x4TransposeLeft) \
- V(Arm64S32x4TransposeRight) \
- V(Arm64S32x4Shuffle) \
- V(Arm64S16x8ZipLeft) \
- V(Arm64S16x8ZipRight) \
- V(Arm64S16x8UnzipLeft) \
- V(Arm64S16x8UnzipRight) \
- V(Arm64S16x8TransposeLeft) \
- V(Arm64S16x8TransposeRight) \
- V(Arm64S8x16ZipLeft) \
- V(Arm64S8x16ZipRight) \
- V(Arm64S8x16UnzipLeft) \
- V(Arm64S8x16UnzipRight) \
- V(Arm64S8x16TransposeLeft) \
- V(Arm64S8x16TransposeRight) \
- V(Arm64S8x16Concat) \
- V(Arm64I8x16Swizzle) \
- V(Arm64I8x16Shuffle) \
- V(Arm64S32x2Reverse) \
- V(Arm64S16x4Reverse) \
- V(Arm64S16x2Reverse) \
- V(Arm64S8x8Reverse) \
- V(Arm64S8x4Reverse) \
- V(Arm64S8x2Reverse) \
- V(Arm64V128AnyTrue) \
- V(Arm64I64x2AllTrue) \
- V(Arm64I32x4AllTrue) \
- V(Arm64I16x8AllTrue) \
- V(Arm64I8x16AllTrue) \
- V(Arm64LoadSplat) \
- V(Arm64LoadLane) \
- V(Arm64StoreLane) \
- V(Arm64S128Load8x8S) \
- V(Arm64S128Load8x8U) \
- V(Arm64S128Load16x4S) \
- V(Arm64S128Load16x4U) \
- V(Arm64S128Load32x2S) \
- V(Arm64S128Load32x2U) \
- V(Arm64Word64AtomicLoadUint8) \
- V(Arm64Word64AtomicLoadUint16) \
- V(Arm64Word64AtomicLoadUint32) \
- V(Arm64Word64AtomicLoadUint64) \
- V(Arm64Word64AtomicStoreWord8) \
- V(Arm64Word64AtomicStoreWord16) \
- V(Arm64Word64AtomicStoreWord32) \
- V(Arm64Word64AtomicStoreWord64) \
- V(Arm64Word64AtomicAddUint8) \
- V(Arm64Word64AtomicAddUint16) \
- V(Arm64Word64AtomicAddUint32) \
- V(Arm64Word64AtomicAddUint64) \
- V(Arm64Word64AtomicSubUint8) \
- V(Arm64Word64AtomicSubUint16) \
- V(Arm64Word64AtomicSubUint32) \
- V(Arm64Word64AtomicSubUint64) \
- V(Arm64Word64AtomicAndUint8) \
- V(Arm64Word64AtomicAndUint16) \
- V(Arm64Word64AtomicAndUint32) \
- V(Arm64Word64AtomicAndUint64) \
- V(Arm64Word64AtomicOrUint8) \
- V(Arm64Word64AtomicOrUint16) \
- V(Arm64Word64AtomicOrUint32) \
- V(Arm64Word64AtomicOrUint64) \
- V(Arm64Word64AtomicXorUint8) \
- V(Arm64Word64AtomicXorUint16) \
- V(Arm64Word64AtomicXorUint32) \
- V(Arm64Word64AtomicXorUint64) \
- V(Arm64Word64AtomicExchangeUint8) \
- V(Arm64Word64AtomicExchangeUint16) \
- V(Arm64Word64AtomicExchangeUint32) \
- V(Arm64Word64AtomicExchangeUint64) \
- V(Arm64Word64AtomicCompareExchangeUint8) \
- V(Arm64Word64AtomicCompareExchangeUint16) \
- V(Arm64Word64AtomicCompareExchangeUint32) \
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(Arm64Add) \
+ V(Arm64Add32) \
+ V(Arm64And) \
+ V(Arm64And32) \
+ V(Arm64Bic) \
+ V(Arm64Bic32) \
+ V(Arm64Clz) \
+ V(Arm64Clz32) \
+ V(Arm64Cmp) \
+ V(Arm64Cmp32) \
+ V(Arm64Cmn) \
+ V(Arm64Cmn32) \
+ V(Arm64Cnt) \
+ V(Arm64Cnt32) \
+ V(Arm64Cnt64) \
+ V(Arm64Tst) \
+ V(Arm64Tst32) \
+ V(Arm64Or) \
+ V(Arm64Or32) \
+ V(Arm64Orn) \
+ V(Arm64Orn32) \
+ V(Arm64Eor) \
+ V(Arm64Eor32) \
+ V(Arm64Eon) \
+ V(Arm64Eon32) \
+ V(Arm64Sadalp) \
+ V(Arm64Saddlp) \
+ V(Arm64Sub) \
+ V(Arm64Sub32) \
+ V(Arm64Mul) \
+ V(Arm64Mul32) \
+ V(Arm64Smlal) \
+ V(Arm64Smlal2) \
+ V(Arm64Smull) \
+ V(Arm64Smull2) \
+ V(Arm64Uadalp) \
+ V(Arm64Uaddlp) \
+ V(Arm64Umlal) \
+ V(Arm64Umlal2) \
+ V(Arm64Umull) \
+ V(Arm64Umull2) \
+ V(Arm64Madd) \
+ V(Arm64Madd32) \
+ V(Arm64Msub) \
+ V(Arm64Msub32) \
+ V(Arm64Mneg) \
+ V(Arm64Mneg32) \
+ V(Arm64Idiv) \
+ V(Arm64Idiv32) \
+ V(Arm64Udiv) \
+ V(Arm64Udiv32) \
+ V(Arm64Imod) \
+ V(Arm64Imod32) \
+ V(Arm64Umod) \
+ V(Arm64Umod32) \
+ V(Arm64Not) \
+ V(Arm64Not32) \
+ V(Arm64Lsl) \
+ V(Arm64Lsl32) \
+ V(Arm64Lsr) \
+ V(Arm64Lsr32) \
+ V(Arm64Asr) \
+ V(Arm64Asr32) \
+ V(Arm64Ror) \
+ V(Arm64Ror32) \
+ V(Arm64Mov32) \
+ V(Arm64Sxtb32) \
+ V(Arm64Sxth32) \
+ V(Arm64Sxtb) \
+ V(Arm64Sxth) \
+ V(Arm64Sxtw) \
+ V(Arm64Sbfx) \
+ V(Arm64Sbfx32) \
+ V(Arm64Ubfx) \
+ V(Arm64Ubfx32) \
+ V(Arm64Ubfiz32) \
+ V(Arm64Bfi) \
+ V(Arm64Rbit) \
+ V(Arm64Rbit32) \
+ V(Arm64Rev) \
+ V(Arm64Rev32) \
+ V(Arm64TestAndBranch32) \
+ V(Arm64TestAndBranch) \
+ V(Arm64CompareAndBranch32) \
+ V(Arm64CompareAndBranch) \
+ V(Arm64Claim) \
+ V(Arm64Poke) \
+ V(Arm64PokePair) \
+ V(Arm64Peek) \
+ V(Arm64Float32Cmp) \
+ V(Arm64Float32Add) \
+ V(Arm64Float32Sub) \
+ V(Arm64Float32Mul) \
+ V(Arm64Float32Div) \
+ V(Arm64Float32Abs) \
+ V(Arm64Float32Abd) \
+ V(Arm64Float32Neg) \
+ V(Arm64Float32Sqrt) \
+ V(Arm64Float32Fnmul) \
+ V(Arm64Float32RoundDown) \
+ V(Arm64Float32Max) \
+ V(Arm64Float32Min) \
+ V(Arm64Float64Cmp) \
+ V(Arm64Float64Add) \
+ V(Arm64Float64Sub) \
+ V(Arm64Float64Mul) \
+ V(Arm64Float64Div) \
+ V(Arm64Float64Mod) \
+ V(Arm64Float64Max) \
+ V(Arm64Float64Min) \
+ V(Arm64Float64Abs) \
+ V(Arm64Float64Abd) \
+ V(Arm64Float64Neg) \
+ V(Arm64Float64Sqrt) \
+ V(Arm64Float64Fnmul) \
+ V(Arm64Float64RoundDown) \
+ V(Arm64Float32RoundUp) \
+ V(Arm64Float64RoundUp) \
+ V(Arm64Float64RoundTiesAway) \
+ V(Arm64Float32RoundTruncate) \
+ V(Arm64Float64RoundTruncate) \
+ V(Arm64Float32RoundTiesEven) \
+ V(Arm64Float64RoundTiesEven) \
+ V(Arm64Float64SilenceNaN) \
+ V(Arm64Float32ToFloat64) \
+ V(Arm64Float64ToFloat32) \
+ V(Arm64Float32ToInt32) \
+ V(Arm64Float64ToInt32) \
+ V(Arm64Float32ToUint32) \
+ V(Arm64Float64ToUint32) \
+ V(Arm64Float32ToInt64) \
+ V(Arm64Float64ToInt64) \
+ V(Arm64Float32ToUint64) \
+ V(Arm64Float64ToUint64) \
+ V(Arm64Int32ToFloat32) \
+ V(Arm64Int32ToFloat64) \
+ V(Arm64Int64ToFloat32) \
+ V(Arm64Int64ToFloat64) \
+ V(Arm64Uint32ToFloat32) \
+ V(Arm64Uint32ToFloat64) \
+ V(Arm64Uint64ToFloat32) \
+ V(Arm64Uint64ToFloat64) \
+ V(Arm64Float64ExtractLowWord32) \
+ V(Arm64Float64ExtractHighWord32) \
+ V(Arm64Float64InsertLowWord32) \
+ V(Arm64Float64InsertHighWord32) \
+ V(Arm64Float64MoveU64) \
+ V(Arm64U64MoveFloat64) \
+ V(Arm64LdrS) \
+ V(Arm64StrS) \
+ V(Arm64LdrD) \
+ V(Arm64StrD) \
+ V(Arm64LdrQ) \
+ V(Arm64StrQ) \
+ V(Arm64Ldrb) \
+ V(Arm64Ldrsb) \
+ V(Arm64LdrsbW) \
+ V(Arm64Strb) \
+ V(Arm64Ldrh) \
+ V(Arm64Ldrsh) \
+ V(Arm64LdrshW) \
+ V(Arm64Strh) \
+ V(Arm64Ldrsw) \
+ V(Arm64LdrW) \
+ V(Arm64StrW) \
+ V(Arm64Ldr) \
+ V(Arm64LdrDecompressTaggedSigned) \
+ V(Arm64LdrDecompressTaggedPointer) \
+ V(Arm64LdrDecompressAnyTagged) \
+ V(Arm64LdarDecompressTaggedSigned) \
+ V(Arm64LdarDecompressTaggedPointer) \
+ V(Arm64LdarDecompressAnyTagged) \
+ V(Arm64Str) \
+ V(Arm64StrCompressTagged) \
+ V(Arm64StlrCompressTagged) \
+ V(Arm64DmbIsh) \
+ V(Arm64DsbIsb) \
+ V(Arm64Sxtl) \
+ V(Arm64Sxtl2) \
+ V(Arm64Uxtl) \
+ V(Arm64Uxtl2) \
+ V(Arm64FSplat) \
+ V(Arm64FAbs) \
+ V(Arm64FSqrt) \
+ V(Arm64FNeg) \
+ V(Arm64FExtractLane) \
+ V(Arm64FReplaceLane) \
+ V(Arm64FAdd) \
+ V(Arm64FSub) \
+ V(Arm64FMul) \
+ V(Arm64FMulElement) \
+ V(Arm64FDiv) \
+ V(Arm64FMin) \
+ V(Arm64FMax) \
+ V(Arm64FEq) \
+ V(Arm64FNe) \
+ V(Arm64FLt) \
+ V(Arm64FLe) \
+ V(Arm64F64x2Qfma) \
+ V(Arm64F64x2Qfms) \
+ V(Arm64F64x2Pmin) \
+ V(Arm64F64x2Pmax) \
+ V(Arm64F64x2ConvertLowI32x4S) \
+ V(Arm64F64x2ConvertLowI32x4U) \
+ V(Arm64F64x2PromoteLowF32x4) \
+ V(Arm64F32x4SConvertI32x4) \
+ V(Arm64F32x4UConvertI32x4) \
+ V(Arm64F32x4RecipApprox) \
+ V(Arm64F32x4RecipSqrtApprox) \
+ V(Arm64F32x4Qfma) \
+ V(Arm64F32x4Qfms) \
+ V(Arm64F32x4Pmin) \
+ V(Arm64F32x4Pmax) \
+ V(Arm64F32x4DemoteF64x2Zero) \
+ V(Arm64ISplat) \
+ V(Arm64IAbs) \
+ V(Arm64INeg) \
+ V(Arm64IExtractLane) \
+ V(Arm64IReplaceLane) \
+ V(Arm64I64x2Shl) \
+ V(Arm64I64x2ShrS) \
+ V(Arm64IAdd) \
+ V(Arm64ISub) \
+ V(Arm64I64x2Mul) \
+ V(Arm64IEq) \
+ V(Arm64INe) \
+ V(Arm64IGtS) \
+ V(Arm64IGeS) \
+ V(Arm64I64x2ShrU) \
+ V(Arm64I64x2BitMask) \
+ V(Arm64I32x4SConvertF32x4) \
+ V(Arm64I32x4Shl) \
+ V(Arm64I32x4ShrS) \
+ V(Arm64I32x4Mul) \
+ V(Arm64Mla) \
+ V(Arm64Mls) \
+ V(Arm64IMinS) \
+ V(Arm64IMaxS) \
+ V(Arm64I32x4UConvertF32x4) \
+ V(Arm64I32x4ShrU) \
+ V(Arm64IMinU) \
+ V(Arm64IMaxU) \
+ V(Arm64IGtU) \
+ V(Arm64IGeU) \
+ V(Arm64I32x4BitMask) \
+ V(Arm64I32x4DotI16x8S) \
+ V(Arm64I32x4TruncSatF64x2SZero) \
+ V(Arm64I32x4TruncSatF64x2UZero) \
+ V(Arm64IExtractLaneU) \
+ V(Arm64IExtractLaneS) \
+ V(Arm64I16x8Shl) \
+ V(Arm64I16x8ShrS) \
+ V(Arm64I16x8SConvertI32x4) \
+ V(Arm64IAddSatS) \
+ V(Arm64ISubSatS) \
+ V(Arm64I16x8Mul) \
+ V(Arm64I16x8ShrU) \
+ V(Arm64I16x8UConvertI32x4) \
+ V(Arm64IAddSatU) \
+ V(Arm64ISubSatU) \
+ V(Arm64RoundingAverageU) \
+ V(Arm64I16x8Q15MulRSatS) \
+ V(Arm64I16x8BitMask) \
+ V(Arm64I8x16Shl) \
+ V(Arm64I8x16ShrS) \
+ V(Arm64I8x16SConvertI16x8) \
+ V(Arm64I8x16ShrU) \
+ V(Arm64I8x16UConvertI16x8) \
+ V(Arm64I8x16BitMask) \
+ V(Arm64S128Const) \
+ V(Arm64S128Zero) \
+ V(Arm64S128Dup) \
+ V(Arm64S128And) \
+ V(Arm64S128Or) \
+ V(Arm64S128Xor) \
+ V(Arm64S128Not) \
+ V(Arm64S128Select) \
+ V(Arm64S128AndNot) \
+ V(Arm64Ssra) \
+ V(Arm64Usra) \
+ V(Arm64S32x4ZipLeft) \
+ V(Arm64S32x4ZipRight) \
+ V(Arm64S32x4UnzipLeft) \
+ V(Arm64S32x4UnzipRight) \
+ V(Arm64S32x4TransposeLeft) \
+ V(Arm64S32x4TransposeRight) \
+ V(Arm64S32x4Shuffle) \
+ V(Arm64S16x8ZipLeft) \
+ V(Arm64S16x8ZipRight) \
+ V(Arm64S16x8UnzipLeft) \
+ V(Arm64S16x8UnzipRight) \
+ V(Arm64S16x8TransposeLeft) \
+ V(Arm64S16x8TransposeRight) \
+ V(Arm64S8x16ZipLeft) \
+ V(Arm64S8x16ZipRight) \
+ V(Arm64S8x16UnzipLeft) \
+ V(Arm64S8x16UnzipRight) \
+ V(Arm64S8x16TransposeLeft) \
+ V(Arm64S8x16TransposeRight) \
+ V(Arm64S8x16Concat) \
+ V(Arm64I8x16Swizzle) \
+ V(Arm64I8x16Shuffle) \
+ V(Arm64S32x2Reverse) \
+ V(Arm64S16x4Reverse) \
+ V(Arm64S16x2Reverse) \
+ V(Arm64S8x8Reverse) \
+ V(Arm64S8x4Reverse) \
+ V(Arm64S8x2Reverse) \
+ V(Arm64V128AnyTrue) \
+ V(Arm64I64x2AllTrue) \
+ V(Arm64I32x4AllTrue) \
+ V(Arm64I16x8AllTrue) \
+ V(Arm64I8x16AllTrue) \
+ V(Arm64LoadSplat) \
+ V(Arm64LoadLane) \
+ V(Arm64StoreLane) \
+ V(Arm64S128Load8x8S) \
+ V(Arm64S128Load8x8U) \
+ V(Arm64S128Load16x4S) \
+ V(Arm64S128Load16x4U) \
+ V(Arm64S128Load32x2S) \
+ V(Arm64S128Load32x2U) \
+ V(Arm64Word64AtomicLoadUint64) \
+ V(Arm64Word64AtomicStoreWord64) \
+ V(Arm64Word64AtomicAddUint64) \
+ V(Arm64Word64AtomicSubUint64) \
+ V(Arm64Word64AtomicAndUint64) \
+ V(Arm64Word64AtomicOrUint64) \
+ V(Arm64Word64AtomicXorUint64) \
+ V(Arm64Word64AtomicExchangeUint64) \
V(Arm64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
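The hunk above rewrites the backend's opcode list as a single X-macro: each expansion site supplies its own definition of V, so one list generates the ArchOpcode enumerators, their debug-name strings, and any other parallel table in lockstep. What follows is a minimal standalone sketch of that pattern, using invented Demo names and a printf driver; it is not V8's actual enum definition.

    // Sketch of the X-macro pattern behind TARGET_ARCH_OPCODE_LIST.
    // All names here are illustrative placeholders.
    #include <cstdio>

    #define DEMO_OPCODE_LIST(V) \
      V(Arm64Add)               \
      V(Arm64Sub)               \
      V(Arm64Cnt32)

    // One expansion produces the enum...
    enum DemoArchOpcode {
    #define DECLARE_OPCODE(Name) kDemo##Name,
      DEMO_OPCODE_LIST(DECLARE_OPCODE)
    #undef DECLARE_OPCODE
      kDemoOpcodeCount
    };

    // ...and a second expansion produces a parallel name table, kept in
    // sync with the enum because both come from the same list.
    static const char* const kDemoOpcodeNames[] = {
    #define DECLARE_NAME(Name) #Name,
      DEMO_OPCODE_LIST(DECLARE_NAME)
    #undef DECLARE_NAME
    };

    int main() {
      std::printf("%d opcodes; opcode 0 is %s\n",
                  static_cast<int>(kDemoOpcodeCount),
                  kDemoOpcodeNames[kDemoArm64Add]);
      return 0;
    }

Adding or removing an opcode therefore touches exactly one line of the list, which is why this commit's switch from per-shape to generic SIMD opcodes shows up as a wholesale rewrite of the macro body.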
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
index f4446cdbf8..bb16b76aaf 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-scheduler-arm64.cc
@@ -26,6 +26,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Cmn:
case kArm64Cmn32:
case kArm64Cnt:
+ case kArm64Cnt32:
+ case kArm64Cnt64:
case kArm64Tst:
case kArm64Tst32:
case kArm64Or:
@@ -42,10 +44,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Sub32:
case kArm64Mul:
case kArm64Mul32:
+ case kArm64Smlal:
+ case kArm64Smlal2:
case kArm64Smull:
case kArm64Smull2:
case kArm64Uadalp:
case kArm64Uaddlp:
+ case kArm64Umlal:
+ case kArm64Umlal2:
case kArm64Umull:
case kArm64Umull2:
case kArm64Madd:
@@ -147,23 +153,23 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64Float64MoveU64:
case kArm64U64MoveFloat64:
case kArm64Float64SilenceNaN:
- case kArm64F64x2Splat:
- case kArm64F64x2ExtractLane:
- case kArm64F64x2ReplaceLane:
- case kArm64F64x2Abs:
- case kArm64F64x2Neg:
- case kArm64F64x2Sqrt:
- case kArm64F64x2Add:
- case kArm64F64x2Sub:
- case kArm64F64x2Mul:
- case kArm64F64x2MulElement:
- case kArm64F64x2Div:
- case kArm64F64x2Min:
- case kArm64F64x2Max:
- case kArm64F64x2Eq:
- case kArm64F64x2Ne:
- case kArm64F64x2Lt:
- case kArm64F64x2Le:
+ case kArm64FExtractLane:
+ case kArm64FReplaceLane:
+ case kArm64FSplat:
+ case kArm64FAbs:
+ case kArm64FSqrt:
+ case kArm64FNeg:
+ case kArm64FAdd:
+ case kArm64FSub:
+ case kArm64FMul:
+ case kArm64FMulElement:
+ case kArm64FDiv:
+ case kArm64FMin:
+ case kArm64FMax:
+ case kArm64FEq:
+ case kArm64FNe:
+ case kArm64FLt:
+ case kArm64FLe:
case kArm64F64x2Qfma:
case kArm64F64x2Qfms:
case kArm64F64x2Pmin:
@@ -171,144 +177,73 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64F64x2ConvertLowI32x4S:
case kArm64F64x2ConvertLowI32x4U:
case kArm64F64x2PromoteLowF32x4:
- case kArm64F32x4Splat:
- case kArm64F32x4ExtractLane:
- case kArm64F32x4ReplaceLane:
case kArm64F32x4SConvertI32x4:
case kArm64F32x4UConvertI32x4:
- case kArm64F32x4Abs:
- case kArm64F32x4Neg:
- case kArm64F32x4Sqrt:
case kArm64F32x4RecipApprox:
case kArm64F32x4RecipSqrtApprox:
- case kArm64F32x4Add:
- case kArm64F32x4Sub:
- case kArm64F32x4Mul:
- case kArm64F32x4MulElement:
- case kArm64F32x4Div:
- case kArm64F32x4Min:
- case kArm64F32x4Max:
- case kArm64F32x4Eq:
- case kArm64F32x4Ne:
- case kArm64F32x4Lt:
- case kArm64F32x4Le:
case kArm64F32x4Qfma:
case kArm64F32x4Qfms:
case kArm64F32x4Pmin:
case kArm64F32x4Pmax:
case kArm64F32x4DemoteF64x2Zero:
- case kArm64I64x2Splat:
- case kArm64I64x2ExtractLane:
- case kArm64I64x2ReplaceLane:
- case kArm64I64x2Abs:
- case kArm64I64x2Neg:
+ case kArm64IExtractLane:
+ case kArm64IReplaceLane:
+ case kArm64ISplat:
+ case kArm64IAbs:
+ case kArm64INeg:
+ case kArm64Mla:
+ case kArm64Mls:
+ case kArm64RoundingAverageU:
case kArm64I64x2Shl:
case kArm64I64x2ShrS:
- case kArm64I64x2Add:
- case kArm64I64x2Sub:
+ case kArm64IAdd:
+ case kArm64ISub:
case kArm64I64x2Mul:
- case kArm64I64x2Eq:
- case kArm64I64x2Ne:
- case kArm64I64x2GtS:
- case kArm64I64x2GeS:
+ case kArm64IEq:
+ case kArm64INe:
+ case kArm64IGtS:
+ case kArm64IGeS:
case kArm64I64x2ShrU:
case kArm64I64x2BitMask:
- case kArm64I32x4Splat:
- case kArm64I32x4ExtractLane:
- case kArm64I32x4ReplaceLane:
case kArm64I32x4SConvertF32x4:
case kArm64Sxtl:
case kArm64Sxtl2:
case kArm64Uxtl:
case kArm64Uxtl2:
- case kArm64I32x4Neg:
case kArm64I32x4Shl:
case kArm64I32x4ShrS:
- case kArm64I32x4Add:
- case kArm64I32x4Sub:
case kArm64I32x4Mul:
- case kArm64I32x4Mla:
- case kArm64I32x4Mls:
- case kArm64I32x4MinS:
- case kArm64I32x4MaxS:
- case kArm64I32x4Eq:
- case kArm64I32x4Ne:
- case kArm64I32x4GtS:
- case kArm64I32x4GeS:
+ case kArm64IMinS:
+ case kArm64IMaxS:
case kArm64I32x4UConvertF32x4:
case kArm64I32x4ShrU:
- case kArm64I32x4MinU:
- case kArm64I32x4MaxU:
- case kArm64I32x4GtU:
- case kArm64I32x4GeU:
- case kArm64I32x4Abs:
+ case kArm64IMinU:
+ case kArm64IMaxU:
+ case kArm64IGtU:
+ case kArm64IGeU:
case kArm64I32x4BitMask:
case kArm64I32x4DotI16x8S:
case kArm64I32x4TruncSatF64x2SZero:
case kArm64I32x4TruncSatF64x2UZero:
- case kArm64I16x8Splat:
- case kArm64I16x8ExtractLaneU:
- case kArm64I16x8ExtractLaneS:
- case kArm64I16x8ReplaceLane:
- case kArm64I16x8Neg:
+ case kArm64IExtractLaneU:
+ case kArm64IExtractLaneS:
case kArm64I16x8Shl:
case kArm64I16x8ShrS:
case kArm64I16x8SConvertI32x4:
- case kArm64I16x8Add:
- case kArm64I16x8AddSatS:
- case kArm64I16x8Sub:
- case kArm64I16x8SubSatS:
+ case kArm64IAddSatS:
+ case kArm64ISubSatS:
case kArm64I16x8Mul:
- case kArm64I16x8Mla:
- case kArm64I16x8Mls:
- case kArm64I16x8MinS:
- case kArm64I16x8MaxS:
- case kArm64I16x8Eq:
- case kArm64I16x8Ne:
- case kArm64I16x8GtS:
- case kArm64I16x8GeS:
case kArm64I16x8ShrU:
case kArm64I16x8UConvertI32x4:
- case kArm64I16x8AddSatU:
- case kArm64I16x8SubSatU:
- case kArm64I16x8MinU:
- case kArm64I16x8MaxU:
- case kArm64I16x8GtU:
- case kArm64I16x8GeU:
- case kArm64I16x8RoundingAverageU:
+ case kArm64IAddSatU:
+ case kArm64ISubSatU:
case kArm64I16x8Q15MulRSatS:
- case kArm64I16x8Abs:
case kArm64I16x8BitMask:
- case kArm64I8x16Splat:
- case kArm64I8x16ExtractLaneU:
- case kArm64I8x16ExtractLaneS:
- case kArm64I8x16ReplaceLane:
- case kArm64I8x16Neg:
case kArm64I8x16Shl:
case kArm64I8x16ShrS:
case kArm64I8x16SConvertI16x8:
- case kArm64I8x16Add:
- case kArm64I8x16AddSatS:
- case kArm64I8x16Sub:
- case kArm64I8x16SubSatS:
- case kArm64I8x16Mla:
- case kArm64I8x16Mls:
- case kArm64I8x16MinS:
- case kArm64I8x16MaxS:
- case kArm64I8x16Eq:
- case kArm64I8x16Ne:
- case kArm64I8x16GtS:
- case kArm64I8x16GeS:
case kArm64I8x16UConvertI16x8:
- case kArm64I8x16AddSatU:
- case kArm64I8x16SubSatU:
case kArm64I8x16ShrU:
- case kArm64I8x16MinU:
- case kArm64I8x16MaxU:
- case kArm64I8x16GtU:
- case kArm64I8x16GeU:
- case kArm64I8x16RoundingAverageU:
- case kArm64I8x16Abs:
case kArm64I8x16BitMask:
case kArm64S128Const:
case kArm64S128Zero:
@@ -319,6 +254,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64S128Not:
case kArm64S128Select:
case kArm64S128AndNot:
+ case kArm64Ssra:
+ case kArm64Usra:
case kArm64S32x4ZipLeft:
case kArm64S32x4ZipRight:
case kArm64S32x4UnzipLeft:
@@ -373,6 +310,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64LdrDecompressTaggedSigned:
case kArm64LdrDecompressTaggedPointer:
case kArm64LdrDecompressAnyTagged:
+ case kArm64LdarDecompressTaggedSigned:
+ case kArm64LdarDecompressTaggedPointer:
+ case kArm64LdarDecompressAnyTagged:
case kArm64Peek:
case kArm64LoadSplat:
case kArm64LoadLane:
@@ -395,48 +335,22 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kArm64StrW:
case kArm64Str:
case kArm64StrCompressTagged:
+ case kArm64StlrCompressTagged:
case kArm64DmbIsh:
case kArm64DsbIsb:
case kArm64StoreLane:
return kHasSideEffect;
- case kArm64Word64AtomicLoadUint8:
- case kArm64Word64AtomicLoadUint16:
- case kArm64Word64AtomicLoadUint32:
case kArm64Word64AtomicLoadUint64:
return kIsLoadOperation;
- case kArm64Word64AtomicStoreWord8:
- case kArm64Word64AtomicStoreWord16:
- case kArm64Word64AtomicStoreWord32:
case kArm64Word64AtomicStoreWord64:
- case kArm64Word64AtomicAddUint8:
- case kArm64Word64AtomicAddUint16:
- case kArm64Word64AtomicAddUint32:
case kArm64Word64AtomicAddUint64:
- case kArm64Word64AtomicSubUint8:
- case kArm64Word64AtomicSubUint16:
- case kArm64Word64AtomicSubUint32:
case kArm64Word64AtomicSubUint64:
- case kArm64Word64AtomicAndUint8:
- case kArm64Word64AtomicAndUint16:
- case kArm64Word64AtomicAndUint32:
case kArm64Word64AtomicAndUint64:
- case kArm64Word64AtomicOrUint8:
- case kArm64Word64AtomicOrUint16:
- case kArm64Word64AtomicOrUint32:
case kArm64Word64AtomicOrUint64:
- case kArm64Word64AtomicXorUint8:
- case kArm64Word64AtomicXorUint16:
- case kArm64Word64AtomicXorUint32:
case kArm64Word64AtomicXorUint64:
- case kArm64Word64AtomicExchangeUint8:
- case kArm64Word64AtomicExchangeUint16:
- case kArm64Word64AtomicExchangeUint32:
case kArm64Word64AtomicExchangeUint64:
- case kArm64Word64AtomicCompareExchangeUint8:
- case kArm64Word64AtomicCompareExchangeUint16:
- case kArm64Word64AtomicCompareExchangeUint32:
case kArm64Word64AtomicCompareExchangeUint64:
return kHasSideEffect;
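The long runs of deleted case labels above are the scheduler-side payoff of the opcode merge: shapes like F64x2/F32x4 or I8x16/I16x8/I32x4 no longer get distinct ArchOpcodes; instead one generic opcode (kArm64FAdd, kArm64IMinS, ...) carries the lane size in a bitfield of the InstructionCode word. Below is a hedged sketch of that composition, assuming an invented field position and opcode value; V8's real layout is defined by the *Field classes in the instruction-codes headers.

    // Sketch: one generic opcode plus a lane-size bitfield replaces four
    // per-shape opcodes. Field position and opcode value are made up.
    #include <cassert>
    #include <cstdint>

    using InstructionCode = uint32_t;

    struct LaneSizeField {
      static constexpr int kShift = 22;                  // illustrative
      static constexpr uint32_t kMask = 0xFFu << kShift;
      static InstructionCode encode(int lane_size_in_bits) {
        return static_cast<uint32_t>(lane_size_in_bits) << kShift;
      }
      static int decode(InstructionCode code) {
        return static_cast<int>((code & kMask) >> kShift);
      }
    };

    constexpr InstructionCode kArm64FAddDemo = 42;       // placeholder

    int main() {
      // F64x2Add and F32x4Add now share an opcode, split only by lane size.
      InstructionCode f64x2_add = kArm64FAddDemo | LaneSizeField::encode(64);
      InstructionCode f32x4_add = kArm64FAddDemo | LaneSizeField::encode(32);
      assert(LaneSizeField::decode(f64x2_add) == 64);
      assert(LaneSizeField::decode(f32x4_add) == 32);
      return 0;
    }

The scheduler only needs the base opcode (all lane sizes of an FAdd have the same flags), so each merged family collapses to a single case label.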
diff --git a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
index 6a1a101e35..d102ecabb2 100644
--- a/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
+++ b/deps/v8/src/compiler/backend/arm64/instruction-selector-arm64.cc
@@ -190,7 +190,8 @@ void VisitSimdShiftRRR(InstructionSelector* selector, ArchOpcode opcode,
}
}
-void VisitRRI(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+void VisitRRI(InstructionSelector* selector, InstructionCode opcode,
+ Node* node) {
Arm64OperandGenerator g(selector);
int32_t imm = OpParameter<int32_t>(node->op());
selector->Emit(opcode, g.DefineAsRegister(node),
@@ -205,7 +206,8 @@ void VisitRRO(InstructionSelector* selector, ArchOpcode opcode, Node* node,
g.UseOperand(node->InputAt(1), operand_mode));
}
-void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+void VisitRRIR(InstructionSelector* selector, InstructionCode opcode,
+ Node* node) {
Arm64OperandGenerator g(selector);
int32_t imm = OpParameter<int32_t>(node->op());
selector->Emit(opcode, g.DefineAsRegister(node),
@@ -845,10 +847,6 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kNone:
UNREACHABLE();
}
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
- }
if (node->opcode() == IrOpcode::kProtectedLoad) {
opcode |= AccessModeField::encode(kMemoryAccessProtected);
}
@@ -856,8 +854,6 @@ void InstructionSelector::VisitLoad(Node* node) {
EmitLoad(this, node, opcode, immediate_mode, rep);
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-
void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
void InstructionSelector::VisitStore(Node* node) {
@@ -1441,6 +1437,8 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
#define RR_OP_LIST(V) \
V(Word64Clz, kArm64Clz) \
V(Word32Clz, kArm64Clz32) \
+ V(Word32Popcnt, kArm64Cnt32) \
+ V(Word64Popcnt, kArm64Cnt64) \
V(Word32ReverseBits, kArm64Rbit32) \
V(Word64ReverseBits, kArm64Rbit) \
V(Word32ReverseBytes, kArm64Rev32) \
@@ -1531,10 +1529,6 @@ void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
-void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
-
-void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
-
void InstructionSelector::VisitInt32Add(Node* node) {
Arm64OperandGenerator g(this);
Int32BinopMatcher m(node);
@@ -1938,7 +1932,9 @@ void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
Node* value = node->InputAt(0);
- if (value->opcode() == IrOpcode::kLoad && CanCover(node, value)) {
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kLoadImmutable) &&
+ CanCover(node, value)) {
// Generate sign-extending load.
LoadRepresentation load_rep = LoadRepresentationOf(value->op());
MachineRepresentation rep = load_rep.representation();
@@ -2324,9 +2320,6 @@ template <int N>
bool TryEmitCbzOrTbz(InstructionSelector* selector, Node* node,
typename CbzOrTbzMatchTrait<N>::IntegralType value,
Node* user, FlagsCondition cond, FlagsContinuation* cont) {
- // Branch poisoning requires flags to be set, so when it's enabled for
- // a particular branch, we shouldn't be applying the cbz/tbz optimization.
- DCHECK(!cont->IsPoisoned());
// Only handle branches and deoptimisations.
if (!cont->IsBranch() && !cont->IsDeoptimize()) return false;
@@ -2414,7 +2407,7 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
std::swap(left, right);
}
- if (opcode == kArm64Cmp && !cont->IsPoisoned()) {
+ if (opcode == kArm64Cmp) {
Int64Matcher m(right);
if (m.HasResolvedValue()) {
if (TryEmitCbzOrTbz<64>(selector, left, m.ResolvedValue(), node,
@@ -2432,19 +2425,17 @@ void VisitWord32Compare(InstructionSelector* selector, Node* node,
FlagsContinuation* cont) {
Int32BinopMatcher m(node);
FlagsCondition cond = cont->condition();
- if (!cont->IsPoisoned()) {
- if (m.right().HasResolvedValue()) {
- if (TryEmitCbzOrTbz<32>(selector, m.left().node(),
- m.right().ResolvedValue(), node, cond, cont)) {
- return;
- }
- } else if (m.left().HasResolvedValue()) {
- FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
- if (TryEmitCbzOrTbz<32>(selector, m.right().node(),
- m.left().ResolvedValue(), node, commuted_cond,
- cont)) {
- return;
- }
+ if (m.right().HasResolvedValue()) {
+ if (TryEmitCbzOrTbz<32>(selector, m.left().node(),
+ m.right().ResolvedValue(), node, cond, cont)) {
+ return;
+ }
+ } else if (m.left().HasResolvedValue()) {
+ FlagsCondition commuted_cond = CommuteFlagsCondition(cond);
+ if (TryEmitCbzOrTbz<32>(selector, m.right().node(),
+ m.left().ResolvedValue(), node, commuted_cond,
+ cont)) {
+ return;
}
}
ArchOpcode opcode = kArm64Cmp32;
@@ -2533,8 +2524,7 @@ struct TestAndBranchMatcher {
Matcher matcher_;
void Initialize() {
- if (cont_->IsBranch() && !cont_->IsPoisoned() &&
- matcher_.right().HasResolvedValue() &&
+ if (cont_->IsBranch() && matcher_.right().HasResolvedValue() &&
base::bits::IsPowerOfTwo(matcher_.right().ResolvedValue())) {
// If the mask has only one bit set, we can use tbz/tbnz.
DCHECK((cont_->condition() == kEqual) ||
@@ -2583,7 +2573,7 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
}
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
Arm64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2592,13 +2582,14 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
g.UseUniqueRegister(value)};
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
+ InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR) |
+ AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
}
void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
Arm64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2609,40 +2600,149 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
g.UseUniqueRegister(new_value)};
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
+ InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR) |
+ AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
}
void VisitAtomicLoad(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ AtomicWidth width) {
Arm64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index)};
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand temps[] = {g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
+
+ // The memory order is ignored as both acquire and sequentially consistent
+ // loads can emit LDAR.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ InstructionCode code;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+ code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+ code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ code = kAtomicLoadWord32;
+ break;
+ case MachineRepresentation::kWord64:
+ code = kArm64Word64AtomicLoadUint64;
+ break;
+#ifdef V8_COMPRESS_POINTERS
+ case MachineRepresentation::kTaggedSigned:
+ code = kArm64LdarDecompressTaggedSigned;
+ break;
+ case MachineRepresentation::kTaggedPointer:
+ code = kArm64LdarDecompressTaggedPointer;
+ break;
+ case MachineRepresentation::kTagged:
+ code = kArm64LdarDecompressAnyTagged;
+ break;
+#else
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ if (kTaggedSize == 8) {
+ code = kArm64Word64AtomicLoadUint64;
+ } else {
+ code = kAtomicLoadWord32;
+ }
+ break;
+#endif
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed:
+ DCHECK(COMPRESS_POINTERS_BOOL);
+ code = kAtomicLoadWord32;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ code |=
+ AddressingModeField::encode(kMode_MRR) | AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
}
void VisitAtomicStore(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ AtomicWidth width) {
Arm64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
+
+ // The memory order is ignored as both release and sequentially consistent
+ // stores can emit STLR.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_params.write_barrier_kind();
+ MachineRepresentation rep = store_params.representation();
+
+ if (FLAG_enable_unconditional_write_barriers &&
+ CanBeTaggedOrCompressedPointer(rep)) {
+ write_barrier_kind = kFullWriteBarrier;
+ }
+
InstructionOperand inputs[] = {g.UseRegister(base), g.UseRegister(index),
g.UseUniqueRegister(value)};
InstructionOperand temps[] = {g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(kMode_MRR);
+ InstructionCode code;
+
+ if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
+ DCHECK(CanBeTaggedOrCompressedPointer(rep));
+ DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
+
+ RecordWriteMode record_write_mode =
+ WriteBarrierKindToRecordWriteMode(write_barrier_kind);
+ code = kArchAtomicStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ } else {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ code = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ code = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ code = kAtomicStoreWord32;
+ break;
+ case MachineRepresentation::kWord64:
+ DCHECK_EQ(width, AtomicWidth::kWord64);
+ code = kArm64Word64AtomicStoreWord64;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
+ code = kArm64StlrCompressTagged;
+ break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed:
+ CHECK(COMPRESS_POINTERS_BOOL);
+ DCHECK_EQ(width, AtomicWidth::kWord32);
+ code = kArm64StlrCompressTagged;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ code |= AtomicWidthField::encode(width);
+ }
+
+ code |= AddressingModeField::encode(kMode_MRR);
selector->Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps),
temps);
}
void VisitAtomicBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
Arm64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2653,7 +2753,8 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister(),
g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
}
@@ -2842,7 +2943,7 @@ void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
}
// Branch could not be combined with a compare, compare against 0 and branch.
- if (!cont->IsPoisoned() && cont->IsBranch()) {
+ if (cont->IsBranch()) {
Emit(cont->Encode(kArm64CompareAndBranch32), g.NoOutput(),
g.UseRegister(value), g.Label(cont->true_block()),
g.Label(cont->false_block()));
@@ -3196,159 +3297,91 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode =
- load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
- : kWord32AtomicLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicLoadWord32;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicLoad(this, node, opcode);
+ VisitAtomicLoad(this, node, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = kArm64Word64AtomicLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kArm64Word64AtomicLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kArm64Word64AtomicLoadUint32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kArm64Word64AtomicLoadUint64;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicLoad(this, node, opcode);
+ VisitAtomicLoad(this, node, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kWord32AtomicStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kWord32AtomicStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicStoreWord32;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicStore(this, node, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kArm64Word64AtomicStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kArm64Word64AtomicStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kArm64Word64AtomicStoreWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kArm64Word64AtomicStoreWord64;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicStore(this, node, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kArm64Word64AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kArm64Word64AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kArm64Word64AtomicExchangeUint32;
+ opcode = kAtomicExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kArm64Word64AtomicExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kArm64Word64AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kArm64Word64AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kArm64Word64AtomicCompareExchangeUint32;
+ opcode = kAtomicCompareExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kArm64Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
@@ -3369,15 +3402,14 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -3402,14 +3434,14 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
- VisitWord64AtomicBinaryOperation( \
- node, kArm64Word64Atomic##op##Uint8, kArm64Word64Atomic##op##Uint16, \
- kArm64Word64Atomic##op##Uint32, kArm64Word64Atomic##op##Uint64); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
+ kAtomic##op##Uint16, kAtomic##op##Word32, \
+ kArm64Word64Atomic##op##Uint64); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -3426,44 +3458,22 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
UNREACHABLE();
}
-#define SIMD_TYPE_LIST(V) \
- V(F64x2) \
- V(F32x4) \
- V(I64x2) \
- V(I32x4) \
- V(I16x8) \
- V(I8x16)
-
#define SIMD_UNOP_LIST(V) \
- V(F64x2Abs, kArm64F64x2Abs) \
- V(F64x2Neg, kArm64F64x2Neg) \
- V(F64x2Sqrt, kArm64F64x2Sqrt) \
V(F64x2ConvertLowI32x4S, kArm64F64x2ConvertLowI32x4S) \
V(F64x2ConvertLowI32x4U, kArm64F64x2ConvertLowI32x4U) \
V(F64x2PromoteLowF32x4, kArm64F64x2PromoteLowF32x4) \
V(F32x4SConvertI32x4, kArm64F32x4SConvertI32x4) \
V(F32x4UConvertI32x4, kArm64F32x4UConvertI32x4) \
- V(F32x4Abs, kArm64F32x4Abs) \
- V(F32x4Neg, kArm64F32x4Neg) \
- V(F32x4Sqrt, kArm64F32x4Sqrt) \
V(F32x4RecipApprox, kArm64F32x4RecipApprox) \
V(F32x4RecipSqrtApprox, kArm64F32x4RecipSqrtApprox) \
V(F32x4DemoteF64x2Zero, kArm64F32x4DemoteF64x2Zero) \
- V(I64x2Abs, kArm64I64x2Abs) \
- V(I64x2Neg, kArm64I64x2Neg) \
V(I64x2BitMask, kArm64I64x2BitMask) \
V(I32x4SConvertF32x4, kArm64I32x4SConvertF32x4) \
- V(I32x4Neg, kArm64I32x4Neg) \
V(I32x4UConvertF32x4, kArm64I32x4UConvertF32x4) \
- V(I32x4Abs, kArm64I32x4Abs) \
V(I32x4BitMask, kArm64I32x4BitMask) \
V(I32x4TruncSatF64x2SZero, kArm64I32x4TruncSatF64x2SZero) \
V(I32x4TruncSatF64x2UZero, kArm64I32x4TruncSatF64x2UZero) \
- V(I16x8Neg, kArm64I16x8Neg) \
- V(I16x8Abs, kArm64I16x8Abs) \
V(I16x8BitMask, kArm64I16x8BitMask) \
- V(I8x16Neg, kArm64I8x16Neg) \
- V(I8x16Abs, kArm64I8x16Abs) \
V(I8x16BitMask, kArm64I8x16BitMask) \
V(S128Not, kArm64S128Not) \
V(V128AnyTrue, kArm64V128AnyTrue) \
@@ -3472,6 +3482,28 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I16x8AllTrue, kArm64I16x8AllTrue) \
V(I8x16AllTrue, kArm64I8x16AllTrue)
+#define SIMD_UNOP_LANE_SIZE_LIST(V) \
+ V(F64x2Splat, kArm64FSplat, 64) \
+ V(F64x2Abs, kArm64FAbs, 64) \
+ V(F64x2Sqrt, kArm64FSqrt, 64) \
+ V(F64x2Neg, kArm64FNeg, 64) \
+ V(F32x4Splat, kArm64FSplat, 32) \
+ V(F32x4Abs, kArm64FAbs, 32) \
+ V(F32x4Sqrt, kArm64FSqrt, 32) \
+ V(F32x4Neg, kArm64FNeg, 32) \
+ V(I64x2Splat, kArm64ISplat, 64) \
+ V(I64x2Abs, kArm64IAbs, 64) \
+ V(I64x2Neg, kArm64INeg, 64) \
+ V(I32x4Splat, kArm64ISplat, 32) \
+ V(I32x4Abs, kArm64IAbs, 32) \
+ V(I32x4Neg, kArm64INeg, 32) \
+ V(I16x8Splat, kArm64ISplat, 16) \
+ V(I16x8Abs, kArm64IAbs, 16) \
+ V(I16x8Neg, kArm64INeg, 16) \
+ V(I8x16Splat, kArm64ISplat, 8) \
+ V(I8x16Abs, kArm64IAbs, 8) \
+ V(I8x16Neg, kArm64INeg, 8)
+
#define SIMD_SHIFT_OP_LIST(V) \
V(I64x2Shl, 64) \
V(I64x2ShrS, 64) \
@@ -3487,85 +3519,85 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
V(I8x16ShrU, 8)
#define SIMD_BINOP_LIST(V) \
- V(F64x2Add, kArm64F64x2Add) \
- V(F64x2Sub, kArm64F64x2Sub) \
- V(F64x2Div, kArm64F64x2Div) \
- V(F64x2Min, kArm64F64x2Min) \
- V(F64x2Max, kArm64F64x2Max) \
- V(F64x2Eq, kArm64F64x2Eq) \
- V(F64x2Ne, kArm64F64x2Ne) \
- V(F64x2Lt, kArm64F64x2Lt) \
- V(F64x2Le, kArm64F64x2Le) \
- V(F32x4Add, kArm64F32x4Add) \
- V(F32x4Sub, kArm64F32x4Sub) \
- V(F32x4Div, kArm64F32x4Div) \
- V(F32x4Min, kArm64F32x4Min) \
- V(F32x4Max, kArm64F32x4Max) \
- V(F32x4Eq, kArm64F32x4Eq) \
- V(F32x4Ne, kArm64F32x4Ne) \
- V(F32x4Lt, kArm64F32x4Lt) \
- V(F32x4Le, kArm64F32x4Le) \
- V(I64x2Add, kArm64I64x2Add) \
- V(I64x2Sub, kArm64I64x2Sub) \
- V(I64x2Eq, kArm64I64x2Eq) \
- V(I64x2Ne, kArm64I64x2Ne) \
- V(I64x2GtS, kArm64I64x2GtS) \
- V(I64x2GeS, kArm64I64x2GeS) \
V(I32x4Mul, kArm64I32x4Mul) \
- V(I32x4MinS, kArm64I32x4MinS) \
- V(I32x4MaxS, kArm64I32x4MaxS) \
- V(I32x4Eq, kArm64I32x4Eq) \
- V(I32x4Ne, kArm64I32x4Ne) \
- V(I32x4GtS, kArm64I32x4GtS) \
- V(I32x4GeS, kArm64I32x4GeS) \
- V(I32x4MinU, kArm64I32x4MinU) \
- V(I32x4MaxU, kArm64I32x4MaxU) \
- V(I32x4GtU, kArm64I32x4GtU) \
- V(I32x4GeU, kArm64I32x4GeU) \
V(I32x4DotI16x8S, kArm64I32x4DotI16x8S) \
V(I16x8SConvertI32x4, kArm64I16x8SConvertI32x4) \
- V(I16x8AddSatS, kArm64I16x8AddSatS) \
- V(I16x8SubSatS, kArm64I16x8SubSatS) \
V(I16x8Mul, kArm64I16x8Mul) \
- V(I16x8MinS, kArm64I16x8MinS) \
- V(I16x8MaxS, kArm64I16x8MaxS) \
- V(I16x8Eq, kArm64I16x8Eq) \
- V(I16x8Ne, kArm64I16x8Ne) \
- V(I16x8GtS, kArm64I16x8GtS) \
- V(I16x8GeS, kArm64I16x8GeS) \
V(I16x8UConvertI32x4, kArm64I16x8UConvertI32x4) \
- V(I16x8AddSatU, kArm64I16x8AddSatU) \
- V(I16x8SubSatU, kArm64I16x8SubSatU) \
- V(I16x8MinU, kArm64I16x8MinU) \
- V(I16x8MaxU, kArm64I16x8MaxU) \
- V(I16x8GtU, kArm64I16x8GtU) \
- V(I16x8GeU, kArm64I16x8GeU) \
- V(I16x8RoundingAverageU, kArm64I16x8RoundingAverageU) \
V(I16x8Q15MulRSatS, kArm64I16x8Q15MulRSatS) \
- V(I8x16Add, kArm64I8x16Add) \
- V(I8x16Sub, kArm64I8x16Sub) \
V(I8x16SConvertI16x8, kArm64I8x16SConvertI16x8) \
- V(I8x16AddSatS, kArm64I8x16AddSatS) \
- V(I8x16SubSatS, kArm64I8x16SubSatS) \
- V(I8x16MinS, kArm64I8x16MinS) \
- V(I8x16MaxS, kArm64I8x16MaxS) \
- V(I8x16Eq, kArm64I8x16Eq) \
- V(I8x16Ne, kArm64I8x16Ne) \
- V(I8x16GtS, kArm64I8x16GtS) \
- V(I8x16GeS, kArm64I8x16GeS) \
V(I8x16UConvertI16x8, kArm64I8x16UConvertI16x8) \
- V(I8x16AddSatU, kArm64I8x16AddSatU) \
- V(I8x16SubSatU, kArm64I8x16SubSatU) \
- V(I8x16MinU, kArm64I8x16MinU) \
- V(I8x16MaxU, kArm64I8x16MaxU) \
- V(I8x16GtU, kArm64I8x16GtU) \
- V(I8x16GeU, kArm64I8x16GeU) \
- V(I8x16RoundingAverageU, kArm64I8x16RoundingAverageU) \
V(S128And, kArm64S128And) \
V(S128Or, kArm64S128Or) \
V(S128Xor, kArm64S128Xor) \
V(S128AndNot, kArm64S128AndNot)
+#define SIMD_BINOP_LANE_SIZE_LIST(V) \
+ V(F64x2Min, kArm64FMin, 64) \
+ V(F64x2Max, kArm64FMax, 64) \
+ V(F64x2Add, kArm64FAdd, 64) \
+ V(F64x2Sub, kArm64FSub, 64) \
+ V(F64x2Div, kArm64FDiv, 64) \
+ V(F64x2Eq, kArm64FEq, 64) \
+ V(F64x2Ne, kArm64FNe, 64) \
+ V(F64x2Lt, kArm64FLt, 64) \
+ V(F64x2Le, kArm64FLe, 64) \
+ V(F32x4Min, kArm64FMin, 32) \
+ V(F32x4Max, kArm64FMax, 32) \
+ V(F32x4Add, kArm64FAdd, 32) \
+ V(F32x4Sub, kArm64FSub, 32) \
+ V(F32x4Div, kArm64FDiv, 32) \
+ V(F32x4Eq, kArm64FEq, 32) \
+ V(F32x4Ne, kArm64FNe, 32) \
+ V(F32x4Lt, kArm64FLt, 32) \
+ V(F32x4Le, kArm64FLe, 32) \
+ V(I64x2Sub, kArm64ISub, 64) \
+ V(I64x2Eq, kArm64IEq, 64) \
+ V(I64x2Ne, kArm64INe, 64) \
+ V(I64x2GtS, kArm64IGtS, 64) \
+ V(I64x2GeS, kArm64IGeS, 64) \
+ V(I32x4Eq, kArm64IEq, 32) \
+ V(I32x4Ne, kArm64INe, 32) \
+ V(I32x4GtS, kArm64IGtS, 32) \
+ V(I32x4GeS, kArm64IGeS, 32) \
+ V(I32x4GtU, kArm64IGtU, 32) \
+ V(I32x4GeU, kArm64IGeU, 32) \
+ V(I32x4MinS, kArm64IMinS, 32) \
+ V(I32x4MaxS, kArm64IMaxS, 32) \
+ V(I32x4MinU, kArm64IMinU, 32) \
+ V(I32x4MaxU, kArm64IMaxU, 32) \
+ V(I16x8AddSatS, kArm64IAddSatS, 16) \
+ V(I16x8SubSatS, kArm64ISubSatS, 16) \
+ V(I16x8AddSatU, kArm64IAddSatU, 16) \
+ V(I16x8SubSatU, kArm64ISubSatU, 16) \
+ V(I16x8Eq, kArm64IEq, 16) \
+ V(I16x8Ne, kArm64INe, 16) \
+ V(I16x8GtS, kArm64IGtS, 16) \
+ V(I16x8GeS, kArm64IGeS, 16) \
+ V(I16x8GtU, kArm64IGtU, 16) \
+ V(I16x8GeU, kArm64IGeU, 16) \
+ V(I16x8RoundingAverageU, kArm64RoundingAverageU, 16) \
+ V(I8x16RoundingAverageU, kArm64RoundingAverageU, 8) \
+ V(I16x8MinS, kArm64IMinS, 16) \
+ V(I16x8MaxS, kArm64IMaxS, 16) \
+ V(I16x8MinU, kArm64IMinU, 16) \
+ V(I16x8MaxU, kArm64IMaxU, 16) \
+ V(I8x16Sub, kArm64ISub, 8) \
+ V(I8x16AddSatS, kArm64IAddSatS, 8) \
+ V(I8x16SubSatS, kArm64ISubSatS, 8) \
+ V(I8x16AddSatU, kArm64IAddSatU, 8) \
+ V(I8x16SubSatU, kArm64ISubSatU, 8) \
+ V(I8x16Eq, kArm64IEq, 8) \
+ V(I8x16Ne, kArm64INe, 8) \
+ V(I8x16GtS, kArm64IGtS, 8) \
+ V(I8x16GeS, kArm64IGeS, 8) \
+ V(I8x16GtU, kArm64IGtU, 8) \
+ V(I8x16GeU, kArm64IGeU, 8) \
+ V(I8x16MinS, kArm64IMinS, 8) \
+ V(I8x16MaxS, kArm64IMaxS, 8) \
+ V(I8x16MinU, kArm64IMinU, 8) \
+ V(I8x16MaxU, kArm64IMaxU, 8)
+
void InstructionSelector::VisitS128Const(Node* node) {
Arm64OperandGenerator g(this);
static const int kUint32Immediates = 4;
@@ -3589,34 +3621,34 @@ void InstructionSelector::VisitS128Zero(Node* node) {
Emit(kArm64S128Zero, g.DefineAsRegister(node));
}
-#define SIMD_VISIT_SPLAT(Type) \
- void InstructionSelector::Visit##Type##Splat(Node* node) { \
- VisitRR(this, kArm64##Type##Splat, node); \
- }
-SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
-#undef SIMD_VISIT_SPLAT
-
-#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \
- void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
- VisitRRI(this, kArm64##Type##ExtractLane##Sign, node); \
- }
-SIMD_VISIT_EXTRACT_LANE(F64x2, )
-SIMD_VISIT_EXTRACT_LANE(F32x4, )
-SIMD_VISIT_EXTRACT_LANE(I64x2, )
-SIMD_VISIT_EXTRACT_LANE(I32x4, )
-SIMD_VISIT_EXTRACT_LANE(I16x8, U)
-SIMD_VISIT_EXTRACT_LANE(I16x8, S)
-SIMD_VISIT_EXTRACT_LANE(I8x16, U)
-SIMD_VISIT_EXTRACT_LANE(I8x16, S)
+#define SIMD_VISIT_EXTRACT_LANE(Type, T, Sign, LaneSize) \
+ void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
+ VisitRRI(this, \
+ kArm64##T##ExtractLane##Sign | LaneSizeField::encode(LaneSize), \
+ node); \
+ }
+SIMD_VISIT_EXTRACT_LANE(F64x2, F, , 64)
+SIMD_VISIT_EXTRACT_LANE(F32x4, F, , 32)
+SIMD_VISIT_EXTRACT_LANE(I64x2, I, , 64)
+SIMD_VISIT_EXTRACT_LANE(I32x4, I, , 32)
+SIMD_VISIT_EXTRACT_LANE(I16x8, I, U, 16)
+SIMD_VISIT_EXTRACT_LANE(I16x8, I, S, 16)
+SIMD_VISIT_EXTRACT_LANE(I8x16, I, U, 8)
+SIMD_VISIT_EXTRACT_LANE(I8x16, I, S, 8)
#undef SIMD_VISIT_EXTRACT_LANE
-#define SIMD_VISIT_REPLACE_LANE(Type) \
- void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
- VisitRRIR(this, kArm64##Type##ReplaceLane, node); \
- }
-SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
+#define SIMD_VISIT_REPLACE_LANE(Type, T, LaneSize) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ VisitRRIR(this, kArm64##T##ReplaceLane | LaneSizeField::encode(LaneSize), \
+ node); \
+ }
+SIMD_VISIT_REPLACE_LANE(F64x2, F, 64)
+SIMD_VISIT_REPLACE_LANE(F32x4, F, 32)
+SIMD_VISIT_REPLACE_LANE(I64x2, I, 64)
+SIMD_VISIT_REPLACE_LANE(I32x4, I, 32)
+SIMD_VISIT_REPLACE_LANE(I16x8, I, 16)
+SIMD_VISIT_REPLACE_LANE(I8x16, I, 8)
#undef SIMD_VISIT_REPLACE_LANE
-#undef SIMD_TYPE_LIST
#define SIMD_VISIT_UNOP(Name, instruction) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -3642,6 +3674,22 @@ SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
#undef SIMD_VISIT_BINOP
#undef SIMD_BINOP_LIST
+#define SIMD_VISIT_BINOP_LANE_SIZE(Name, instruction, LaneSize) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRR(this, instruction | LaneSizeField::encode(LaneSize), node); \
+ }
+SIMD_BINOP_LANE_SIZE_LIST(SIMD_VISIT_BINOP_LANE_SIZE)
+#undef SIMD_VISIT_BINOP_LANE_SIZE
+#undef SIMD_BINOP_LANE_SIZE_LIST
+
+#define SIMD_VISIT_UNOP_LANE_SIZE(Name, instruction, LaneSize) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRR(this, instruction | LaneSizeField::encode(LaneSize), node); \
+ }
+SIMD_UNOP_LANE_SIZE_LIST(SIMD_VISIT_UNOP_LANE_SIZE)
+#undef SIMD_VISIT_UNOP_LANE_SIZE
+#undef SIMD_UNOP_LANE_SIZE_LIST
+
using ShuffleMatcher =
ValueMatcher<S128ImmediateParameter, IrOpcode::kI8x16Shuffle>;
using BinopWithShuffleMatcher = BinopMatcher<ShuffleMatcher, ShuffleMatcher>;
@@ -3702,22 +3750,22 @@ MulWithDupResult TryMatchMulWithDup(Node* node) {
void InstructionSelector::VisitF32x4Mul(Node* node) {
if (MulWithDupResult result = TryMatchMulWithDup<4>(node)) {
Arm64OperandGenerator g(this);
- Emit(kArm64F32x4MulElement, g.DefineAsRegister(node),
- g.UseRegister(result.input), g.UseRegister(result.dup_node),
- g.UseImmediate(result.index));
+ Emit(kArm64FMulElement | LaneSizeField::encode(32),
+ g.DefineAsRegister(node), g.UseRegister(result.input),
+ g.UseRegister(result.dup_node), g.UseImmediate(result.index));
} else {
- return VisitRRR(this, kArm64F32x4Mul, node);
+ return VisitRRR(this, kArm64FMul | LaneSizeField::encode(32), node);
}
}
void InstructionSelector::VisitF64x2Mul(Node* node) {
if (MulWithDupResult result = TryMatchMulWithDup<2>(node)) {
Arm64OperandGenerator g(this);
- Emit(kArm64F64x2MulElement, g.DefineAsRegister(node),
- g.UseRegister(result.input), g.UseRegister(result.dup_node),
- g.UseImmediate(result.index));
+ Emit(kArm64FMulElement | LaneSizeField::encode(64),
+ g.DefineAsRegister(node), g.UseRegister(result.input),
+ g.UseRegister(result.dup_node), g.UseImmediate(result.index));
} else {
- return VisitRRR(this, kArm64F64x2Mul, node);
+ return VisitRRR(this, kArm64FMul | LaneSizeField::encode(64), node);
}
}
@@ -3729,84 +3777,178 @@ void InstructionSelector::VisitI64x2Mul(Node* node) {
arraysize(temps), temps);
}
-#define VISIT_SIMD_ADD(Type, PairwiseType, LaneSize) \
- void InstructionSelector::Visit##Type##Add(Node* node) { \
- Arm64OperandGenerator g(this); \
- Node* left = node->InputAt(0); \
- Node* right = node->InputAt(1); \
- /* Select Mla(z, x, y) for Add(Mul(x, y), z). */ \
- if (left->opcode() == IrOpcode::k##Type##Mul && CanCover(node, left)) { \
- Emit(kArm64##Type##Mla, g.DefineSameAsFirst(node), g.UseRegister(right), \
- g.UseRegister(left->InputAt(0)), g.UseRegister(left->InputAt(1))); \
- return; \
- } \
- /* Select Mla(z, x, y) for Add(z, Mul(x, y)). */ \
- if (right->opcode() == IrOpcode::k##Type##Mul && CanCover(node, right)) { \
- Emit(kArm64##Type##Mla, g.DefineSameAsFirst(node), g.UseRegister(left), \
- g.UseRegister(right->InputAt(0)), \
- g.UseRegister(right->InputAt(1))); \
- return; \
- } \
- /* Select Sadalp(x, y) for Add(x, ExtAddPairwiseS(y)). */ \
- if (right->opcode() == \
- IrOpcode::k##Type##ExtAddPairwise##PairwiseType##S && \
- CanCover(node, right)) { \
- Emit(kArm64Sadalp | LaneSizeField::encode(LaneSize), \
- g.DefineSameAsFirst(node), g.UseRegister(left), \
- g.UseRegister(right->InputAt(0))); \
- return; \
- } \
- /* Select Sadalp(y, x) for Add(ExtAddPairwiseS(x), y). */ \
- if (left->opcode() == \
- IrOpcode::k##Type##ExtAddPairwise##PairwiseType##S && \
- CanCover(node, left)) { \
- Emit(kArm64Sadalp | LaneSizeField::encode(LaneSize), \
- g.DefineSameAsFirst(node), g.UseRegister(right), \
- g.UseRegister(left->InputAt(0))); \
- return; \
- } \
- /* Select Uadalp(x, y) for Add(x, ExtAddPairwiseU(y)). */ \
- if (right->opcode() == \
- IrOpcode::k##Type##ExtAddPairwise##PairwiseType##U && \
- CanCover(node, right)) { \
- Emit(kArm64Uadalp | LaneSizeField::encode(LaneSize), \
- g.DefineSameAsFirst(node), g.UseRegister(left), \
- g.UseRegister(right->InputAt(0))); \
- return; \
- } \
- /* Select Uadalp(y, x) for Add(ExtAddPairwiseU(x), y). */ \
- if (left->opcode() == \
- IrOpcode::k##Type##ExtAddPairwise##PairwiseType##U && \
- CanCover(node, left)) { \
- Emit(kArm64Uadalp | LaneSizeField::encode(LaneSize), \
- g.DefineSameAsFirst(node), g.UseRegister(right), \
- g.UseRegister(left->InputAt(0))); \
- return; \
- } \
- VisitRRR(this, kArm64##Type##Add, node); \
+namespace {
+
+// Used for pattern matching SIMD Add operations where one of the inputs
+// matches |opcode|, ensuring the matched input is on the LHS (input 0).
+struct SimdAddOpMatcher : public NodeMatcher {
+ explicit SimdAddOpMatcher(Node* node, IrOpcode::Value opcode)
+ : NodeMatcher(node),
+ opcode_(opcode),
+ left_(InputAt(0)),
+ right_(InputAt(1)) {
+ DCHECK(HasProperty(Operator::kCommutative));
+ PutOpOnLeft();
+ }
+
+ bool Matches() { return left_->opcode() == opcode_; }
+ Node* left() const { return left_; }
+ Node* right() const { return right_; }
+
+ private:
+ void PutOpOnLeft() {
+ if (right_->opcode() == opcode_) {
+ std::swap(left_, right_);
+ node()->ReplaceInput(0, left_);
+ node()->ReplaceInput(1, right_);
+ }
+ }
+ IrOpcode::Value opcode_;
+ Node* left_;
+ Node* right_;
+};
+
+bool ShraHelper(InstructionSelector* selector, Node* node, int lane_size,
+ InstructionCode shra_code, InstructionCode add_code,
+ IrOpcode::Value shift_op) {
+ Arm64OperandGenerator g(selector);
+ SimdAddOpMatcher m(node, shift_op);
+ if (!m.Matches() || !selector->CanCover(node, m.left())) return false;
+ if (!g.IsIntegerConstant(m.left()->InputAt(1))) return false;
+
+ // If shifting by zero, just do the addition
+ if (g.GetIntegerConstantValue(m.left()->InputAt(1)) % lane_size == 0) {
+ selector->Emit(add_code, g.DefineAsRegister(node),
+ g.UseRegister(m.left()->InputAt(0)),
+ g.UseRegister(m.right()));
+ } else {
+ selector->Emit(shra_code | LaneSizeField::encode(lane_size),
+ g.DefineSameAsFirst(node), g.UseRegister(m.right()),
+ g.UseRegister(m.left()->InputAt(0)),
+ g.UseImmediate(m.left()->InputAt(1)));
+ }
+ return true;
+}
+
+bool AdalpHelper(InstructionSelector* selector, Node* node, int lane_size,
+ InstructionCode adalp_code, IrOpcode::Value ext_op) {
+ Arm64OperandGenerator g(selector);
+ SimdAddOpMatcher m(node, ext_op);
+ if (!m.Matches() || !selector->CanCover(node, m.left())) return false;
+ selector->Emit(adalp_code | LaneSizeField::encode(lane_size),
+ g.DefineSameAsFirst(node), g.UseRegister(m.right()),
+ g.UseRegister(m.left()->InputAt(0)));
+ return true;
+}
+
+bool MlaHelper(InstructionSelector* selector, Node* node,
+ InstructionCode mla_code, IrOpcode::Value mul_op) {
+ Arm64OperandGenerator g(selector);
+ SimdAddOpMatcher m(node, mul_op);
+ if (!m.Matches() || !selector->CanCover(node, m.left())) return false;
+ selector->Emit(mla_code, g.DefineSameAsFirst(node), g.UseRegister(m.right()),
+ g.UseRegister(m.left()->InputAt(0)),
+ g.UseRegister(m.left()->InputAt(1)));
+ return true;
+}
+
+bool SmlalHelper(InstructionSelector* selector, Node* node, int lane_size,
+ InstructionCode smlal_code, IrOpcode::Value ext_mul_op) {
+ Arm64OperandGenerator g(selector);
+ SimdAddOpMatcher m(node, ext_mul_op);
+ if (!m.Matches() || !selector->CanCover(node, m.left())) return false;
+
+ selector->Emit(smlal_code | LaneSizeField::encode(lane_size),
+ g.DefineSameAsFirst(node), g.UseRegister(m.right()),
+ g.UseRegister(m.left()->InputAt(0)),
+ g.UseRegister(m.left()->InputAt(1)));
+ return true;
+}
+
+} // namespace
+
+void InstructionSelector::VisitI64x2Add(Node* node) {
+ if (!ShraHelper(this, node, 64, kArm64Ssra,
+ kArm64IAdd | LaneSizeField::encode(64),
+ IrOpcode::kI64x2ShrS) &&
+ !ShraHelper(this, node, 64, kArm64Usra,
+ kArm64IAdd | LaneSizeField::encode(64),
+ IrOpcode::kI64x2ShrU)) {
+ VisitRRR(this, kArm64IAdd | LaneSizeField::encode(64), node);
+ }
+}
+
+void InstructionSelector::VisitI8x16Add(Node* node) {
+ if (!ShraHelper(this, node, 8, kArm64Ssra,
+ kArm64IAdd | LaneSizeField::encode(8),
+ IrOpcode::kI8x16ShrS) &&
+ !ShraHelper(this, node, 8, kArm64Usra,
+ kArm64IAdd | LaneSizeField::encode(8),
+ IrOpcode::kI8x16ShrU)) {
+ VisitRRR(this, kArm64IAdd | LaneSizeField::encode(8), node);
+ }
+}
+
+#define VISIT_SIMD_ADD(Type, PairwiseType, LaneSize) \
+ void InstructionSelector::Visit##Type##Add(Node* node) { \
+ /* Select Mla(z, x, y) for Add(x, Mul(y, z)). */ \
+ if (MlaHelper(this, node, kArm64Mla | LaneSizeField::encode(LaneSize), \
+ IrOpcode::k##Type##Mul)) { \
+ return; \
+ } \
+ /* Select S/Uadalp(x, y) for Add(x, ExtAddPairwise(y)). */ \
+ if (AdalpHelper(this, node, LaneSize, kArm64Sadalp, \
+ IrOpcode::k##Type##ExtAddPairwise##PairwiseType##S) || \
+ AdalpHelper(this, node, LaneSize, kArm64Uadalp, \
+ IrOpcode::k##Type##ExtAddPairwise##PairwiseType##U)) { \
+ return; \
+ } \
+ /* Select S/Usra(x, y) for Add(x, ShiftRight(y, imm)). */ \
+ if (ShraHelper(this, node, LaneSize, kArm64Ssra, \
+ kArm64IAdd | LaneSizeField::encode(LaneSize), \
+ IrOpcode::k##Type##ShrS) || \
+ ShraHelper(this, node, LaneSize, kArm64Usra, \
+ kArm64IAdd | LaneSizeField::encode(LaneSize), \
+ IrOpcode::k##Type##ShrU)) { \
+ return; \
+ } \
+ /* Select Smlal/Umlal(x, y, z) for Add(x, ExtMulLow(y, z)) and \
+ * Smlal2/Umlal2(x, y, z) for Add(x, ExtMulHigh(y, z)). */ \
+ if (SmlalHelper(this, node, LaneSize, kArm64Smlal, \
+ IrOpcode::k##Type##ExtMulLow##PairwiseType##S) || \
+ SmlalHelper(this, node, LaneSize, kArm64Smlal2, \
+ IrOpcode::k##Type##ExtMulHigh##PairwiseType##S) || \
+ SmlalHelper(this, node, LaneSize, kArm64Umlal, \
+ IrOpcode::k##Type##ExtMulLow##PairwiseType##U) || \
+ SmlalHelper(this, node, LaneSize, kArm64Umlal2, \
+ IrOpcode::k##Type##ExtMulHigh##PairwiseType##U)) { \
+ return; \
+ } \
+ VisitRRR(this, kArm64IAdd | LaneSizeField::encode(LaneSize), node); \
}
VISIT_SIMD_ADD(I32x4, I16x8, 32)
VISIT_SIMD_ADD(I16x8, I8x16, 16)
#undef VISIT_SIMD_ADD
-#define VISIT_SIMD_SUB(Type) \
+#define VISIT_SIMD_SUB(Type, LaneSize) \
void InstructionSelector::Visit##Type##Sub(Node* node) { \
Arm64OperandGenerator g(this); \
Node* left = node->InputAt(0); \
Node* right = node->InputAt(1); \
/* Select Mls(z, x, y) for Sub(z, Mul(x, y)). */ \
if (right->opcode() == IrOpcode::k##Type##Mul && CanCover(node, right)) { \
- Emit(kArm64##Type##Mls, g.DefineSameAsFirst(node), g.UseRegister(left), \
+ Emit(kArm64Mls | LaneSizeField::encode(LaneSize), \
+ g.DefineSameAsFirst(node), g.UseRegister(left), \
g.UseRegister(right->InputAt(0)), \
g.UseRegister(right->InputAt(1))); \
return; \
} \
- VisitRRR(this, kArm64##Type##Sub, node); \
+ VisitRRR(this, kArm64ISub | LaneSizeField::encode(LaneSize), node); \
}
-VISIT_SIMD_SUB(I32x4)
-VISIT_SIMD_SUB(I16x8)
+VISIT_SIMD_SUB(I32x4, 32)
+VISIT_SIMD_SUB(I16x8, 16)
#undef VISIT_SIMD_SUB
void InstructionSelector::VisitS128Select(Node* node) {
@@ -4110,6 +4252,8 @@ InstructionSelector::SupportedMachineOperatorFlags() {
MachineOperatorBuilder::kFloat64RoundTiesAway |
MachineOperatorBuilder::kFloat32RoundTiesEven |
MachineOperatorBuilder::kFloat64RoundTiesEven |
+ MachineOperatorBuilder::kWord32Popcnt |
+ MachineOperatorBuilder::kWord64Popcnt |
MachineOperatorBuilder::kWord32ShiftIsSafe |
MachineOperatorBuilder::kInt32DivIsSafe |
MachineOperatorBuilder::kUint32DivIsSafe |
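The same consolidation applies to atomics in the selector above: the old kWord32Atomic*/kArm64Word64Atomic* pairs become shared kAtomic* opcodes, and an AtomicWidth tag in the InstructionCode records whether the operation targets a 32- or 64-bit machine word (a Uint32 exchange under Word64, for instance, must zero-extend its result). A sketch of that tagging follows, assuming invented enum values and a made-up field position.

    // Sketch: shared atomic opcode + width tag. Values are illustrative.
    #include <cassert>
    #include <cstdint>

    enum class AtomicWidth { kWord32, kWord64 };
    using InstructionCode = uint32_t;

    constexpr InstructionCode kAtomicExchangeWord32Demo = 7;  // placeholder

    struct AtomicWidthField {
      static constexpr int kShift = 20;  // illustrative position
      static InstructionCode encode(AtomicWidth width) {
        return static_cast<uint32_t>(width) << kShift;
      }
      static AtomicWidth decode(InstructionCode code) {
        return static_cast<AtomicWidth>((code >> kShift) & 1u);
      }
    };

    int main() {
      // A Word64 atomic exchange on a Uint32 input reuses the 32-bit
      // exchange opcode but tags it as a 64-bit-wide operation.
      InstructionCode code = kAtomicExchangeWord32Demo |
                             AtomicWidthField::encode(AtomicWidth::kWord64);
      assert(AtomicWidthField::decode(code) == AtomicWidth::kWord64);
      return 0;
    }

Note also the comment retained in VisitAtomicLoad: acquire and sequentially consistent loads both lower to LDAR (and the corresponding stores to STLR), so the selector can ignore the requested memory order on arm64.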
diff --git a/deps/v8/src/compiler/backend/code-generator.cc b/deps/v8/src/compiler/backend/code-generator.cc
index 9e378b8458..ad5e18d002 100644
--- a/deps/v8/src/compiler/backend/code-generator.cc
+++ b/deps/v8/src/compiler/backend/code-generator.cc
@@ -41,14 +41,16 @@ class CodeGenerator::JumpTable final : public ZoneObject {
size_t const target_count_;
};
-CodeGenerator::CodeGenerator(
- Zone* codegen_zone, Frame* frame, Linkage* linkage,
- InstructionSequence* instructions, OptimizedCompilationInfo* info,
- Isolate* isolate, base::Optional<OsrHelper> osr_helper,
- int start_source_position, JumpOptimizationInfo* jump_opt,
- PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
- Builtin builtin, size_t max_unoptimized_frame_height,
- size_t max_pushed_argument_count, const char* debug_name)
+CodeGenerator::CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
+ InstructionSequence* instructions,
+ OptimizedCompilationInfo* info, Isolate* isolate,
+ base::Optional<OsrHelper> osr_helper,
+ int start_source_position,
+ JumpOptimizationInfo* jump_opt,
+ const AssemblerOptions& options, Builtin builtin,
+ size_t max_unoptimized_frame_height,
+ size_t max_pushed_argument_count,
+ const char* debug_name)
: zone_(codegen_zone),
isolate_(isolate),
frame_access_state_(nullptr),
@@ -80,7 +82,6 @@ CodeGenerator::CodeGenerator(
codegen_zone, SourcePositionTableBuilder::RECORD_SOURCE_POSITIONS),
protected_instructions_(codegen_zone),
result_(kSuccess),
- poisoning_level_(poisoning_level),
block_starts_(codegen_zone),
instr_starts_(codegen_zone),
debug_name_(debug_name) {
@@ -284,9 +285,6 @@ void CodeGenerator::AssembleCode() {
BailoutIfDeoptimized();
}
- offsets_info_.init_poison = tasm()->pc_offset();
- InitializeSpeculationPoison();
-
// Define deoptimization literals for all inlined functions.
DCHECK_EQ(0u, deoptimization_literals_.size());
for (OptimizedCompilationInfo::InlinedFunctionHolder& inlined :
@@ -355,8 +353,6 @@ void CodeGenerator::AssembleCode() {
tasm()->bind(GetLabel(current_block_));
- TryInsertBranchPoisoning(block);
-
if (block->must_construct_frame()) {
AssembleConstructFrame();
// We need to setup the root register after we assemble the prologue, to
@@ -494,37 +490,6 @@ void CodeGenerator::AssembleCode() {
result_ = kSuccess;
}
-void CodeGenerator::TryInsertBranchPoisoning(const InstructionBlock* block) {
- // See if our predecessor was a basic block terminated by a branch_and_poison
- // instruction. If yes, then perform the masking based on the flags.
- if (block->PredecessorCount() != 1) return;
- RpoNumber pred_rpo = (block->predecessors())[0];
- const InstructionBlock* pred = instructions()->InstructionBlockAt(pred_rpo);
- if (pred->code_start() == pred->code_end()) return;
- Instruction* instr = instructions()->InstructionAt(pred->code_end() - 1);
- FlagsMode mode = FlagsModeField::decode(instr->opcode());
- switch (mode) {
- case kFlags_branch_and_poison: {
- BranchInfo branch;
- RpoNumber target = ComputeBranchInfo(&branch, instr);
- if (!target.IsValid()) {
- // Non-trivial branch, add the masking code.
- FlagsCondition condition = branch.condition;
- if (branch.false_label == GetLabel(block->rpo_number())) {
- condition = NegateFlagsCondition(condition);
- }
- AssembleBranchPoisoning(condition, instr);
- }
- break;
- }
- case kFlags_deoptimize_and_poison: {
- UNREACHABLE();
- }
- default:
- break;
- }
-}
-
void CodeGenerator::AssembleArchBinarySearchSwitchRange(
Register input, RpoNumber def_block, std::pair<int32_t, Label*>* begin,
std::pair<int32_t, Label*>* end) {
@@ -839,8 +804,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
switch (mode) {
- case kFlags_branch:
- case kFlags_branch_and_poison: {
+ case kFlags_branch: {
BranchInfo branch;
RpoNumber target = ComputeBranchInfo(&branch, instr);
if (target.IsValid()) {
@@ -854,8 +818,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
AssembleArchBranch(instr, &branch);
break;
}
- case kFlags_deoptimize:
- case kFlags_deoptimize_and_poison: {
+ case kFlags_deoptimize: {
// Assemble a conditional eager deoptimization after this instruction.
InstructionOperandConverter i(this, instr);
size_t frame_state_offset =
@@ -864,17 +827,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
DeoptImmedArgsCountField::decode(instr->opcode());
DeoptimizationExit* const exit = AddDeoptimizationExit(
instr, frame_state_offset, immediate_args_count);
- Label continue_label;
BranchInfo branch;
branch.condition = condition;
branch.true_label = exit->label();
- branch.false_label = &continue_label;
+ branch.false_label = exit->continue_label();
branch.fallthru = true;
AssembleArchDeoptBranch(instr, &branch);
- tasm()->bind(&continue_label);
- if (mode == kFlags_deoptimize_and_poison) {
- AssembleBranchPoisoning(NegateFlagsCondition(branch.condition), instr);
- }
tasm()->bind(exit->continue_label());
break;
}
@@ -890,21 +848,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleInstruction(
case kFlags_trap: {
#if V8_ENABLE_WEBASSEMBLY
AssembleArchTrap(instr, condition);
+ break;
#else
UNREACHABLE();
#endif // V8_ENABLE_WEBASSEMBLY
- break;
}
case kFlags_none: {
break;
}
}
- // TODO(jarin) We should thread the flag through rather than set it.
- if (instr->IsCall()) {
- ResetSpeculationPoison();
- }
-
return kSuccess;
}
@@ -1087,9 +1040,9 @@ void CodeGenerator::RecordCallPosition(Instruction* instr) {
if (needs_frame_state) {
MarkLazyDeoptSite();
- // If the frame state is present, it starts at argument 2 - after
- // the code address and the poison-alias index.
- size_t frame_state_offset = 2;
+ // If the frame state is present, it starts at argument 1 - after
+ // the code address.
+ size_t frame_state_offset = 1;
FrameStateDescriptor* descriptor =
GetDeoptimizationEntry(instr, frame_state_offset).descriptor();
int pc_offset = tasm()->pc_offset_for_safepoint();
@@ -1428,29 +1381,6 @@ DeoptimizationExit* CodeGenerator::AddDeoptimizationExit(
OutputFrameStateCombine::Ignore());
}
-void CodeGenerator::InitializeSpeculationPoison() {
- if (poisoning_level_ == PoisoningMitigationLevel::kDontPoison) return;
-
- // Initialize {kSpeculationPoisonRegister} either by comparing the expected
- // with the actual call target, or by unconditionally using {-1} initially.
- // Masking register arguments with it only makes sense in the first case.
- if (info()->called_with_code_start_register()) {
- tasm()->RecordComment("-- Prologue: generate speculation poison --");
- GenerateSpeculationPoisonFromCodeStartRegister();
- if (info()->poison_register_arguments()) {
- AssembleRegisterArgumentPoisoning();
- }
- } else {
- ResetSpeculationPoison();
- }
-}
-
-void CodeGenerator::ResetSpeculationPoison() {
- if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
- tasm()->ResetSpeculationPoisonRegister();
- }
-}
-
OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
: frame_(gen->frame()), tasm_(gen->tasm()), next_(gen->ools_) {
gen->ools_ = this;
diff --git a/deps/v8/src/compiler/backend/code-generator.h b/deps/v8/src/compiler/backend/code-generator.h
index 7ccb09d5ac..18de20f92c 100644
--- a/deps/v8/src/compiler/backend/code-generator.h
+++ b/deps/v8/src/compiler/backend/code-generator.h
@@ -103,7 +103,6 @@ class DeoptimizationLiteral {
struct TurbolizerCodeOffsetsInfo {
int code_start_register_check = -1;
int deopt_check = -1;
- int init_poison = -1;
int blocks_start = -1;
int out_of_line_code = -1;
int deoptimization_exits = -1;
@@ -120,14 +119,16 @@ struct TurbolizerInstructionStartInfo {
// Generates native code for a sequence of instructions.
class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
public:
- explicit CodeGenerator(
- Zone* codegen_zone, Frame* frame, Linkage* linkage,
- InstructionSequence* instructions, OptimizedCompilationInfo* info,
- Isolate* isolate, base::Optional<OsrHelper> osr_helper,
- int start_source_position, JumpOptimizationInfo* jump_opt,
- PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
- Builtin builtin, size_t max_unoptimized_frame_height,
- size_t max_pushed_argument_count, const char* debug_name = nullptr);
+ explicit CodeGenerator(Zone* codegen_zone, Frame* frame, Linkage* linkage,
+ InstructionSequence* instructions,
+ OptimizedCompilationInfo* info, Isolate* isolate,
+ base::Optional<OsrHelper> osr_helper,
+ int start_source_position,
+ JumpOptimizationInfo* jump_opt,
+ const AssemblerOptions& options, Builtin builtin,
+ size_t max_unoptimized_frame_height,
+ size_t max_pushed_argument_count,
+ const char* debug_name = nullptr);
// Generate native code. After calling AssembleCode, call FinalizeCode to
// produce the actual code object. If an error occurs during either phase,
@@ -216,17 +217,6 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
// Assemble instructions for the specified block.
CodeGenResult AssembleBlock(const InstructionBlock* block);
- // Inserts mask update at the beginning of an instruction block if the
- // predecessor blocks ends with a masking branch.
- void TryInsertBranchPoisoning(const InstructionBlock* block);
-
- // Initializes the masking register in the prologue of a function.
- void InitializeSpeculationPoison();
- // Reset the masking register during execution of a function.
- void ResetSpeculationPoison();
- // Generates a mask from the pc passed in {kJavaScriptCallCodeStartRegister}.
- void GenerateSpeculationPoisonFromCodeStartRegister();
-
// Assemble code for the specified instruction.
CodeGenResult AssembleInstruction(int instruction_index,
const InstructionBlock* block);
@@ -276,18 +266,12 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
// contains the expected pointer to the start of the instruction stream.
void AssembleCodeStartRegisterCheck();
- void AssembleBranchPoisoning(FlagsCondition condition, Instruction* instr);
-
// When entering a code that is marked for deoptimization, rather continuing
// with its execution, we jump to a lazy compiled code. We need to do this
// because this code has already been deoptimized and needs to be unlinked
// from the JS functions referring it.
void BailoutIfDeoptimized();
- // Generates code to poison the stack pointer and implicit register arguments
- // like the context register and the function register.
- void AssembleRegisterArgumentPoisoning();
-
// Generates an architecture-specific, descriptor-specific prologue
// to set up a stack frame.
void AssembleConstructFrame();
@@ -484,7 +468,6 @@ class V8_EXPORT_PRIVATE CodeGenerator final : public GapResolver::Assembler {
SourcePositionTableBuilder source_position_table_builder_;
ZoneVector<trap_handler::ProtectedInstructionData> protected_instructions_;
CodeGenResult result_;
- PoisoningMitigationLevel poisoning_level_;
ZoneVector<int> block_starts_;
TurbolizerCodeOffsetsInfo offsets_info_;
ZoneVector<TurbolizerInstructionStartInfo> instr_starts_;
diff --git a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
index 5db3f20fa4..e03f934ba5 100644
--- a/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/code-generator-ia32.cc
@@ -5,6 +5,7 @@
#include "src/base/overflowing-math.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/callable.h"
+#include "src/codegen/cpu-features.h"
#include "src/codegen/ia32/assembler-ia32.h"
#include "src/codegen/ia32/register-ia32.h"
#include "src/codegen/macro-assembler.h"
@@ -684,16 +685,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
__ bind(&skip);
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- // TODO(860429): Remove remaining poisoning infrastructure on ia32.
- UNREACHABLE();
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- // TODO(860429): Remove remaining poisoning infrastructure on ia32.
- UNREACHABLE();
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -712,11 +703,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineCall(reg);
- } else {
- __ call(reg);
- }
+ __ call(reg);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -738,19 +725,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (DetermineStubCallMode() == StubCallMode::kCallWasmRuntimeStub) {
__ wasm_call(wasm_code, constant.rmode());
} else {
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineCall(wasm_code, constant.rmode());
- } else {
- __ call(wasm_code, constant.rmode());
- }
+ __ call(wasm_code, constant.rmode());
}
} else {
- Register reg = i.InputRegister(0);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineCall(reg);
- } else {
- __ call(reg);
- }
+ __ call(i.InputRegister(0));
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -762,12 +740,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Address wasm_code = static_cast<Address>(constant.ToInt32());
__ jmp(wasm_code, constant.rmode());
} else {
- Register reg = i.InputRegister(0);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineJump(reg);
- } else {
- __ jmp(reg);
- }
+ __ jmp(i.InputRegister(0));
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -784,11 +757,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineJump(reg);
- } else {
- __ jmp(reg);
- }
+ __ jmp(reg);
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -800,11 +769,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineJump(reg);
- } else {
- __ jmp(reg);
- }
+ __ jmp(reg);
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
break;
@@ -993,7 +958,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(ool->exit());
break;
}
- case kArchStoreWithWriteBarrier: {
+ case kArchStoreWithWriteBarrier: // Fall thrugh.
+ case kArchAtomicStoreWithWriteBarrier: {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
@@ -1005,7 +971,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
auto ool = zone()->New<OutOfLineRecordWrite>(this, object, operand, value,
scratch0, scratch1, mode,
DetermineStubCallMode());
- __ mov(operand, value);
+ if (arch_opcode == kArchStoreWithWriteBarrier) {
+ __ mov(operand, value);
+ } else {
+ __ mov(scratch0, value);
+ __ xchg(scratch0, operand);
+ }
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
}
@@ -1278,9 +1249,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kIA32Bswap:
__ bswap(i.OutputRegister());
break;
- case kArchWordPoisonOnSpeculation:
- // TODO(860429): Remove remaining poisoning infrastructure on ia32.
- UNREACHABLE();
case kIA32MFence:
__ mfence();
break;
@@ -1290,40 +1258,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kSSEFloat32Cmp:
__ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
break;
- case kSSEFloat32Add:
- __ addss(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat32Sub:
- __ subss(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat32Mul:
- __ mulss(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat32Div:
- __ divss(i.InputDoubleRegister(0), i.InputOperand(1));
- // Don't delete this mov. It may improve performance on some CPUs,
- // when there is a (v)mulss depending on the result.
- __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
- break;
case kSSEFloat32Sqrt:
__ sqrtss(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kSSEFloat32Abs: {
- // TODO(bmeurer): Use 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psrlq(tmp, 33);
- __ andps(i.OutputDoubleRegister(), tmp);
- break;
- }
- case kSSEFloat32Neg: {
- // TODO(bmeurer): Use 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psllq(tmp, 31);
- __ xorps(i.OutputDoubleRegister(), tmp);
- break;
- }
case kSSEFloat32Round: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
RoundingMode const mode =
@@ -1334,21 +1271,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kSSEFloat64Cmp:
__ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
break;
- case kSSEFloat64Add:
- __ addsd(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat64Sub:
- __ subsd(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat64Mul:
- __ mulsd(i.InputDoubleRegister(0), i.InputOperand(1));
- break;
- case kSSEFloat64Div:
- __ divsd(i.InputDoubleRegister(0), i.InputOperand(1));
- // Don't delete this mov. It may improve performance on some CPUs,
- // when there is a (v)mulsd depending on the result.
- __ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
- break;
case kSSEFloat32Max: {
Label compare_swap, done_compare;
if (instr->InputAt(1)->IsFPRegister()) {
@@ -1488,22 +1410,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ mov(esp, tmp);
break;
}
- case kSSEFloat64Abs: {
- // TODO(bmeurer): Use 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psrlq(tmp, 1);
- __ andps(i.OutputDoubleRegister(), tmp);
- break;
- }
- case kSSEFloat64Neg: {
- // TODO(bmeurer): Use 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psllq(tmp, 63);
- __ xorps(i.OutputDoubleRegister(), tmp);
- break;
- }
case kSSEFloat64Sqrt:
__ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
@@ -1571,94 +1477,106 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kSSEFloat64LoadLowWord32:
__ movd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
- case kAVXFloat32Add: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vaddss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat32Add: {
+ __ Addss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
break;
}
- case kAVXFloat32Sub: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vsubss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat32Sub: {
+ __ Subss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
break;
}
- case kAVXFloat32Mul: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vmulss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat32Mul: {
+ __ Mulss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
break;
}
- case kAVXFloat32Div: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vdivss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat32Div: {
+ __ Divss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
// Don't delete this mov. It may improve performance on some CPUs,
// when there is a (v)mulss depending on the result.
__ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
}
- case kAVXFloat64Add: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vaddsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat64Add: {
+ __ Addsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
break;
}
- case kAVXFloat64Sub: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vsubsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat64Sub: {
+ __ Subsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
break;
}
- case kAVXFloat64Mul: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vmulsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat64Mul: {
+ __ Mulsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
break;
}
- case kAVXFloat64Div: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vdivsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
- i.InputOperand(1));
+ case kFloat64Div: {
+ __ Divsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputOperand(1));
// Don't delete this mov. It may improve performance on some CPUs,
// when there is a (v)mulsd depending on the result.
__ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
}
- case kAVXFloat32Abs: {
+ case kFloat32Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psrlq(tmp, 33);
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vandps(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psrlq(kScratchDoubleReg, byte{33});
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vandps(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputOperand(0));
+ } else {
+ DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ }
break;
}
- case kAVXFloat32Neg: {
+ case kFloat32Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psllq(tmp, 31);
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vxorps(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psllq(kScratchDoubleReg, byte{31});
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputOperand(0));
+ } else {
+ DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ }
break;
}
- case kAVXFloat64Abs: {
+ case kFloat64Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psrlq(tmp, 1);
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vandpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psrlq(kScratchDoubleReg, byte{1});
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputOperand(0));
+ } else {
+ DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ }
break;
}
- case kAVXFloat64Neg: {
+ case kFloat64Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
- XMMRegister tmp = i.TempSimd128Register(0);
- __ pcmpeqd(tmp, tmp);
- __ psllq(tmp, 63);
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vxorpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
+ __ Psllq(kScratchDoubleReg, byte{63});
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg,
+ i.InputOperand(0));
+ } else {
+ DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
+ }
break;
}
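+    // --- Illustrative aside (not part of the patch): constant-free masks.
+    // The pcmpeqd + psrlq/psllq sequences above synthesize the float sign
+    // masks in a scratch register instead of loading 128-bit constants:
+    // pcmpeqd(x, x) yields all-ones, shifting right by 1 leaves the abs
+    // mask 0x7FF..., shifting left by 63 leaves the sign-flip mask
+    // 0x800.... A hedged scalar model for the double-precision case:
+    //
+    //   #include <cstdint>
+    //   #include <cstring>
+    //
+    //   double AbsViaMask(double x) {
+    //     uint64_t bits, mask = ~uint64_t{0} >> 1;  // psrlq(all-ones, 1)
+    //     std::memcpy(&bits, &x, sizeof bits);
+    //     bits &= mask;                             // andps
+    //     std::memcpy(&x, &bits, sizeof x);
+    //     return x;
+    //   }
+    //
+    //   double NegViaMask(double x) {
+    //     uint64_t bits, mask = ~uint64_t{0} << 63;  // psllq(all-ones, 63)
+    //     std::memcpy(&bits, &x, sizeof bits);
+    //     bits ^= mask;                              // xorps
+    //     std::memcpy(&x, &bits, sizeof x);
+    //     return x;
+    //   }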
case kSSEFloat64SilenceNaN:
@@ -2374,48 +2292,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vandnps(dst, dst, kScratchDoubleReg);
break;
}
- case kSSEF32x4Eq: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ cmpeqps(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32F32x4Eq: {
+ __ Cmpeqps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXF32x4Eq: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vcmpeqps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32F32x4Ne: {
+ __ Cmpneqps(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEF32x4Ne: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ cmpneqps(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXF32x4Ne: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vcmpneqps(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEF32x4Lt: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ cmpltps(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXF32x4Lt: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vcmpltps(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEF32x4Le: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ cmpleps(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32F32x4Lt: {
+ __ Cmpltps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXF32x4Le: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vcmpleps(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32F32x4Le: {
+ __ Cmpleps(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kIA32F32x4Pmin: {
@@ -2445,20 +2339,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I32x4SConvertF32x4: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src = i.InputSimd128Register(0);
- // NAN->0
- __ Cmpeqps(kScratchDoubleReg, src, src);
- __ Pand(dst, src, kScratchDoubleReg);
- // Set top bit if >= 0 (but not -0.0!)
- __ Pxor(kScratchDoubleReg, dst);
- // Convert
- __ Cvttps2dq(dst, dst);
- // Set top bit if >=0 is now < 0
- __ Pand(kScratchDoubleReg, dst);
- __ Psrad(kScratchDoubleReg, kScratchDoubleReg, byte{31});
- // Set positive overflow lanes to 0x7FFFFFFF
- __ Pxor(dst, kScratchDoubleReg);
+ __ I32x4SConvertF32x4(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ i.TempRegister(0));
break;
}
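+    // --- Illustrative aside (not part of the patch): saturating convert.
+    // The inline sequence deleted above now lives in the
+    // I32x4SConvertF32x4 macro-assembler helper; either way it implements
+    // wasm's saturating semantics on top of cvttps2dq, which produces
+    // INT32_MIN for every out-of-range or NaN input: NaN lanes are forced
+    // to 0 and positive overflow is patched to INT32_MAX. A hedged
+    // per-lane scalar model:
+    //
+    //   #include <cmath>
+    //   #include <cstdint>
+    //   #include <limits>
+    //
+    //   int32_t I32SConvertF32(float x) {
+    //     if (std::isnan(x)) return 0;
+    //     if (x >= 2147483648.0f) {
+    //       return std::numeric_limits<int32_t>::max();
+    //     }
+    //     if (x < -2147483648.0f) {
+    //       return std::numeric_limits<int32_t>::min();
+    //     }
+    //     return static_cast<int32_t>(x);  // in-range: plain truncation
+    //   }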
case kIA32I32x4SConvertI16x8Low: {
@@ -2490,117 +2373,63 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_SHIFT(Psrad, 5);
break;
}
- case kSSEI32x4Add: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ paddd(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I32x4Add: {
+ __ Paddd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI32x4Add: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpaddd(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I32x4Sub: {
+ __ Psubd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kSSEI32x4Sub: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ psubd(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I32x4Mul: {
+ __ Pmulld(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI32x4Sub: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpsubd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I32x4MinS: {
+ __ Pminsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI32x4Mul: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmulld(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I32x4MaxS: {
+ __ Pmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI32x4Mul: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmulld(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I32x4Eq: {
+ __ Pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI32x4MinS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pminsd(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI32x4MinS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpminsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I32x4Ne: {
+ __ Pcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
+ __ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
+ __ Pxor(i.OutputSimd128Register(), i.OutputSimd128Register(),
+ kScratchDoubleReg);
break;
}
- case kSSEI32x4MaxS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmaxsd(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI32x4MaxS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I32x4GtS: {
+ __ Pcmpgtd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI32x4Eq: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pcmpeqd(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI32x4Eq: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI32x4Ne: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pcmpeqd(i.OutputSimd128Register(), i.InputOperand(1));
- __ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- __ xorps(i.OutputSimd128Register(), kScratchDoubleReg);
- break;
- }
- case kAVXI32x4Ne: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- __ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
- __ vpxor(i.OutputSimd128Register(), i.OutputSimd128Register(),
- kScratchDoubleReg);
- break;
- }
- case kSSEI32x4GtS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pcmpgtd(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI32x4GtS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpgtd(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI32x4GeS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ case kIA32I32x4GeS: {
XMMRegister dst = i.OutputSimd128Register();
- Operand src = i.InputOperand(1);
- __ pminsd(dst, src);
- __ pcmpeqd(dst, src);
- break;
- }
- case kAVXI32x4GeS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister src1 = i.InputSimd128Register(0);
- Operand src2 = i.InputOperand(1);
- __ vpminsd(kScratchDoubleReg, src1, src2);
- __ vpcmpeqd(i.OutputSimd128Register(), kScratchDoubleReg, src2);
+ XMMRegister src2 = i.InputSimd128Register(1);
+ if (CpuFeatures::IsSupported(AVX)) {
+ CpuFeatureScope avx_scope(tasm(), AVX);
+ __ vpminsd(kScratchDoubleReg, src1, src2);
+ __ vpcmpeqd(dst, kScratchDoubleReg, src2);
+ } else {
+ DCHECK_EQ(dst, src1);
+ CpuFeatureScope sse_scope(tasm(), SSE4_1);
+ __ pminsd(dst, src2);
+ __ pcmpeqd(dst, src2);
+ }
break;
}
case kSSEI32x4UConvertF32x4: {
@@ -2671,28 +2500,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_SHIFT(Psrld, 5);
break;
}
- case kSSEI32x4MinU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pminud(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI32x4MinU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpminud(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI32x4MaxU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmaxud(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I32x4MinU: {
+ __ Pminud(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI32x4MaxU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmaxud(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I32x4MaxU: {
+ __ Pmaxud(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kSSEI32x4GtU: {
@@ -2748,10 +2563,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I16x8Splat: {
- XMMRegister dst = i.OutputSimd128Register();
- __ Movd(dst, i.InputOperand(0));
- __ Pshuflw(dst, dst, uint8_t{0x0});
- __ Pshufd(dst, dst, uint8_t{0x0});
+ if (instr->InputAt(0)->IsRegister()) {
+ __ I16x8Splat(i.OutputSimd128Register(), i.InputRegister(0));
+ } else {
+ __ I16x8Splat(i.OutputSimd128Register(), i.InputOperand(0));
+ }
break;
}
case kIA32I16x8ExtractLaneS: {
@@ -2789,105 +2605,51 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_SHIFT(Psraw, 4);
break;
}
- case kSSEI16x8SConvertI32x4: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ packssdw(i.OutputSimd128Register(), i.InputSimd128Register(1));
- break;
- }
- case kAVXI16x8SConvertI32x4: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpackssdw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I16x8SConvertI32x4: {
+ __ Packssdw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kSSEI16x8Add: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ paddw(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I16x8Add: {
+ __ Paddw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI16x8Add: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpaddw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I16x8AddSatS: {
+ __ Paddsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI16x8AddSatS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ paddsw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8AddSatS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpaddsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI16x8Sub: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ psubw(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I16x8Sub: {
+ __ Psubw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI16x8Sub: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpsubw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I16x8SubSatS: {
+ __ Psubsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI16x8SubSatS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ psubsw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8SubSatS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpsubsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI16x8Mul: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pmullw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8Mul: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmullw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI16x8MinS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pminsw(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I16x8Mul: {
+ __ Pmullw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI16x8MinS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpminsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I16x8MinS: {
+ __ Pminsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kSSEI16x8MaxS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pmaxsw(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I16x8MaxS: {
+ __ Pmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI16x8MaxS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I16x8Eq: {
+ __ Pcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI16x8Eq: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pcmpeqw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8Eq: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
case kSSEI16x8Ne: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pcmpeqw(i.OutputSimd128Register(), i.InputOperand(1));
@@ -2904,15 +2666,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kScratchDoubleReg);
break;
}
- case kSSEI16x8GtS: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ pcmpgtw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8GtS: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpcmpgtw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I16x8GtS: {
+ __ Pcmpgtw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kSSEI16x8GeS: {
@@ -2944,63 +2700,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_SHIFT(Psrlw, 4);
break;
}
- case kSSEI16x8UConvertI32x4: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ packusdw(i.OutputSimd128Register(), i.InputSimd128Register(1));
- break;
- }
- case kAVXI16x8UConvertI32x4: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister dst = i.OutputSimd128Register();
- __ vpackusdw(dst, dst, i.InputSimd128Register(1));
- break;
- }
- case kSSEI16x8AddSatU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ paddusw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8AddSatU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpaddusw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I16x8UConvertI32x4: {
+ __ Packusdw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
- case kSSEI16x8SubSatU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ psubusw(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI16x8SubSatU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpsubusw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSEI16x8MinU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pminuw(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I16x8AddSatU: {
+ __ Paddusw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI16x8MinU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpminuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32I16x8SubSatU: {
+ __ Psubusw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSEI16x8MaxU: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- __ pmaxuw(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32I16x8MinU: {
+ __ Pminuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXI16x8MaxU: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I16x8MaxU: {
+ __ Pmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kSSEI16x8GtU: {
@@ -3060,10 +2782,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32I8x16Splat: {
- XMMRegister dst = i.OutputSimd128Register();
- __ Movd(dst, i.InputOperand(0));
- __ Pxor(kScratchDoubleReg, kScratchDoubleReg);
- __ Pshufb(dst, kScratchDoubleReg);
+ if (instr->InputAt(0)->IsRegister()) {
+ __ I8x16Splat(i.OutputSimd128Register(), i.InputRegister(0),
+ kScratchDoubleReg);
+ } else {
+ __ I8x16Splat(i.OutputSimd128Register(), i.InputOperand(0),
+ kScratchDoubleReg);
+ }
break;
}
case kIA32I8x16ExtractLaneS: {
@@ -3137,15 +2862,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ S128Store32Lane(operand, i.InputSimd128Register(index), laneidx);
break;
}
- case kSSEI8x16SConvertI16x8: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ packsswb(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXI8x16SConvertI16x8: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpacksswb(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32I8x16SConvertI16x8: {
+ __ Packsswb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kIA32I8x16Neg: {
@@ -3162,64 +2881,29 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kIA32I8x16Shl: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
+ Register tmp = i.TempRegister(0);
if (HasImmediateInput(instr, 1)) {
- // Perform 16-bit shift, then mask away low bits.
- uint8_t shift = i.InputInt3(1);
- __ Psllw(dst, dst, byte{shift});
-
- uint8_t bmask = static_cast<uint8_t>(0xff << shift);
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- __ mov(tmp, mask);
- __ Movd(tmp_simd, tmp);
- __ Pshufd(tmp_simd, tmp_simd, uint8_t{0});
- __ Pand(dst, tmp_simd);
+ __ I8x16Shl(dst, src, i.InputInt3(1), tmp, kScratchDoubleReg);
} else {
- // Take shift value modulo 8.
- __ mov(tmp, i.InputRegister(1));
- __ and_(tmp, 7);
- // Mask off the unwanted bits before word-shifting.
- __ Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- __ add(tmp, Immediate(8));
- __ Movd(tmp_simd, tmp);
- __ Psrlw(kScratchDoubleReg, kScratchDoubleReg, tmp_simd);
- __ Packuswb(kScratchDoubleReg, kScratchDoubleReg);
- __ Pand(dst, kScratchDoubleReg);
- // TODO(zhin): sub here to avoid asking for another temporary register,
- // examine codegen for other i8x16 shifts, they use less instructions.
- __ sub(tmp, Immediate(8));
- __ Movd(tmp_simd, tmp);
- __ Psllw(dst, dst, tmp_simd);
+ XMMRegister tmp_simd = i.TempSimd128Register(1);
+ __ I8x16Shl(dst, src, i.InputRegister(1), tmp, kScratchDoubleReg,
+ tmp_simd);
}
break;
}
case kIA32I8x16ShrS: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
+
if (HasImmediateInput(instr, 1)) {
- __ Punpckhbw(kScratchDoubleReg, dst);
- __ Punpcklbw(dst, dst);
- uint8_t shift = i.InputInt3(1) + 8;
- __ Psraw(kScratchDoubleReg, shift);
- __ Psraw(dst, shift);
- __ Packsswb(dst, kScratchDoubleReg);
+ __ I8x16ShrS(dst, src, i.InputInt3(1), kScratchDoubleReg);
} else {
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
- // Unpack the bytes into words, do arithmetic shifts, and repack.
- __ Punpckhbw(kScratchDoubleReg, dst);
- __ Punpcklbw(dst, dst);
- __ mov(tmp, i.InputRegister(1));
- // Take shift value modulo 8.
- __ and_(tmp, 7);
- __ add(tmp, Immediate(8));
- __ Movd(tmp_simd, tmp);
- __ Psraw(kScratchDoubleReg, kScratchDoubleReg, tmp_simd);
- __ Psraw(dst, dst, tmp_simd);
- __ Packsswb(dst, kScratchDoubleReg);
+ __ I8x16ShrS(dst, src, i.InputRegister(1), i.TempRegister(0),
+ kScratchDoubleReg, i.TempSimd128Register(1));
}
break;
}
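+    // --- Illustrative aside (not part of the patch): byte-wise shifts.
+    // SSE has no per-byte shift, so the I8x16ShrS helper called above
+    // keeps the widen/shift/narrow shape of the deleted inline code:
+    // unpack bytes to 16-bit lanes, psraw, then packsswb back. A hedged
+    // model of one lane (wasm masks the shift amount to 0..7):
+    //
+    //   #include <cstdint>
+    //
+    //   int8_t I8ShrS(int8_t v, uint8_t amount) {
+    //     int16_t widened = v;  // punpck{l,h}bw + psraw widening idea
+    //     widened = static_cast<int16_t>(widened >> (amount & 7));
+    //     return static_cast<int8_t>(widened);  // packsswb re-narrowing
+    //   }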
@@ -3296,18 +2980,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpcmpeqb(i.OutputSimd128Register(), kScratchDoubleReg, src2);
break;
}
- case kSSEI8x16UConvertI16x8: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope sse_scope(tasm(), SSE4_1);
- XMMRegister dst = i.OutputSimd128Register();
- __ packuswb(dst, i.InputOperand(1));
- break;
- }
- case kAVXI8x16UConvertI16x8: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister dst = i.OutputSimd128Register();
- __ vpackuswb(dst, dst, i.InputOperand(1));
+ case kIA32I8x16UConvertI16x8: {
+ __ Packuswb(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
break;
}
case kIA32I8x16AddSatU: {
@@ -3322,34 +2997,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kIA32I8x16ShrU: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
+ Register tmp = i.TempRegister(0);
if (HasImmediateInput(instr, 1)) {
- // Perform 16-bit shift, then mask away high bits.
- uint8_t shift = i.InputInt3(1);
- __ Psrlw(dst, dst, byte{shift});
-
- uint8_t bmask = 0xff >> shift;
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- __ mov(tmp, mask);
- __ Movd(tmp_simd, tmp);
- __ Pshufd(tmp_simd, tmp_simd, uint8_t{0});
- __ Pand(dst, tmp_simd);
+ __ I8x16ShrU(dst, src, i.InputInt3(1), tmp, kScratchDoubleReg);
} else {
- // Unpack the bytes into words, do logical shifts, and repack.
- __ Punpckhbw(kScratchDoubleReg, dst);
- __ Punpcklbw(dst, dst);
- __ mov(tmp, i.InputRegister(1));
- // Take shift value modulo 8.
- __ and_(tmp, 7);
- __ add(tmp, Immediate(8));
- __ Movd(tmp_simd, tmp);
- __ Psrlw(kScratchDoubleReg, kScratchDoubleReg, tmp_simd);
- __ Psrlw(dst, dst, tmp_simd);
- __ Packuswb(dst, kScratchDoubleReg);
+ __ I8x16ShrU(dst, src, i.InputRegister(1), tmp, kScratchDoubleReg,
+ i.TempSimd128Register(1));
}
break;
}
case kIA32I8x16MinU: {
@@ -3444,37 +3102,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
kScratchDoubleReg);
break;
}
- case kSSES128And: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ andps(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXS128And: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpand(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
- break;
- }
- case kSSES128Or: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ orps(i.OutputSimd128Register(), i.InputOperand(1));
- break;
- }
- case kAVXS128Or: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpor(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ case kIA32S128And: {
+ __ Pand(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
- case kSSES128Xor: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- __ xorps(i.OutputSimd128Register(), i.InputOperand(1));
+ case kIA32S128Or: {
+ __ Por(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
- case kAVXS128Xor: {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vpxor(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputOperand(1));
+ case kIA32S128Xor: {
+ __ Pxor(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputOperand(1));
break;
}
case kIA32S128Select: {
@@ -3541,20 +3181,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32S128Load8Splat: {
- __ Pinsrb(i.OutputSimd128Register(), i.MemoryOperand(), 0);
- __ Pxor(kScratchDoubleReg, kScratchDoubleReg);
- __ Pshufb(i.OutputSimd128Register(), kScratchDoubleReg);
+ __ S128Load8Splat(i.OutputSimd128Register(), i.MemoryOperand(),
+ kScratchDoubleReg);
break;
}
case kIA32S128Load16Splat: {
- __ Pinsrw(i.OutputSimd128Register(), i.MemoryOperand(), 0);
- __ Pshuflw(i.OutputSimd128Register(), i.OutputSimd128Register(),
- uint8_t{0});
- __ Punpcklqdq(i.OutputSimd128Register(), i.OutputSimd128Register());
+ __ S128Load16Splat(i.OutputSimd128Register(), i.MemoryOperand(),
+ kScratchDoubleReg);
break;
}
case kIA32S128Load32Splat: {
- __ Vbroadcastss(i.OutputSimd128Register(), i.MemoryOperand());
+ __ S128Load32Splat(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
case kIA32S128Load64Splat: {
@@ -3640,10 +3277,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
uint8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
if (lane < 4) {
__ Pshuflw(dst, src, half_dup);
- __ Pshufd(dst, dst, uint8_t{0});
+ __ Punpcklqdq(dst, dst);
} else {
__ Pshufhw(dst, src, half_dup);
- __ Pshufd(dst, dst, uint8_t{0xaa});
+ __ Punpckhqdq(dst, dst);
}
break;
}
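+        // --- Illustrative aside (not part of the patch): Punpcklqdq dup.
+        // After Pshuflw copies lane `lane4` into all four low 16-bit
+        // slots, the low 64 bits already hold the broadcast; duplicating
+        // that qword with punpcklqdq(dst, dst) finishes the splat and,
+        // unlike pshufd, needs no immediate byte. A hedged model of the
+        // lane < 4 path:
+        //
+        //   #include <array>
+        //   #include <cstdint>
+        //
+        //   std::array<uint16_t, 8> DupLow16(std::array<uint16_t, 8> v,
+        //                                    int lane4) {
+        //     uint16_t lane = v[lane4];
+        //     for (int i = 0; i < 4; ++i) v[i] = lane;    // Pshuflw
+        //     for (int i = 0; i < 4; ++i) v[i + 4] = v[i];  // Punpcklqdq
+        //     return v;
+        //   }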
@@ -3671,10 +3308,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
uint8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
if (lane < 4) {
__ Pshuflw(dst, dst, half_dup);
- __ Pshufd(dst, dst, uint8_t{0});
+ __ Punpcklqdq(dst, dst);
} else {
__ Pshufhw(dst, dst, half_dup);
- __ Pshufd(dst, dst, uint8_t{0xaa});
+ __ Punpckhqdq(dst, dst);
}
break;
}
@@ -3937,17 +3574,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kIA32Word32AtomicPairLoad: {
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ movq(tmp, i.MemoryOperand());
- __ Pextrd(i.OutputRegister(0), tmp, 0);
- __ Pextrd(i.OutputRegister(1), tmp, 1);
+ __ movq(kScratchDoubleReg, i.MemoryOperand());
+ __ Pextrd(i.OutputRegister(0), kScratchDoubleReg, 0);
+ __ Pextrd(i.OutputRegister(1), kScratchDoubleReg, 1);
break;
}
- case kIA32Word32AtomicPairStore: {
+ case kIA32Word32ReleasePairStore: {
+ __ push(ebx);
+ i.MoveInstructionOperandToRegister(ebx, instr->InputAt(1));
+ __ push(ebx);
+ i.MoveInstructionOperandToRegister(ebx, instr->InputAt(0));
+ __ push(ebx);
+ frame_access_state()->IncreaseSPDelta(3);
+ __ movq(kScratchDoubleReg, MemOperand(esp, 0));
+ __ pop(ebx);
+ __ pop(ebx);
+ __ pop(ebx);
+ frame_access_state()->IncreaseSPDelta(-3);
+ __ movq(i.MemoryOperand(2), kScratchDoubleReg);
+ break;
+ }
+ case kIA32Word32SeqCstPairStore: {
Label store;
__ bind(&store);
- __ mov(i.TempRegister(0), i.MemoryOperand(2));
- __ mov(i.TempRegister(1), i.NextMemoryOperand(2));
+ __ mov(eax, i.MemoryOperand(2));
+ __ mov(edx, i.NextMemoryOperand(2));
__ push(ebx);
frame_access_state()->IncreaseSPDelta(1);
i.MoveInstructionOperandToRegister(ebx, instr->InputAt(0));
@@ -3958,27 +3609,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ j(not_equal, &store);
break;
}
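    // --- Illustrative aside (not part of the patch): seq-cst pair store.
    // On ia32 a seq-cst 64-bit store is built from a lock cmpxchg8b loop:
    // the current value sits in edx:eax and the exchange is retried until
    // it succeeds with ebx:ecx holding the new value. A hedged C++ model:
    //
    //   #include <atomic>
    //   #include <cstdint>
    //
    //   void SeqCstPairStore(std::atomic<uint64_t>* slot, uint64_t value) {
    //     uint64_t expected = slot->load(std::memory_order_relaxed);
    //     while (!slot->compare_exchange_weak(expected, value,
    //                                         std::memory_order_seq_cst)) {
    //       // compare_exchange refreshes `expected` on failure, mirroring
    //       // the reload the loop above performs before each retry.
    //     }
    //   }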
- case kWord32AtomicExchangeInt8: {
+ case kAtomicExchangeInt8: {
__ xchg_b(i.InputRegister(0), i.MemoryOperand(1));
__ movsx_b(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kWord32AtomicExchangeUint8: {
+ case kAtomicExchangeUint8: {
__ xchg_b(i.InputRegister(0), i.MemoryOperand(1));
__ movzx_b(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kWord32AtomicExchangeInt16: {
+ case kAtomicExchangeInt16: {
__ xchg_w(i.InputRegister(0), i.MemoryOperand(1));
__ movsx_w(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kWord32AtomicExchangeUint16: {
+ case kAtomicExchangeUint16: {
__ xchg_w(i.InputRegister(0), i.MemoryOperand(1));
__ movzx_w(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kWord32AtomicExchangeWord32: {
+ case kAtomicExchangeWord32: {
__ xchg(i.InputRegister(0), i.MemoryOperand(1));
break;
}
@@ -3998,31 +3649,31 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ j(not_equal, &exchange);
break;
}
- case kWord32AtomicCompareExchangeInt8: {
+ case kAtomicCompareExchangeInt8: {
__ lock();
__ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
__ movsx_b(eax, eax);
break;
}
- case kWord32AtomicCompareExchangeUint8: {
+ case kAtomicCompareExchangeUint8: {
__ lock();
__ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
__ movzx_b(eax, eax);
break;
}
- case kWord32AtomicCompareExchangeInt16: {
+ case kAtomicCompareExchangeInt16: {
__ lock();
__ cmpxchg_w(i.MemoryOperand(2), i.InputRegister(1));
__ movsx_w(eax, eax);
break;
}
- case kWord32AtomicCompareExchangeUint16: {
+ case kAtomicCompareExchangeUint16: {
__ lock();
__ cmpxchg_w(i.MemoryOperand(2), i.InputRegister(1));
__ movzx_w(eax, eax);
break;
}
- case kWord32AtomicCompareExchangeWord32: {
+ case kAtomicCompareExchangeWord32: {
__ lock();
__ cmpxchg(i.MemoryOperand(2), i.InputRegister(1));
break;
@@ -4038,27 +3689,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: { \
+ case kAtomic##op##Int8: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
__ movsx_b(eax, eax); \
break; \
} \
- case kWord32Atomic##op##Uint8: { \
+ case kAtomic##op##Uint8: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_b, cmpxchg_b); \
__ movzx_b(eax, eax); \
break; \
} \
- case kWord32Atomic##op##Int16: { \
+ case kAtomic##op##Int16: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
__ movsx_w(eax, eax); \
break; \
} \
- case kWord32Atomic##op##Uint16: { \
+ case kAtomic##op##Uint16: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov_w, cmpxchg_w); \
__ movzx_w(eax, eax); \
break; \
} \
- case kWord32Atomic##op##Word32: { \
+ case kAtomic##op##Word32: { \
ASSEMBLE_ATOMIC_BINOP(inst, mov, cmpxchg); \
break; \
}
@@ -4107,16 +3758,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ j(not_equal, &binop);
break;
}
- case kWord32AtomicLoadInt8:
- case kWord32AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kWord32AtomicLoadUint16:
- case kWord32AtomicLoadWord32:
- case kWord32AtomicStoreWord8:
- case kWord32AtomicStoreWord16:
- case kWord32AtomicStoreWord32:
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
UNREACHABLE(); // Won't be generated by instruction selector.
- break;
}
return kSuccess;
}
@@ -4126,41 +3776,29 @@ static Condition FlagsConditionToCondition(FlagsCondition condition) {
case kUnorderedEqual:
case kEqual:
return equal;
- break;
case kUnorderedNotEqual:
case kNotEqual:
return not_equal;
- break;
case kSignedLessThan:
return less;
- break;
case kSignedGreaterThanOrEqual:
return greater_equal;
- break;
case kSignedLessThanOrEqual:
return less_equal;
- break;
case kSignedGreaterThan:
return greater;
- break;
case kUnsignedLessThan:
return below;
- break;
case kUnsignedGreaterThanOrEqual:
return above_equal;
- break;
case kUnsignedLessThanOrEqual:
return below_equal;
- break;
case kUnsignedGreaterThan:
return above;
- break;
case kOverflow:
return overflow;
- break;
case kNotOverflow:
return no_overflow;
- break;
default:
UNREACHABLE();
}
@@ -4183,12 +3821,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ jmp(flabel);
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(860429): Remove remaining poisoning infrastructure on ia32.
- UNREACHABLE();
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -4648,18 +4280,24 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// The number of arguments without the receiver is
// max(argc_reg, parameter_slots-1), and the receiver is added in
// DropArguments().
- int parameter_slots_without_receiver = parameter_slots - 1;
Label mismatch_return;
Register scratch_reg = edx;
DCHECK_NE(argc_reg, scratch_reg);
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & scratch_reg.bit());
- __ cmp(argc_reg, Immediate(parameter_slots_without_receiver));
+ if (kJSArgcIncludesReceiver) {
+ __ cmp(argc_reg, Immediate(parameter_slots));
+ } else {
+ int parameter_slots_without_receiver = parameter_slots - 1;
+ __ cmp(argc_reg, Immediate(parameter_slots_without_receiver));
+ }
__ j(greater, &mismatch_return, Label::kNear);
__ Ret(parameter_slots * kSystemPointerSize, scratch_reg);
__ bind(&mismatch_return);
__ DropArguments(argc_reg, scratch_reg, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
// We use a return instead of a jump for better return address prediction.
__ Ret();
} else if (additional_pop_count->IsImmediate()) {
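
The return-sequence change above keys off kJSArgcIncludesReceiver: when the build counts the receiver in the argument-count register, the mismatch comparison uses parameter_slots directly instead of parameter_slots - 1, and DropArguments is told the count already includes the receiver. A one-function sketch of the arithmetic (illustrative name, not V8's):

    int ExpectedArgcForMatch(int parameter_slots, bool argc_includes_receiver) {
      // With the receiver counted in argc, compare against the full static slot
      // count; otherwise the receiver slot is excluded first.
      return argc_includes_receiver ? parameter_slots : parameter_slots - 1;
    }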
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
index 42af3326f3..bb54c726aa 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
+++ b/deps/v8/src/compiler/backend/ia32/instruction-codes-ia32.h
@@ -48,26 +48,14 @@ namespace compiler {
V(IA32MFence) \
V(IA32LFence) \
V(SSEFloat32Cmp) \
- V(SSEFloat32Add) \
- V(SSEFloat32Sub) \
- V(SSEFloat32Mul) \
- V(SSEFloat32Div) \
- V(SSEFloat32Abs) \
- V(SSEFloat32Neg) \
V(SSEFloat32Sqrt) \
V(SSEFloat32Round) \
V(SSEFloat64Cmp) \
- V(SSEFloat64Add) \
- V(SSEFloat64Sub) \
- V(SSEFloat64Mul) \
- V(SSEFloat64Div) \
V(SSEFloat64Mod) \
V(SSEFloat32Max) \
V(SSEFloat64Max) \
V(SSEFloat32Min) \
V(SSEFloat64Min) \
- V(SSEFloat64Abs) \
- V(SSEFloat64Neg) \
V(SSEFloat64Sqrt) \
V(SSEFloat64Round) \
V(SSEFloat32ToFloat64) \
@@ -86,18 +74,18 @@ namespace compiler {
V(SSEFloat64InsertHighWord32) \
V(SSEFloat64LoadLowWord32) \
V(SSEFloat64SilenceNaN) \
- V(AVXFloat32Add) \
- V(AVXFloat32Sub) \
- V(AVXFloat32Mul) \
- V(AVXFloat32Div) \
- V(AVXFloat64Add) \
- V(AVXFloat64Sub) \
- V(AVXFloat64Mul) \
- V(AVXFloat64Div) \
- V(AVXFloat64Abs) \
- V(AVXFloat64Neg) \
- V(AVXFloat32Abs) \
- V(AVXFloat32Neg) \
+ V(Float32Add) \
+ V(Float32Sub) \
+ V(Float64Add) \
+ V(Float64Sub) \
+ V(Float32Mul) \
+ V(Float32Div) \
+ V(Float64Mul) \
+ V(Float64Div) \
+ V(Float64Abs) \
+ V(Float64Neg) \
+ V(Float32Abs) \
+ V(Float32Neg) \
V(IA32Movsxbl) \
V(IA32Movzxbl) \
V(IA32Movb) \
@@ -177,14 +165,10 @@ namespace compiler {
V(AVXF32x4Min) \
V(SSEF32x4Max) \
V(AVXF32x4Max) \
- V(SSEF32x4Eq) \
- V(AVXF32x4Eq) \
- V(SSEF32x4Ne) \
- V(AVXF32x4Ne) \
- V(SSEF32x4Lt) \
- V(AVXF32x4Lt) \
- V(SSEF32x4Le) \
- V(AVXF32x4Le) \
+ V(IA32F32x4Eq) \
+ V(IA32F32x4Ne) \
+ V(IA32F32x4Lt) \
+ V(IA32F32x4Le) \
V(IA32F32x4Pmin) \
V(IA32F32x4Pmax) \
V(IA32F32x4Round) \
@@ -197,33 +181,22 @@ namespace compiler {
V(IA32I32x4Neg) \
V(IA32I32x4Shl) \
V(IA32I32x4ShrS) \
- V(SSEI32x4Add) \
- V(AVXI32x4Add) \
- V(SSEI32x4Sub) \
- V(AVXI32x4Sub) \
- V(SSEI32x4Mul) \
- V(AVXI32x4Mul) \
- V(SSEI32x4MinS) \
- V(AVXI32x4MinS) \
- V(SSEI32x4MaxS) \
- V(AVXI32x4MaxS) \
- V(SSEI32x4Eq) \
- V(AVXI32x4Eq) \
- V(SSEI32x4Ne) \
- V(AVXI32x4Ne) \
- V(SSEI32x4GtS) \
- V(AVXI32x4GtS) \
- V(SSEI32x4GeS) \
- V(AVXI32x4GeS) \
+ V(IA32I32x4Add) \
+ V(IA32I32x4Sub) \
+ V(IA32I32x4Mul) \
+ V(IA32I32x4MinS) \
+ V(IA32I32x4MaxS) \
+ V(IA32I32x4Eq) \
+ V(IA32I32x4Ne) \
+ V(IA32I32x4GtS) \
+ V(IA32I32x4GeS) \
V(SSEI32x4UConvertF32x4) \
V(AVXI32x4UConvertF32x4) \
V(IA32I32x4UConvertI16x8Low) \
V(IA32I32x4UConvertI16x8High) \
V(IA32I32x4ShrU) \
- V(SSEI32x4MinU) \
- V(AVXI32x4MinU) \
- V(SSEI32x4MaxU) \
- V(AVXI32x4MaxU) \
+ V(IA32I32x4MinU) \
+ V(IA32I32x4MaxU) \
V(SSEI32x4GtU) \
V(AVXI32x4GtU) \
V(SSEI32x4GeU) \
@@ -246,43 +219,28 @@ namespace compiler {
V(IA32I16x8Neg) \
V(IA32I16x8Shl) \
V(IA32I16x8ShrS) \
- V(SSEI16x8SConvertI32x4) \
- V(AVXI16x8SConvertI32x4) \
- V(SSEI16x8Add) \
- V(AVXI16x8Add) \
- V(SSEI16x8AddSatS) \
- V(AVXI16x8AddSatS) \
- V(SSEI16x8Sub) \
- V(AVXI16x8Sub) \
- V(SSEI16x8SubSatS) \
- V(AVXI16x8SubSatS) \
- V(SSEI16x8Mul) \
- V(AVXI16x8Mul) \
- V(SSEI16x8MinS) \
- V(AVXI16x8MinS) \
- V(SSEI16x8MaxS) \
- V(AVXI16x8MaxS) \
- V(SSEI16x8Eq) \
- V(AVXI16x8Eq) \
+ V(IA32I16x8SConvertI32x4) \
+ V(IA32I16x8Add) \
+ V(IA32I16x8AddSatS) \
+ V(IA32I16x8Sub) \
+ V(IA32I16x8SubSatS) \
+ V(IA32I16x8Mul) \
+ V(IA32I16x8MinS) \
+ V(IA32I16x8MaxS) \
+ V(IA32I16x8Eq) \
V(SSEI16x8Ne) \
V(AVXI16x8Ne) \
- V(SSEI16x8GtS) \
- V(AVXI16x8GtS) \
+ V(IA32I16x8GtS) \
V(SSEI16x8GeS) \
V(AVXI16x8GeS) \
V(IA32I16x8UConvertI8x16Low) \
V(IA32I16x8UConvertI8x16High) \
V(IA32I16x8ShrU) \
- V(SSEI16x8UConvertI32x4) \
- V(AVXI16x8UConvertI32x4) \
- V(SSEI16x8AddSatU) \
- V(AVXI16x8AddSatU) \
- V(SSEI16x8SubSatU) \
- V(AVXI16x8SubSatU) \
- V(SSEI16x8MinU) \
- V(AVXI16x8MinU) \
- V(SSEI16x8MaxU) \
- V(AVXI16x8MaxU) \
+ V(IA32I16x8UConvertI32x4) \
+ V(IA32I16x8AddSatU) \
+ V(IA32I16x8SubSatU) \
+ V(IA32I16x8MinU) \
+ V(IA32I16x8MaxU) \
V(SSEI16x8GtU) \
V(AVXI16x8GtU) \
V(SSEI16x8GeU) \
@@ -305,8 +263,7 @@ namespace compiler {
V(IA32Pextrb) \
V(IA32Pextrw) \
V(IA32S128Store32Lane) \
- V(SSEI8x16SConvertI16x8) \
- V(AVXI8x16SConvertI16x8) \
+ V(IA32I8x16SConvertI16x8) \
V(IA32I8x16Neg) \
V(IA32I8x16Shl) \
V(IA32I8x16ShrS) \
@@ -322,8 +279,7 @@ namespace compiler {
V(IA32I8x16GtS) \
V(SSEI8x16GeS) \
V(AVXI8x16GeS) \
- V(SSEI8x16UConvertI16x8) \
- V(AVXI8x16UConvertI16x8) \
+ V(IA32I8x16UConvertI16x8) \
V(IA32I8x16AddSatU) \
V(IA32I8x16SubSatU) \
V(IA32I8x16ShrU) \
@@ -341,12 +297,9 @@ namespace compiler {
V(IA32S128Zero) \
V(IA32S128AllOnes) \
V(IA32S128Not) \
- V(SSES128And) \
- V(AVXS128And) \
- V(SSES128Or) \
- V(AVXS128Or) \
- V(SSES128Xor) \
- V(AVXS128Xor) \
+ V(IA32S128And) \
+ V(IA32S128Or) \
+ V(IA32S128Xor) \
V(IA32S128Select) \
V(IA32S128AndNot) \
V(IA32I8x16Swizzle) \
@@ -402,7 +355,8 @@ namespace compiler {
V(IA32I16x8AllTrue) \
V(IA32I8x16AllTrue) \
V(IA32Word32AtomicPairLoad) \
- V(IA32Word32AtomicPairStore) \
+ V(IA32Word32ReleasePairStore) \
+ V(IA32Word32SeqCstPairStore) \
V(IA32Word32AtomicPairAdd) \
V(IA32Word32AtomicPairSub) \
V(IA32Word32AtomicPairAnd) \
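
Most of the deletions in this header collapse paired kSSE*/kAVX* opcodes into a single opcode: the SSE-versus-AVX decision moves out of the instruction name and into the code generator, which branches on the CPU feature at assembly time. A sketch of the resulting codegen shape — a fragment modeled on V8's ia32 code generator, not the literal macro this patch adds, and not compilable on its own:

    case kFloat32Add:
      if (CpuFeatures::IsSupported(AVX)) {
        CpuFeatureScope avx_scope(tasm(), AVX);
        __ vaddss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
                  i.InputOperand(1));  // three-operand AVX encoding
      } else {
        // destructive two-operand SSE encoding: output aliases input 0
        __ addss(i.InputDoubleRegister(0), i.InputOperand(1));
      }
      break;

Halving the opcode count matters here because of the 9-bit ArchOpcodeField asserted later in instruction-codes.h.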
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
index 278e7ea99b..3910d45195 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-scheduler-ia32.cc
@@ -49,26 +49,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Bswap:
case kIA32Lea:
case kSSEFloat32Cmp:
- case kSSEFloat32Add:
- case kSSEFloat32Sub:
- case kSSEFloat32Mul:
- case kSSEFloat32Div:
- case kSSEFloat32Abs:
- case kSSEFloat32Neg:
case kSSEFloat32Sqrt:
case kSSEFloat32Round:
case kSSEFloat64Cmp:
- case kSSEFloat64Add:
- case kSSEFloat64Sub:
- case kSSEFloat64Mul:
- case kSSEFloat64Div:
case kSSEFloat64Mod:
case kSSEFloat32Max:
case kSSEFloat64Max:
case kSSEFloat32Min:
case kSSEFloat64Min:
- case kSSEFloat64Abs:
- case kSSEFloat64Neg:
case kSSEFloat64Sqrt:
case kSSEFloat64Round:
case kSSEFloat32ToFloat64:
@@ -87,18 +75,18 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEFloat64InsertHighWord32:
case kSSEFloat64LoadLowWord32:
case kSSEFloat64SilenceNaN:
- case kAVXFloat32Add:
- case kAVXFloat32Sub:
- case kAVXFloat32Mul:
- case kAVXFloat32Div:
- case kAVXFloat64Add:
- case kAVXFloat64Sub:
- case kAVXFloat64Mul:
- case kAVXFloat64Div:
- case kAVXFloat64Abs:
- case kAVXFloat64Neg:
- case kAVXFloat32Abs:
- case kAVXFloat32Neg:
+ case kFloat32Add:
+ case kFloat32Sub:
+ case kFloat64Add:
+ case kFloat64Sub:
+ case kFloat32Mul:
+ case kFloat32Div:
+ case kFloat64Mul:
+ case kFloat64Div:
+ case kFloat64Abs:
+ case kFloat64Neg:
+ case kFloat32Abs:
+ case kFloat32Neg:
case kIA32BitcastFI:
case kIA32BitcastIF:
case kIA32F64x2Splat:
@@ -162,14 +150,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXF32x4Min:
case kSSEF32x4Max:
case kAVXF32x4Max:
- case kSSEF32x4Eq:
- case kAVXF32x4Eq:
- case kSSEF32x4Ne:
- case kAVXF32x4Ne:
- case kSSEF32x4Lt:
- case kAVXF32x4Lt:
- case kSSEF32x4Le:
- case kAVXF32x4Le:
+ case kIA32F32x4Eq:
+ case kIA32F32x4Ne:
+ case kIA32F32x4Lt:
+ case kIA32F32x4Le:
case kIA32F32x4Pmin:
case kIA32F32x4Pmax:
case kIA32F32x4Round:
@@ -182,33 +166,22 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I32x4Neg:
case kIA32I32x4Shl:
case kIA32I32x4ShrS:
- case kSSEI32x4Add:
- case kAVXI32x4Add:
- case kSSEI32x4Sub:
- case kAVXI32x4Sub:
- case kSSEI32x4Mul:
- case kAVXI32x4Mul:
- case kSSEI32x4MinS:
- case kAVXI32x4MinS:
- case kSSEI32x4MaxS:
- case kAVXI32x4MaxS:
- case kSSEI32x4Eq:
- case kAVXI32x4Eq:
- case kSSEI32x4Ne:
- case kAVXI32x4Ne:
- case kSSEI32x4GtS:
- case kAVXI32x4GtS:
- case kSSEI32x4GeS:
- case kAVXI32x4GeS:
+ case kIA32I32x4Add:
+ case kIA32I32x4Sub:
+ case kIA32I32x4Mul:
+ case kIA32I32x4MinS:
+ case kIA32I32x4MaxS:
+ case kIA32I32x4Eq:
+ case kIA32I32x4Ne:
+ case kIA32I32x4GtS:
+ case kIA32I32x4GeS:
case kSSEI32x4UConvertF32x4:
case kAVXI32x4UConvertF32x4:
case kIA32I32x4UConvertI16x8Low:
case kIA32I32x4UConvertI16x8High:
case kIA32I32x4ShrU:
- case kSSEI32x4MinU:
- case kAVXI32x4MinU:
- case kSSEI32x4MaxU:
- case kAVXI32x4MaxU:
+ case kIA32I32x4MinU:
+ case kIA32I32x4MaxU:
case kSSEI32x4GtU:
case kAVXI32x4GtU:
case kSSEI32x4GeU:
@@ -231,43 +204,28 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I16x8Neg:
case kIA32I16x8Shl:
case kIA32I16x8ShrS:
- case kSSEI16x8SConvertI32x4:
- case kAVXI16x8SConvertI32x4:
- case kSSEI16x8Add:
- case kAVXI16x8Add:
- case kSSEI16x8AddSatS:
- case kAVXI16x8AddSatS:
- case kSSEI16x8Sub:
- case kAVXI16x8Sub:
- case kSSEI16x8SubSatS:
- case kAVXI16x8SubSatS:
- case kSSEI16x8Mul:
- case kAVXI16x8Mul:
- case kSSEI16x8MinS:
- case kAVXI16x8MinS:
- case kSSEI16x8MaxS:
- case kAVXI16x8MaxS:
- case kSSEI16x8Eq:
- case kAVXI16x8Eq:
+ case kIA32I16x8SConvertI32x4:
+ case kIA32I16x8Add:
+ case kIA32I16x8AddSatS:
+ case kIA32I16x8Sub:
+ case kIA32I16x8SubSatS:
+ case kIA32I16x8Mul:
+ case kIA32I16x8MinS:
+ case kIA32I16x8MaxS:
+ case kIA32I16x8Eq:
case kSSEI16x8Ne:
case kAVXI16x8Ne:
- case kSSEI16x8GtS:
- case kAVXI16x8GtS:
+ case kIA32I16x8GtS:
case kSSEI16x8GeS:
case kAVXI16x8GeS:
case kIA32I16x8UConvertI8x16Low:
case kIA32I16x8UConvertI8x16High:
case kIA32I16x8ShrU:
- case kSSEI16x8UConvertI32x4:
- case kAVXI16x8UConvertI32x4:
- case kSSEI16x8AddSatU:
- case kAVXI16x8AddSatU:
- case kSSEI16x8SubSatU:
- case kAVXI16x8SubSatU:
- case kSSEI16x8MinU:
- case kAVXI16x8MinU:
- case kSSEI16x8MaxU:
- case kAVXI16x8MaxU:
+ case kIA32I16x8UConvertI32x4:
+ case kIA32I16x8AddSatU:
+ case kIA32I16x8SubSatU:
+ case kIA32I16x8MinU:
+ case kIA32I16x8MaxU:
case kSSEI16x8GtU:
case kAVXI16x8GtU:
case kSSEI16x8GeU:
@@ -290,8 +248,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Pextrb:
case kIA32Pextrw:
case kIA32S128Store32Lane:
- case kSSEI8x16SConvertI16x8:
- case kAVXI8x16SConvertI16x8:
+ case kIA32I8x16SConvertI16x8:
case kIA32I8x16Neg:
case kIA32I8x16Shl:
case kIA32I8x16ShrS:
@@ -307,8 +264,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32I8x16GtS:
case kSSEI8x16GeS:
case kAVXI8x16GeS:
- case kSSEI8x16UConvertI16x8:
- case kAVXI8x16UConvertI16x8:
+ case kIA32I8x16UConvertI16x8:
case kIA32I8x16AddSatU:
case kIA32I8x16SubSatU:
case kIA32I8x16ShrU:
@@ -326,12 +282,9 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32S128Zero:
case kIA32S128AllOnes:
case kIA32S128Not:
- case kSSES128And:
- case kAVXS128And:
- case kSSES128Or:
- case kAVXS128Or:
- case kSSES128Xor:
- case kAVXS128Xor:
+ case kIA32S128And:
+ case kIA32S128Or:
+ case kIA32S128Xor:
case kIA32S128Select:
case kIA32S128AndNot:
case kIA32I8x16Swizzle:
@@ -423,7 +376,8 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kIA32Word32AtomicPairLoad:
return kIsLoadOperation;
- case kIA32Word32AtomicPairStore:
+ case kIA32Word32ReleasePairStore:
+ case kIA32Word32SeqCstPairStore:
case kIA32Word32AtomicPairAdd:
case kIA32Word32AtomicPairSub:
case kIA32Word32AtomicPairAnd:
@@ -447,7 +401,7 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
// Basic latency modeling for ia32 instructions. They have been determined
// in an empirical way.
switch (instr->arch_opcode()) {
- case kSSEFloat64Mul:
+ case kFloat64Mul:
return 5;
case kIA32Imul:
case kIA32ImulHigh:
@@ -455,18 +409,18 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kSSEFloat32Cmp:
case kSSEFloat64Cmp:
return 9;
- case kSSEFloat32Add:
- case kSSEFloat32Sub:
- case kSSEFloat32Abs:
- case kSSEFloat32Neg:
- case kSSEFloat64Add:
- case kSSEFloat64Sub:
+ case kFloat32Add:
+ case kFloat32Sub:
+ case kFloat64Add:
+ case kFloat64Sub:
+ case kFloat32Abs:
+ case kFloat32Neg:
case kSSEFloat64Max:
case kSSEFloat64Min:
- case kSSEFloat64Abs:
- case kSSEFloat64Neg:
+ case kFloat64Abs:
+ case kFloat64Neg:
return 5;
- case kSSEFloat32Mul:
+ case kFloat32Mul:
return 4;
case kSSEFloat32ToFloat64:
case kSSEFloat64ToFloat32:
@@ -484,9 +438,9 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return 33;
case kIA32Udiv:
return 26;
- case kSSEFloat32Div:
+ case kFloat32Div:
return 35;
- case kSSEFloat64Div:
+ case kFloat64Div:
return 63;
case kSSEFloat32Sqrt:
case kSSEFloat64Sqrt:
diff --git a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
index f36fdb2935..ce792692f0 100644
--- a/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
+++ b/deps/v8/src/compiler/backend/ia32/instruction-selector-ia32.cc
@@ -246,6 +246,41 @@ class IA32OperandGenerator final : public OperandGenerator {
namespace {
+ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
+ ArchOpcode opcode;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
+ opcode = kIA32Movss;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kIA32Movsd;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsSigned() ? kIA32Movsxbl : kIA32Movzxbl;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsSigned() ? kIA32Movsxwl : kIA32Movzxwl;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ opcode = kIA32Movl;
+ break;
+ case MachineRepresentation::kSimd128:
+ opcode = kIA32Movdqu;
+ break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
+ case MachineRepresentation::kNone:
+ UNREACHABLE();
+ }
+ return opcode;
+}
+
void VisitRO(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
IA32OperandGenerator g(selector);
Node* input = node->InputAt(0);
@@ -280,27 +315,24 @@ void VisitRR(InstructionSelector* selector, Node* node,
}
void VisitRROFloat(InstructionSelector* selector, Node* node,
- ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ ArchOpcode opcode) {
IA32OperandGenerator g(selector);
InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
InstructionOperand operand1 = g.Use(node->InputAt(1));
if (selector->IsSupported(AVX)) {
- selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0, operand1);
+ selector->Emit(opcode, g.DefineAsRegister(node), operand0, operand1);
} else {
- selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
+ selector->Emit(opcode, g.DefineSameAsFirst(node), operand0, operand1);
}
}
void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
- ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ ArchOpcode opcode) {
IA32OperandGenerator g(selector);
- InstructionOperand temps[] = {g.TempSimd128Register()};
if (selector->IsSupported(AVX)) {
- selector->Emit(avx_opcode, g.DefineAsRegister(node), g.UseUnique(input),
- arraysize(temps), temps);
+ selector->Emit(opcode, g.DefineAsRegister(node), g.Use(input));
} else {
- selector->Emit(sse_opcode, g.DefineSameAsFirst(node),
- g.UseUniqueRegister(input), arraysize(temps), temps);
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
}
}
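
VisitRROFloat and VisitFloatUnop now take a single opcode but still pick register constraints by feature: AVX's three-operand forms let the result live in any register (DefineAsRegister), while SSE's destructive encodings force the output to reuse the first input (DefineSameAsFirst). A scalar analog of the difference, with illustrative names:

    // SSE-style: the operation overwrites its first operand, so the register
    // allocator must assign dst and src the same register.
    float NegDestructive(float& dst_is_src) { return dst_is_src = -dst_is_src; }

    // AVX-style: inputs are preserved and dst is chosen freely.
    float NegThreeOperand(float src) { return -src; }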
@@ -329,7 +361,7 @@ void VisitRROSimd(InstructionSelector* selector, Node* node,
InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
if (selector->IsSupported(AVX)) {
selector->Emit(avx_opcode, g.DefineAsRegister(node), operand0,
- g.Use(node->InputAt(1)));
+ g.UseRegister(node->InputAt(1)));
} else {
selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0,
g.UseRegister(node->InputAt(1)));
@@ -389,14 +421,28 @@ void VisitRROSimdShift(InstructionSelector* selector, Node* node,
}
}
-void VisitRROI8x16SimdShift(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+void VisitI8x16Shift(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode) {
IA32OperandGenerator g(selector);
- InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
- InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1));
- InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
- selector->Emit(opcode, g.DefineSameAsFirst(node), operand0, operand1,
- arraysize(temps), temps);
+ InstructionOperand output = CpuFeatures::IsSupported(AVX)
+ ? g.UseRegister(node)
+ : g.DefineSameAsFirst(node);
+
+ if (g.CanBeImmediate(node->InputAt(1))) {
+ if (opcode == kIA32I8x16ShrS) {
+ selector->Emit(opcode, output, g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(node->InputAt(1)));
+ } else {
+ InstructionOperand temps[] = {g.TempRegister()};
+ selector->Emit(opcode, output, g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(node->InputAt(1)), arraysize(temps), temps);
+ }
+ } else {
+ InstructionOperand operand0 = g.UseUniqueRegister(node->InputAt(0));
+ InstructionOperand operand1 = g.UseUniqueRegister(node->InputAt(1));
+ InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
+ selector->Emit(opcode, output, operand0, operand1, arraysize(temps), temps);
+ }
}
} // namespace
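
The new VisitI8x16Shift helper above centralizes the three i8x16 shift visitors (rewritten in a later hunk). The temp registers exist because x86 SIMD has no 8-bit shift instruction: byte lanes are shifted with a 16-bit shift plus a mask that clears the bits carried in from the neighboring byte, and only the immediate arithmetic right shift (kIA32I8x16ShrS) gets away without a mask temp. A scalar model of the masking trick for a left shift over two packed bytes:

    #include <cstdint>

    uint16_t ShlTwoPackedBytes(uint16_t packed, int shift) {
      uint16_t shifted = static_cast<uint16_t>(packed << shift);       // psllw
      uint16_t mask = static_cast<uint16_t>((0xFF << shift) & 0xFF);   // per-byte mask
      mask = static_cast<uint16_t>(mask | (mask << 8));                // splat to both lanes
      return shifted & mask;  // clear bits that crossed the byte boundary
    }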
@@ -521,72 +567,110 @@ void InstructionSelector::VisitLoadTransform(Node* node) {
Emit(code, 1, outputs, input_count, inputs);
}
+void InstructionSelector::VisitLoad(Node* node, Node* value,
+ InstructionCode opcode) {
+ IA32OperandGenerator g(this);
+ InstructionOperand outputs[1];
+ outputs[0] = g.DefineAsRegister(node);
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ AddressingMode mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ InstructionCode code = opcode | AddressingModeField::encode(mode);
+ Emit(code, 1, outputs, input_count, inputs);
+}
+
void InstructionSelector::VisitLoad(Node* node) {
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ DCHECK(!load_rep.IsMapWord());
+ VisitLoad(node, node, GetLoadOpcode(load_rep));
+}
- ArchOpcode opcode;
- switch (load_rep.representation()) {
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
+namespace {
+
+ArchOpcode GetStoreOpcode(MachineRepresentation rep) {
+ switch (rep) {
case MachineRepresentation::kFloat32:
- opcode = kIA32Movss;
- break;
+ return kIA32Movss;
case MachineRepresentation::kFloat64:
- opcode = kIA32Movsd;
- break;
+ return kIA32Movsd;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kWord8:
- opcode = load_rep.IsSigned() ? kIA32Movsxbl : kIA32Movzxbl;
- break;
+ return kIA32Movb;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kIA32Movsxwl : kIA32Movzxwl;
- break;
+ return kIA32Movw;
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
case MachineRepresentation::kTagged: // Fall through.
case MachineRepresentation::kWord32:
- opcode = kIA32Movl;
- break;
+ return kIA32Movl;
case MachineRepresentation::kSimd128:
- opcode = kIA32Movdqu;
- break;
+ return kIA32Movdqu;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed: // Fall through.
case MachineRepresentation::kWord64: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
- case MachineRepresentation::kMapWord:
UNREACHABLE();
}
+}
- IA32OperandGenerator g(this);
- InstructionOperand outputs[1];
- outputs[0] = g.DefineAsRegister(node);
- InstructionOperand inputs[3];
- size_t input_count = 0;
- AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
- InstructionCode code = opcode | AddressingModeField::encode(mode);
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- code |= AccessModeField::encode(kMemoryAccessPoisoned);
+ArchOpcode GetSeqCstStoreOpcode(MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ return kAtomicExchangeInt8;
+ case MachineRepresentation::kWord16:
+ return kAtomicExchangeInt16;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord32:
+ return kAtomicExchangeWord32;
+ default:
+ UNREACHABLE();
}
- Emit(code, 1, outputs, input_count, inputs);
}
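
GetSeqCstStoreOpcode routes sequentially consistent stores through the atomic-exchange opcodes: on x86 a plain MOV is only release-strength, and the full barrier comes from `xchg`'s implicit lock (the mapping table linked in the comments below spells this out). In std::atomic terms:

    #include <atomic>

    std::atomic<int> cell;

    void StoreSeqCst(int v) { cell.store(v, std::memory_order_seq_cst); }   // compiles to xchg
    void StoreRelease(int v) { cell.store(v, std::memory_order_release); }  // compiles to mov

The old value the exchange returns is simply unused, which is why the Int8/Int16/Word32 exchange opcodes can stand in for stores.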
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+void VisitAtomicExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, MachineRepresentation rep) {
+ IA32OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
-void InstructionSelector::VisitProtectedLoad(Node* node) {
- // TODO(eholk)
- UNIMPLEMENTED();
+ AddressingMode addressing_mode;
+ InstructionOperand value_operand = (rep == MachineRepresentation::kWord8)
+ ? g.UseFixed(value, edx)
+ : g.UseUniqueRegister(value);
+ InstructionOperand inputs[] = {
+ value_operand, g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ InstructionOperand outputs[] = {
+ (rep == MachineRepresentation::kWord8)
+ // Using DefineSameAsFirst requires the register to be unallocated.
+ ? g.DefineAsFixed(node, edx)
+ : g.DefineSameAsFirst(node)};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ selector->Emit(code, 1, outputs, arraysize(inputs), inputs);
}
-void InstructionSelector::VisitStore(Node* node) {
- IA32OperandGenerator g(this);
+void VisitStoreCommon(InstructionSelector* selector, Node* node,
+ StoreRepresentation store_rep,
+ base::Optional<AtomicMemoryOrder> atomic_order) {
+ IA32OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = StoreRepresentationOf(node->op());
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
MachineRepresentation rep = store_rep.representation();
+ const bool is_seqcst =
+ atomic_order && *atomic_order == AtomicMemoryOrder::kSeqCst;
if (FLAG_enable_unconditional_write_barriers && CanBeTaggedPointer(rep)) {
write_barrier_kind = kFullWriteBarrier;
@@ -603,48 +687,23 @@ void InstructionSelector::VisitStore(Node* node) {
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
size_t const temp_count = arraysize(temps);
- InstructionCode code = kArchStoreWithWriteBarrier;
+ InstructionCode code = is_seqcst ? kArchAtomicStoreWithWriteBarrier
+ : kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
- Emit(code, 0, nullptr, arraysize(inputs), inputs, temp_count, temps);
+ selector->Emit(code, 0, nullptr, arraysize(inputs), inputs, temp_count,
+ temps);
+ } else if (is_seqcst) {
+ VisitAtomicExchange(selector, node, GetSeqCstStoreOpcode(rep), rep);
} else {
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kFloat32:
- opcode = kIA32Movss;
- break;
- case MachineRepresentation::kFloat64:
- opcode = kIA32Movsd;
- break;
- case MachineRepresentation::kBit: // Fall through.
- case MachineRepresentation::kWord8:
- opcode = kIA32Movb;
- break;
- case MachineRepresentation::kWord16:
- opcode = kIA32Movw;
- break;
- case MachineRepresentation::kTaggedSigned: // Fall through.
- case MachineRepresentation::kTaggedPointer: // Fall through.
- case MachineRepresentation::kTagged: // Fall through.
- case MachineRepresentation::kWord32:
- opcode = kIA32Movl;
- break;
- case MachineRepresentation::kSimd128:
- opcode = kIA32Movdqu;
- break;
- case MachineRepresentation::kCompressedPointer: // Fall through.
- case MachineRepresentation::kCompressed: // Fall through.
- case MachineRepresentation::kWord64: // Fall through.
- case MachineRepresentation::kMapWord: // Fall through.
- case MachineRepresentation::kNone:
- UNREACHABLE();
- }
+ // Release and non-atomic stores emit MOV.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
InstructionOperand val;
if (g.CanBeImmediate(value)) {
val = g.UseImmediate(value);
- } else if (rep == MachineRepresentation::kWord8 ||
- rep == MachineRepresentation::kBit) {
+ } else if (!atomic_order && (rep == MachineRepresentation::kWord8 ||
+ rep == MachineRepresentation::kBit)) {
val = g.UseByteRegister(value);
} else {
val = g.UseRegister(value);
@@ -655,13 +714,20 @@ void InstructionSelector::VisitStore(Node* node) {
AddressingMode addressing_mode =
g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
InstructionCode code =
- opcode | AddressingModeField::encode(addressing_mode);
+ GetStoreOpcode(rep) | AddressingModeField::encode(addressing_mode);
inputs[input_count++] = val;
- Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
- inputs);
+ selector->Emit(code, 0, static_cast<InstructionOperand*>(nullptr),
+ input_count, inputs);
}
}
+} // namespace
+
+void InstructionSelector::VisitStore(Node* node) {
+ VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
+ base::nullopt);
+}
+
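
VisitStoreCommon now serves plain stores and atomic stores alike, with an optional memory order selecting the strategy. Condensed into a standalone decision function (write-barrier details elided, names illustrative, std::optional standing in for base::Optional):

    #include <optional>

    enum class Order { kRelease, kSeqCst };
    enum class Strategy {
      kStoreWithWriteBarrier, kAtomicStoreWithWriteBarrier, kXchg, kMov
    };

    Strategy PickStoreStrategy(bool needs_write_barrier, std::optional<Order> order) {
      const bool seq_cst = order && *order == Order::kSeqCst;
      if (needs_write_barrier) {
        return seq_cst ? Strategy::kAtomicStoreWithWriteBarrier
                       : Strategy::kStoreWithWriteBarrier;
      }
      if (seq_cst) return Strategy::kXchg;  // full barrier via atomic exchange
      return Strategy::kMov;                // plain and release stores both emit mov
    }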
void InstructionSelector::VisitProtectedStore(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -1106,31 +1172,31 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
V(F64x2Trunc, kIA32F64x2Round | MiscField::encode(kRoundToZero)) \
V(F64x2NearestInt, kIA32F64x2Round | MiscField::encode(kRoundToNearest))
-#define RRO_FLOAT_OP_LIST(V) \
- V(Float32Add, kAVXFloat32Add, kSSEFloat32Add) \
- V(Float64Add, kAVXFloat64Add, kSSEFloat64Add) \
- V(Float32Sub, kAVXFloat32Sub, kSSEFloat32Sub) \
- V(Float64Sub, kAVXFloat64Sub, kSSEFloat64Sub) \
- V(Float32Mul, kAVXFloat32Mul, kSSEFloat32Mul) \
- V(Float64Mul, kAVXFloat64Mul, kSSEFloat64Mul) \
- V(Float32Div, kAVXFloat32Div, kSSEFloat32Div) \
- V(Float64Div, kAVXFloat64Div, kSSEFloat64Div) \
- V(F64x2Add, kIA32F64x2Add, kIA32F64x2Add) \
- V(F64x2Sub, kIA32F64x2Sub, kIA32F64x2Sub) \
- V(F64x2Mul, kIA32F64x2Mul, kIA32F64x2Mul) \
- V(F64x2Div, kIA32F64x2Div, kIA32F64x2Div) \
- V(F64x2Eq, kIA32F64x2Eq, kIA32F64x2Eq) \
- V(F64x2Ne, kIA32F64x2Ne, kIA32F64x2Ne) \
- V(F64x2Lt, kIA32F64x2Lt, kIA32F64x2Lt) \
- V(F64x2Le, kIA32F64x2Le, kIA32F64x2Le)
-
-#define FLOAT_UNOP_LIST(V) \
- V(Float32Abs, kAVXFloat32Abs, kSSEFloat32Abs) \
- V(Float64Abs, kAVXFloat64Abs, kSSEFloat64Abs) \
- V(Float32Neg, kAVXFloat32Neg, kSSEFloat32Neg) \
- V(Float64Neg, kAVXFloat64Neg, kSSEFloat64Neg) \
- V(F64x2Abs, kAVXFloat64Abs, kSSEFloat64Abs) \
- V(F64x2Neg, kAVXFloat64Neg, kSSEFloat64Neg)
+#define RRO_FLOAT_OP_LIST(V) \
+ V(Float32Add, kFloat32Add) \
+ V(Float64Add, kFloat64Add) \
+ V(Float32Sub, kFloat32Sub) \
+ V(Float64Sub, kFloat64Sub) \
+ V(Float32Mul, kFloat32Mul) \
+ V(Float64Mul, kFloat64Mul) \
+ V(Float32Div, kFloat32Div) \
+ V(Float64Div, kFloat64Div) \
+ V(F64x2Add, kIA32F64x2Add) \
+ V(F64x2Sub, kIA32F64x2Sub) \
+ V(F64x2Mul, kIA32F64x2Mul) \
+ V(F64x2Div, kIA32F64x2Div) \
+ V(F64x2Eq, kIA32F64x2Eq) \
+ V(F64x2Ne, kIA32F64x2Ne) \
+ V(F64x2Lt, kIA32F64x2Lt) \
+ V(F64x2Le, kIA32F64x2Le)
+
+#define FLOAT_UNOP_LIST(V) \
+ V(Float32Abs, kFloat32Abs) \
+ V(Float64Abs, kFloat64Abs) \
+ V(Float32Neg, kFloat32Neg) \
+ V(Float64Neg, kFloat64Neg) \
+ V(F64x2Abs, kFloat64Abs) \
+ V(F64x2Neg, kFloat64Neg)
#define RO_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
@@ -1164,17 +1230,17 @@ RR_OP_LIST(RR_VISITOR)
#undef RR_VISITOR
#undef RR_OP_LIST
-#define RRO_FLOAT_VISITOR(Name, avx, sse) \
+#define RRO_FLOAT_VISITOR(Name, opcode) \
void InstructionSelector::Visit##Name(Node* node) { \
- VisitRROFloat(this, node, avx, sse); \
+ VisitRROFloat(this, node, opcode); \
}
RRO_FLOAT_OP_LIST(RRO_FLOAT_VISITOR)
#undef RRO_FLOAT_VISITOR
#undef RRO_FLOAT_OP_LIST
-#define FLOAT_UNOP_VISITOR(Name, avx, sse) \
- void InstructionSelector::Visit##Name(Node* node) { \
- VisitFloatUnop(this, node, node->InputAt(0), avx, sse); \
+#define FLOAT_UNOP_VISITOR(Name, opcode) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitFloatUnop(this, node, node->InputAt(0), opcode); \
}
FLOAT_UNOP_LIST(FLOAT_UNOP_VISITOR)
#undef FLOAT_UNOP_VISITOR
@@ -1617,29 +1683,6 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
VisitWordCompare(selector, node, kIA32Cmp, cont);
}
-void VisitAtomicExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode, MachineRepresentation rep) {
- IA32OperandGenerator g(selector);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
-
- AddressingMode addressing_mode;
- InstructionOperand value_operand = (rep == MachineRepresentation::kWord8)
- ? g.UseFixed(value, edx)
- : g.UseUniqueRegister(value);
- InstructionOperand inputs[] = {
- value_operand, g.UseUniqueRegister(base),
- g.GetEffectiveIndexOperand(index, &addressing_mode)};
- InstructionOperand outputs[] = {
- (rep == MachineRepresentation::kWord8)
- // Using DefineSameAsFirst requires the register to be unallocated.
- ? g.DefineAsFixed(node, edx)
- : g.DefineSameAsFirst(node)};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- selector->Emit(code, 1, outputs, arraysize(inputs), inputs);
-}
-
void VisitAtomicBinOp(InstructionSelector* selector, Node* node,
ArchOpcode opcode, MachineRepresentation rep) {
AddressingMode addressing_mode;
@@ -1949,32 +1992,25 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
load_rep.representation() == MachineRepresentation::kWord16 ||
- load_rep.representation() == MachineRepresentation::kWord32);
+ load_rep.representation() == MachineRepresentation::kWord32 ||
+ load_rep.representation() == MachineRepresentation::kTaggedSigned ||
+ load_rep.representation() == MachineRepresentation::kTaggedPointer ||
+ load_rep.representation() == MachineRepresentation::kTagged);
USE(load_rep);
- VisitLoad(node);
+ // The memory order is ignored as both acquire and sequentially consistent
+ // loads can emit MOV.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ VisitLoad(node, node, GetLoadOpcode(load_rep));
}
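
The widened DCHECK admits tagged representations because atomic loads of tagged fields now flow through here as well. The "memory order is ignored" comment is the load-side half of the x86 mapping: because seq_cst stores already pay for the barrier with xchg, every naturally aligned atomic load up to word size is a single MOV:

    #include <atomic>

    std::atomic<int> cell;

    int LoadAcquire() { return cell.load(std::memory_order_acquire); }  // mov
    int LoadSeqCst()  { return cell.load(std::memory_order_seq_cst); }  // also just mov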
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- IA32OperandGenerator g(this);
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kWord32AtomicExchangeInt8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kWord32AtomicExchangeInt16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicExchangeWord32;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicExchange(this, node, opcode, rep);
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ VisitStoreCommon(this, node, store_params.store_representation(),
+ store_params.order());
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
@@ -1982,15 +2018,15 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
@@ -2007,15 +2043,15 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
@@ -2053,12 +2089,11 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
VisitAtomicBinOp(this, node, opcode, type.representation());
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2068,6 +2103,8 @@ VISIT_ATOMIC_BINOP(Xor)
#undef VISIT_ATOMIC_BINOP
void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
+ // Both acquire and sequentially consistent loads can emit MOV.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
IA32OperandGenerator g(this);
AddressingMode mode;
Node* base = node->InputAt(0);
@@ -2079,10 +2116,9 @@ void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
g.GetEffectiveIndexOperand(index, &mode)};
InstructionCode code =
kIA32Word32AtomicPairLoad | AddressingModeField::encode(mode);
- InstructionOperand temps[] = {g.TempDoubleRegister()};
InstructionOperand outputs[] = {g.DefineAsRegister(projection0),
g.DefineAsRegister(projection1)};
- Emit(code, 2, outputs, 2, inputs, 1, temps);
+ Emit(code, 2, outputs, 2, inputs);
} else if (projection0 || projection1) {
// Only one word is needed, so it's enough to load just that.
ArchOpcode opcode = kIA32Movl;
@@ -2103,25 +2139,45 @@ void InstructionSelector::VisitWord32AtomicPairLoad(Node* node) {
}
void InstructionSelector::VisitWord32AtomicPairStore(Node* node) {
+ // Release pair stores emit a MOVQ via a double register, and sequentially
+ // consistent stores emit CMPXCHG8B.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+
IA32OperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
Node* value_high = node->InputAt(3);
- AddressingMode addressing_mode;
- InstructionOperand inputs[] = {
- g.UseUniqueRegisterOrSlotOrConstant(value), g.UseFixed(value_high, ecx),
- g.UseUniqueRegister(base),
- g.GetEffectiveIndexOperand(index, &addressing_mode)};
- // Allocating temp registers here as stores are performed using an atomic
- // exchange, the output of which is stored in edx:eax, which should be saved
- // and restored at the end of the instruction.
- InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
- const int num_temps = arraysize(temps);
- InstructionCode code =
- kIA32Word32AtomicPairStore | AddressingModeField::encode(addressing_mode);
- Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps, temps);
+ AtomicMemoryOrder order = OpParameter<AtomicMemoryOrder>(node->op());
+ if (order == AtomicMemoryOrder::kAcqRel) {
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegisterOrSlotOrConstant(value),
+ g.UseUniqueRegisterOrSlotOrConstant(value_high),
+ g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode),
+ };
+ InstructionCode code = kIA32Word32ReleasePairStore |
+ AddressingModeField::encode(addressing_mode);
+ Emit(code, 0, nullptr, arraysize(inputs), inputs);
+ } else {
+ DCHECK_EQ(order, AtomicMemoryOrder::kSeqCst);
+
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegisterOrSlotOrConstant(value), g.UseFixed(value_high, ecx),
+ g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ // Allocating temp registers here as stores are performed using an atomic
+ // exchange, the output of which is stored in edx:eax, which should be saved
+ // and restored at the end of the instruction.
+ InstructionOperand temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
+ const int num_temps = arraysize(temps);
+ InstructionCode code = kIA32Word32SeqCstPairStore |
+ AddressingModeField::encode(addressing_mode);
+ Emit(code, 0, nullptr, arraysize(inputs), inputs, num_temps, temps);
+ }
}
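
For 64-bit atomics on a 32-bit target the two orders genuinely diverge, as the comment above notes: acq-rel pair stores take kIA32Word32ReleasePairStore, a single 8-byte store through an XMM register (MOVQ), while seq_cst pair stores take kIA32Word32SeqCstPairStore, an exchange loop whose result occupies edx:eax — hence the fixed eax/edx temps. The std::atomic equivalent on ia32, for orientation (whether a given compiler picks the SSE store or a cmpxchg8b loop is implementation-dependent):

    #include <atomic>
    #include <cstdint>

    std::atomic<uint64_t> wide;  // lock-free on ia32 via cmpxchg8b / 8-byte SSE moves

    void StoreReleaseWide(uint64_t v) { wide.store(v, std::memory_order_release); }
    void StoreSeqCstWide(uint64_t v)  { wide.store(v, std::memory_order_seq_cst); }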
void InstructionSelector::VisitWord32AtomicPairAdd(Node* node) {
@@ -2193,60 +2249,57 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
#define SIMD_BINOP_LIST(V) \
V(F32x4Min) \
V(F32x4Max) \
- V(F32x4Eq) \
- V(F32x4Ne) \
- V(F32x4Lt) \
- V(F32x4Le) \
- V(I32x4Add) \
- V(I32x4Sub) \
- V(I32x4Mul) \
- V(I32x4MinS) \
- V(I32x4MaxS) \
- V(I32x4Eq) \
- V(I32x4Ne) \
- V(I32x4GtS) \
- V(I32x4GeS) \
- V(I32x4MinU) \
- V(I32x4MaxU) \
V(I32x4GtU) \
V(I32x4GeU) \
- V(I16x8SConvertI32x4) \
- V(I16x8Add) \
- V(I16x8AddSatS) \
- V(I16x8Sub) \
- V(I16x8SubSatS) \
- V(I16x8Mul) \
- V(I16x8MinS) \
- V(I16x8MaxS) \
- V(I16x8Eq) \
V(I16x8Ne) \
- V(I16x8GtS) \
V(I16x8GeS) \
- V(I16x8AddSatU) \
- V(I16x8SubSatU) \
- V(I16x8MinU) \
- V(I16x8MaxU) \
V(I16x8GtU) \
V(I16x8GeU) \
- V(I8x16SConvertI16x8) \
V(I8x16Ne) \
V(I8x16GeS) \
V(I8x16GtU) \
- V(I8x16GeU) \
- V(S128And) \
- V(S128Or) \
- V(S128Xor)
+ V(I8x16GeU)
#define SIMD_BINOP_UNIFIED_SSE_AVX_LIST(V) \
V(F32x4Add) \
V(F32x4Sub) \
V(F32x4Mul) \
V(F32x4Div) \
+ V(F32x4Eq) \
+ V(F32x4Ne) \
+ V(F32x4Lt) \
+ V(F32x4Le) \
V(I64x2Add) \
V(I64x2Sub) \
V(I64x2Eq) \
V(I64x2Ne) \
+ V(I32x4Add) \
+ V(I32x4Sub) \
+ V(I32x4Mul) \
+ V(I32x4MinS) \
+ V(I32x4MaxS) \
+ V(I32x4Eq) \
+ V(I32x4Ne) \
+ V(I32x4GtS) \
+ V(I32x4GeS) \
+ V(I32x4MinU) \
+ V(I32x4MaxU) \
V(I32x4DotI16x8S) \
+ V(I16x8Add) \
+ V(I16x8AddSatS) \
+ V(I16x8Sub) \
+ V(I16x8SubSatS) \
+ V(I16x8Mul) \
+ V(I16x8Eq) \
+ V(I16x8GtS) \
+ V(I16x8MinS) \
+ V(I16x8MaxS) \
+ V(I16x8AddSatU) \
+ V(I16x8SubSatU) \
+ V(I16x8MinU) \
+ V(I16x8MaxU) \
+ V(I16x8SConvertI32x4) \
+ V(I16x8UConvertI32x4) \
V(I16x8RoundingAverageU) \
V(I8x16Add) \
V(I8x16AddSatS) \
@@ -2260,7 +2313,12 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
V(I8x16SubSatU) \
V(I8x16MinU) \
V(I8x16MaxU) \
- V(I8x16RoundingAverageU)
+ V(I8x16SConvertI16x8) \
+ V(I8x16UConvertI16x8) \
+ V(I8x16RoundingAverageU) \
+ V(S128And) \
+ V(S128Or) \
+ V(S128Xor)
// These opcodes require all inputs to be registers because the codegen is
// simpler with all registers.
@@ -2462,7 +2520,12 @@ void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
}
void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
- VisitRRSimd(this, node, kIA32I32x4SConvertF32x4);
+ IA32OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempRegister()};
+ InstructionOperand dst =
+ IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node);
+ Emit(kIA32I32x4SConvertF32x4, dst, g.UseRegister(node->InputAt(0)),
+ arraysize(temps), temps);
}
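
I32x4SConvertF32x4 gains a general-purpose temp because cvttps2dq alone does not implement Wasm's saturating semantics: it produces 0x80000000 for NaN and for any out-of-range lane, so the lanes need a fixup pass (the temp is presumably scratch for that mask work — an assumption, since the macro body is not in this diff). The per-lane semantics the instruction must end up with, as a scalar model:

    #include <cmath>
    #include <cstdint>

    int32_t TruncSatS(float x) {
      if (std::isnan(x)) return 0;                // NaN lanes become 0
      if (x <= -2147483648.0f) return INT32_MIN;  // saturate below
      if (x >= 2147483648.0f) return INT32_MAX;   // saturate above
      return static_cast<int32_t>(x);             // in range: truncate toward zero
    }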
void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
@@ -2625,26 +2688,6 @@ SIMD_BINOP_RRR(VISIT_SIMD_BINOP_RRR)
#undef VISIT_SIMD_BINOP_RRR
#undef SIMD_BINOP_RRR
-// TODO(v8:9198): SSE requires operand1 to be a register as we don't have memory
-// alignment yet. For AVX, memory operands are fine, but can have performance
-// issues if not aligned to 16/32 bytes (based on load size), see SDM Vol 1,
-// chapter 14.9
-void VisitPack(InstructionSelector* selector, Node* node, ArchOpcode avx_opcode,
- ArchOpcode sse_opcode) {
- IA32OperandGenerator g(selector);
- InstructionOperand operand0 = g.UseRegister(node->InputAt(0));
- InstructionOperand operand1 = g.UseRegister(node->InputAt(1));
- if (selector->IsSupported(AVX)) {
- selector->Emit(avx_opcode, g.DefineSameAsFirst(node), operand0, operand1);
- } else {
- selector->Emit(sse_opcode, g.DefineSameAsFirst(node), operand0, operand1);
- }
-}
-
-void InstructionSelector::VisitI16x8UConvertI32x4(Node* node) {
- VisitPack(this, node, kAVXI16x8UConvertI32x4, kSSEI16x8UConvertI32x4);
-}
-
void InstructionSelector::VisitI16x8BitMask(Node* node) {
IA32OperandGenerator g(this);
InstructionOperand temps[] = {g.TempSimd128Register()};
@@ -2652,43 +2695,16 @@ void InstructionSelector::VisitI16x8BitMask(Node* node) {
g.UseUniqueRegister(node->InputAt(0)), arraysize(temps), temps);
}
-void InstructionSelector::VisitI8x16UConvertI16x8(Node* node) {
- VisitPack(this, node, kAVXI8x16UConvertI16x8, kSSEI8x16UConvertI16x8);
-}
-
void InstructionSelector::VisitI8x16Shl(Node* node) {
- IA32OperandGenerator g(this);
- if (g.CanBeImmediate(node->InputAt(1))) {
- InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
- this->Emit(kIA32I8x16Shl, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)),
- g.UseImmediate(node->InputAt(1)), arraysize(temps), temps);
- } else {
- VisitRROI8x16SimdShift(this, node, kIA32I8x16Shl);
- }
+ VisitI8x16Shift(this, node, kIA32I8x16Shl);
}
void InstructionSelector::VisitI8x16ShrS(Node* node) {
- IA32OperandGenerator g(this);
- if (g.CanBeImmediate(node->InputAt(1))) {
- this->Emit(kIA32I8x16ShrS, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)),
- g.UseImmediate(node->InputAt(1)));
- } else {
- VisitRROI8x16SimdShift(this, node, kIA32I8x16ShrS);
- }
+ VisitI8x16Shift(this, node, kIA32I8x16ShrS);
}
void InstructionSelector::VisitI8x16ShrU(Node* node) {
- IA32OperandGenerator g(this);
- if (g.CanBeImmediate(node->InputAt(1))) {
- InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
- this->Emit(kIA32I8x16ShrU, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)),
- g.UseImmediate(node->InputAt(1)), arraysize(temps), temps);
- } else {
- VisitRROI8x16SimdShift(this, node, kIA32I8x16ShrU);
- }
+ VisitI8x16Shift(this, node, kIA32I8x16ShrU);
}
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
diff --git a/deps/v8/src/compiler/backend/instruction-codes.h b/deps/v8/src/compiler/backend/instruction-codes.h
index 31d669813e..63cf3ca06f 100644
--- a/deps/v8/src/compiler/backend/instruction-codes.h
+++ b/deps/v8/src/compiler/backend/instruction-codes.h
@@ -17,6 +17,8 @@
#include "src/compiler/backend/mips/instruction-codes-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/compiler/backend/mips64/instruction-codes-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/compiler/backend/loong64/instruction-codes-loong64.h"
#elif V8_TARGET_ARCH_X64
#include "src/compiler/backend/x64/instruction-codes-x64.h"
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
@@ -30,6 +32,7 @@
#define TARGET_ADDRESSING_MODE_LIST(V)
#endif
#include "src/base/bit-field.h"
+#include "src/codegen/atomic-memory-order.h"
#include "src/compiler/write-barrier-kind.h"
namespace v8 {
@@ -99,53 +102,53 @@ inline RecordWriteMode WriteBarrierKindToRecordWriteMode(
V(ArchParentFramePointer) \
V(ArchTruncateDoubleToI) \
V(ArchStoreWithWriteBarrier) \
+ V(ArchAtomicStoreWithWriteBarrier) \
V(ArchStackSlot) \
- V(ArchWordPoisonOnSpeculation) \
V(ArchStackPointerGreaterThan) \
V(ArchStackCheckOffset) \
- V(Word32AtomicLoadInt8) \
- V(Word32AtomicLoadUint8) \
- V(Word32AtomicLoadInt16) \
- V(Word32AtomicLoadUint16) \
- V(Word32AtomicLoadWord32) \
- V(Word32AtomicStoreWord8) \
- V(Word32AtomicStoreWord16) \
- V(Word32AtomicStoreWord32) \
- V(Word32AtomicExchangeInt8) \
- V(Word32AtomicExchangeUint8) \
- V(Word32AtomicExchangeInt16) \
- V(Word32AtomicExchangeUint16) \
- V(Word32AtomicExchangeWord32) \
- V(Word32AtomicCompareExchangeInt8) \
- V(Word32AtomicCompareExchangeUint8) \
- V(Word32AtomicCompareExchangeInt16) \
- V(Word32AtomicCompareExchangeUint16) \
- V(Word32AtomicCompareExchangeWord32) \
- V(Word32AtomicAddInt8) \
- V(Word32AtomicAddUint8) \
- V(Word32AtomicAddInt16) \
- V(Word32AtomicAddUint16) \
- V(Word32AtomicAddWord32) \
- V(Word32AtomicSubInt8) \
- V(Word32AtomicSubUint8) \
- V(Word32AtomicSubInt16) \
- V(Word32AtomicSubUint16) \
- V(Word32AtomicSubWord32) \
- V(Word32AtomicAndInt8) \
- V(Word32AtomicAndUint8) \
- V(Word32AtomicAndInt16) \
- V(Word32AtomicAndUint16) \
- V(Word32AtomicAndWord32) \
- V(Word32AtomicOrInt8) \
- V(Word32AtomicOrUint8) \
- V(Word32AtomicOrInt16) \
- V(Word32AtomicOrUint16) \
- V(Word32AtomicOrWord32) \
- V(Word32AtomicXorInt8) \
- V(Word32AtomicXorUint8) \
- V(Word32AtomicXorInt16) \
- V(Word32AtomicXorUint16) \
- V(Word32AtomicXorWord32) \
+ V(AtomicLoadInt8) \
+ V(AtomicLoadUint8) \
+ V(AtomicLoadInt16) \
+ V(AtomicLoadUint16) \
+ V(AtomicLoadWord32) \
+ V(AtomicStoreWord8) \
+ V(AtomicStoreWord16) \
+ V(AtomicStoreWord32) \
+ V(AtomicExchangeInt8) \
+ V(AtomicExchangeUint8) \
+ V(AtomicExchangeInt16) \
+ V(AtomicExchangeUint16) \
+ V(AtomicExchangeWord32) \
+ V(AtomicCompareExchangeInt8) \
+ V(AtomicCompareExchangeUint8) \
+ V(AtomicCompareExchangeInt16) \
+ V(AtomicCompareExchangeUint16) \
+ V(AtomicCompareExchangeWord32) \
+ V(AtomicAddInt8) \
+ V(AtomicAddUint8) \
+ V(AtomicAddInt16) \
+ V(AtomicAddUint16) \
+ V(AtomicAddWord32) \
+ V(AtomicSubInt8) \
+ V(AtomicSubUint8) \
+ V(AtomicSubInt16) \
+ V(AtomicSubUint16) \
+ V(AtomicSubWord32) \
+ V(AtomicAndInt8) \
+ V(AtomicAndUint8) \
+ V(AtomicAndInt16) \
+ V(AtomicAndUint16) \
+ V(AtomicAndWord32) \
+ V(AtomicOrInt8) \
+ V(AtomicOrUint8) \
+ V(AtomicOrInt16) \
+ V(AtomicOrUint16) \
+ V(AtomicOrWord32) \
+ V(AtomicXorInt8) \
+ V(AtomicXorUint8) \
+ V(AtomicXorInt16) \
+ V(AtomicXorUint16) \
+ V(AtomicXorWord32) \
V(Ieee754Float64Acos) \
V(Ieee754Float64Acosh) \
V(Ieee754Float64Asin) \
@@ -208,12 +211,10 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
enum FlagsMode {
kFlags_none = 0,
kFlags_branch = 1,
- kFlags_branch_and_poison = 2,
- kFlags_deoptimize = 3,
- kFlags_deoptimize_and_poison = 4,
- kFlags_set = 5,
- kFlags_trap = 6,
- kFlags_select = 7,
+ kFlags_deoptimize = 2,
+ kFlags_set = 3,
+ kFlags_trap = 4,
+ kFlags_select = 5,
};
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
@@ -262,9 +263,20 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
enum MemoryAccessMode {
kMemoryAccessDirect = 0,
kMemoryAccessProtected = 1,
- kMemoryAccessPoisoned = 2
};
+enum class AtomicWidth { kWord32, kWord64 };
+
+inline size_t AtomicWidthSize(AtomicWidth width) {
+ switch (width) {
+ case AtomicWidth::kWord32:
+ return 4;
+ case AtomicWidth::kWord64:
+ return 8;
+ }
+ UNREACHABLE();
+}
+
// The InstructionCode is an opaque, target-specific integer that encodes
// what code to emit for an instruction in the code generator. It is not
// interesting to the register allocator, as the inputs and flags on the
@@ -279,6 +291,9 @@ using ArchOpcodeField = base::BitField<ArchOpcode, 0, 9>;
static_assert(ArchOpcodeField::is_valid(kLastArchOpcode),
"All opcodes must fit in the 9-bit ArchOpcodeField.");
using AddressingModeField = base::BitField<AddressingMode, 9, 5>;
+static_assert(
+ AddressingModeField::is_valid(kLastAddressingMode),
+ "All addressing modes must fit in the 5-bit AddressingModeField.");
using FlagsModeField = base::BitField<FlagsMode, 14, 3>;
using FlagsConditionField = base::BitField<FlagsCondition, 17, 5>;
using DeoptImmedArgsCountField = base::BitField<int, 22, 2>;
@@ -287,8 +302,29 @@ using DeoptFrameStateOffsetField = base::BitField<int, 24, 8>;
// size, an access mode, or both inside the overlapping MiscField.
using LaneSizeField = base::BitField<int, 22, 8>;
using AccessModeField = base::BitField<MemoryAccessMode, 30, 2>;
+// AtomicWidthField overlaps with MiscField and is used for the various Atomic
+// opcodes. Only used on 64bit architectures. All atomic instructions on 32bit
+// architectures are assumed to be 32bit wide.
+using AtomicWidthField = base::BitField<AtomicWidth, 22, 2>;
+// AtomicMemoryOrderField overlaps with MiscField and is used for the various
+// Atomic opcodes. This field is not used on all architectures. It is used on
+// architectures where the codegen for kSeqCst and kAcqRel differ only by
+// emitting fences.
+using AtomicMemoryOrderField = base::BitField<AtomicMemoryOrder, 24, 2>;
+using AtomicStoreRecordWriteModeField = base::BitField<RecordWriteMode, 26, 4>;
using MiscField = base::BitField<int, 22, 10>;
+// This static assertion serves as an early warning if we are about to exhaust
+// the available opcode space. If we are about to exhaust it, we should start
+// looking into options to compress some opcodes (see
+// https://crbug.com/v8/12093) before we fully run out of available opcodes.
+// Otherwise we risk being unable to land an important security fix or merge
+// back fixes that add new opcodes.
+// It is OK to temporarily reduce the required slack if we have a tracking bug
+// to reduce the number of used opcodes again.
+static_assert(ArchOpcodeField::kMax - kLastArchOpcode >= 16,
+ "We are running close to the number of available opcodes.");
+
} // namespace compiler
} // namespace internal
} // namespace v8
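
The bit-field layout above packs everything into one 32-bit InstructionCode, and the new static_asserts guard the two fields that were running out of headroom (9 bits of ArchOpcode, 5 bits of AddressingMode). A minimal reimplementation of the packing scheme, assuming the same shift/size convention as base::BitField:

    #include <cstdint>

    template <typename T, int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
      static constexpr uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static constexpr T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> kShift);
      }
    };

    // Bits 22..31 form MiscField; AtomicWidthField (bits 22..23) and
    // AtomicMemoryOrderField (bits 24..25) deliberately alias it, so a given
    // opcode uses one interpretation of those bits, never both at once.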
diff --git a/deps/v8/src/compiler/backend/instruction-scheduler.cc b/deps/v8/src/compiler/backend/instruction-scheduler.cc
index c46d263bae..bdad838f3e 100644
--- a/deps/v8/src/compiler/backend/instruction-scheduler.cc
+++ b/deps/v8/src/compiler/backend/instruction-scheduler.cc
@@ -132,7 +132,6 @@ void InstructionScheduler::AddInstruction(Instruction* instr) {
// We should not have branches in the middle of a block.
DCHECK_NE(instr->flags_mode(), kFlags_branch);
- DCHECK_NE(instr->flags_mode(), kFlags_branch_and_poison);
if (IsFixedRegisterParameter(instr)) {
if (last_live_in_reg_marker_ != nullptr) {
@@ -298,11 +297,6 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
// effects.
return kIsLoadOperation;
- case kArchWordPoisonOnSpeculation:
- // While poisoning operations have no side effect, they must not be
- // reordered relative to branches.
- return kHasSideEffect;
-
case kArchPrepareCallCFunction:
case kArchPrepareTailCall:
case kArchTailCallCodeObject:
@@ -334,55 +328,56 @@ int InstructionScheduler::GetInstructionFlags(const Instruction* instr) const {
return kIsBarrier;
case kArchStoreWithWriteBarrier:
+ case kArchAtomicStoreWithWriteBarrier:
return kHasSideEffect;
- case kWord32AtomicLoadInt8:
- case kWord32AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kWord32AtomicLoadUint16:
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
return kIsLoadOperation;
- case kWord32AtomicStoreWord8:
- case kWord32AtomicStoreWord16:
- case kWord32AtomicStoreWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
return kHasSideEffect;
- case kWord32AtomicExchangeInt8:
- case kWord32AtomicExchangeUint8:
- case kWord32AtomicExchangeInt16:
- case kWord32AtomicExchangeUint16:
- case kWord32AtomicExchangeWord32:
- case kWord32AtomicCompareExchangeInt8:
- case kWord32AtomicCompareExchangeUint8:
- case kWord32AtomicCompareExchangeInt16:
- case kWord32AtomicCompareExchangeUint16:
- case kWord32AtomicCompareExchangeWord32:
- case kWord32AtomicAddInt8:
- case kWord32AtomicAddUint8:
- case kWord32AtomicAddInt16:
- case kWord32AtomicAddUint16:
- case kWord32AtomicAddWord32:
- case kWord32AtomicSubInt8:
- case kWord32AtomicSubUint8:
- case kWord32AtomicSubInt16:
- case kWord32AtomicSubUint16:
- case kWord32AtomicSubWord32:
- case kWord32AtomicAndInt8:
- case kWord32AtomicAndUint8:
- case kWord32AtomicAndInt16:
- case kWord32AtomicAndUint16:
- case kWord32AtomicAndWord32:
- case kWord32AtomicOrInt8:
- case kWord32AtomicOrUint8:
- case kWord32AtomicOrInt16:
- case kWord32AtomicOrUint16:
- case kWord32AtomicOrWord32:
- case kWord32AtomicXorInt8:
- case kWord32AtomicXorUint8:
- case kWord32AtomicXorInt16:
- case kWord32AtomicXorUint16:
- case kWord32AtomicXorWord32:
+ case kAtomicExchangeInt8:
+ case kAtomicExchangeUint8:
+ case kAtomicExchangeInt16:
+ case kAtomicExchangeUint16:
+ case kAtomicExchangeWord32:
+ case kAtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeWord32:
+ case kAtomicAddInt8:
+ case kAtomicAddUint8:
+ case kAtomicAddInt16:
+ case kAtomicAddUint16:
+ case kAtomicAddWord32:
+ case kAtomicSubInt8:
+ case kAtomicSubUint8:
+ case kAtomicSubInt16:
+ case kAtomicSubUint16:
+ case kAtomicSubWord32:
+ case kAtomicAndInt8:
+ case kAtomicAndUint8:
+ case kAtomicAndInt16:
+ case kAtomicAndUint16:
+ case kAtomicAndWord32:
+ case kAtomicOrInt8:
+ case kAtomicOrUint8:
+ case kAtomicOrInt16:
+ case kAtomicOrUint16:
+ case kAtomicOrWord32:
+ case kAtomicXorInt8:
+ case kAtomicXorUint8:
+ case kAtomicXorInt16:
+ case kAtomicXorUint16:
+ case kAtomicXorWord32:
return kHasSideEffect;
#define CASE(Name) case k##Name:
diff --git a/deps/v8/src/compiler/backend/instruction-selector.cc b/deps/v8/src/compiler/backend/instruction-selector.cc
index f279ea1590..cd2b83ac3d 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.cc
+++ b/deps/v8/src/compiler/backend/instruction-selector.cc
@@ -39,7 +39,7 @@ InstructionSelector::InstructionSelector(
size_t* max_pushed_argument_count, SourcePositionMode source_position_mode,
Features features, EnableScheduling enable_scheduling,
EnableRootsRelativeAddressing enable_roots_relative_addressing,
- PoisoningMitigationLevel poisoning_level, EnableTraceTurboJson trace_turbo)
+ EnableTraceTurboJson trace_turbo)
: zone_(zone),
linkage_(linkage),
sequence_(sequence),
@@ -63,7 +63,6 @@ InstructionSelector::InstructionSelector(
enable_roots_relative_addressing_(enable_roots_relative_addressing),
enable_switch_jump_table_(enable_switch_jump_table),
state_values_cache_(zone),
- poisoning_level_(poisoning_level),
frame_(frame),
instruction_selection_failed_(false),
instr_origins_(sequence->zone()),
@@ -1076,17 +1075,10 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
}
DCHECK_EQ(1u, buffer->instruction_args.size());
- // Argument 1 is used for poison-alias index (encoded in a word-sized
- // immediate. This an index of the operand that aliases with poison register
- // or -1 if there is no aliasing.
- buffer->instruction_args.push_back(g.TempImmediate(-1));
- const size_t poison_alias_index = 1;
- DCHECK_EQ(buffer->instruction_args.size() - 1, poison_alias_index);
-
// If the call needs a frame state, we insert the state information as
// follows (n is the number of value inputs to the frame state):
- // arg 2 : deoptimization id.
- // arg 3 - arg (n + 2) : value inputs to the frame state.
+ // arg 1 : deoptimization id.
+ // arg 2 - arg (n + 2) : value inputs to the frame state.
size_t frame_state_entries = 0;
USE(frame_state_entries); // frame_state_entries is only used for debug.
if (buffer->frame_state_descriptor != nullptr) {
@@ -1123,7 +1115,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
&buffer->instruction_args, FrameStateInputKind::kStackSlot,
instruction_zone());
- DCHECK_EQ(2 + frame_state_entries, buffer->instruction_args.size());
+ DCHECK_EQ(1 + frame_state_entries, buffer->instruction_args.size());
}
size_t input_count = static_cast<size_t>(buffer->input_count());
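
With the poison-alias immediate gone, the call's instruction arguments shrink by one slot, which is what the adjusted DCHECKs count. The resulting layout, per the comments above:

    //   instruction_args[0]    call target (pushed before this block; see the
    //                          DCHECK_EQ(1u, ...) above)
    //   instruction_args[1]    deoptimization id, only with a frame state
    //   instruction_args[2..]  value inputs to the frame state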
@@ -1159,23 +1151,11 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
buffer->pushed_nodes[stack_index] = param;
pushed_count++;
} else {
- // If we do load poisoning and the linkage uses the poisoning register,
- // then we request the input in memory location, and during code
- // generation, we move the input to the register.
- if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison &&
- unallocated.HasFixedRegisterPolicy()) {
- int reg = unallocated.fixed_register_index();
- if (Register::from_code(reg) == kSpeculationPoisonRegister) {
- buffer->instruction_args[poison_alias_index] = g.TempImmediate(
- static_cast<int32_t>(buffer->instruction_args.size()));
- op = g.UseRegisterOrSlotOrConstant(*iter);
- }
- }
buffer->instruction_args.push_back(op);
}
}
DCHECK_EQ(input_count, buffer->instruction_args.size() + pushed_count -
- frame_state_entries - 1);
+ frame_state_entries);
if (V8_TARGET_ARCH_STORES_RETURN_ADDRESS_ON_STACK && is_tail_call &&
stack_param_delta != 0) {
// For tail calls that change the size of their parameter list and keep
@@ -1509,11 +1489,6 @@ void InstructionSelector::VisitNode(Node* node) {
MarkAsRepresentation(MachineRepresentation::kSimd128, node);
return VisitLoadLane(node);
}
- case IrOpcode::kPoisonedLoad: {
- LoadRepresentation type = LoadRepresentationOf(node->op());
- MarkAsRepresentation(type.representation(), node);
- return VisitPoisonedLoad(node);
- }
case IrOpcode::kStore:
return VisitStore(node);
case IrOpcode::kProtectedStore:
@@ -1850,12 +1825,6 @@ void InstructionSelector::VisitNode(Node* node) {
return MarkAsFloat64(node), VisitFloat64InsertLowWord32(node);
case IrOpcode::kFloat64InsertHighWord32:
return MarkAsFloat64(node), VisitFloat64InsertHighWord32(node);
- case IrOpcode::kTaggedPoisonOnSpeculation:
- return MarkAsTagged(node), VisitTaggedPoisonOnSpeculation(node);
- case IrOpcode::kWord32PoisonOnSpeculation:
- return MarkAsWord32(node), VisitWord32PoisonOnSpeculation(node);
- case IrOpcode::kWord64PoisonOnSpeculation:
- return MarkAsWord64(node), VisitWord64PoisonOnSpeculation(node);
case IrOpcode::kStackSlot:
return VisitStackSlot(node);
case IrOpcode::kStackPointerGreaterThan:
@@ -1900,12 +1869,14 @@ void InstructionSelector::VisitNode(Node* node) {
case IrOpcode::kMemoryBarrier:
return VisitMemoryBarrier(node);
case IrOpcode::kWord32AtomicLoad: {
- LoadRepresentation type = LoadRepresentationOf(node->op());
+ AtomicLoadParameters params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation type = params.representation();
MarkAsRepresentation(type.representation(), node);
return VisitWord32AtomicLoad(node);
}
case IrOpcode::kWord64AtomicLoad: {
- LoadRepresentation type = LoadRepresentationOf(node->op());
+ AtomicLoadParameters params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation type = params.representation();
MarkAsRepresentation(type.representation(), node);
return VisitWord64AtomicLoad(node);
}
@@ -2389,30 +2360,6 @@ void InstructionSelector::VisitNode(Node* node) {
}
}
-void InstructionSelector::EmitWordPoisonOnSpeculation(Node* node) {
- if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
- OperandGenerator g(this);
- Node* input_node = NodeProperties::GetValueInput(node, 0);
- InstructionOperand input = g.UseRegister(input_node);
- InstructionOperand output = g.DefineSameAsFirst(node);
- Emit(kArchWordPoisonOnSpeculation, output, input);
- } else {
- EmitIdentity(node);
- }
-}
-
-void InstructionSelector::VisitWord32PoisonOnSpeculation(Node* node) {
- EmitWordPoisonOnSpeculation(node);
-}
-
-void InstructionSelector::VisitWord64PoisonOnSpeculation(Node* node) {
- EmitWordPoisonOnSpeculation(node);
-}
-
-void InstructionSelector::VisitTaggedPoisonOnSpeculation(Node* node) {
- EmitWordPoisonOnSpeculation(node);
-}
-
void InstructionSelector::VisitStackPointerGreaterThan(Node* node) {
FlagsContinuation cont =
FlagsContinuation::ForSet(kStackPointerGreaterThanCondition, node);
@@ -2766,7 +2713,8 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \
- !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_RISCV64
+ !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 && \
+ !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64
void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
@@ -2792,7 +2740,7 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
}
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_PPC64
// !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390 &&
- // !V8_TARGET_ARCH_RISCV64
+ // !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64
#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
// This is only needed on 32-bit to split the 64-bit value into two operands.
@@ -2806,11 +2754,12 @@ void InstructionSelector::VisitI64x2ReplaceLaneI32Pair(Node* node) {
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
#if !V8_TARGET_ARCH_ARM64
-#if !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_LOONG64 && !V8_TARGET_ARCH_RISCV64
void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-#endif // !V8_TARGET_ARCH_MIPS64
+#endif // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_LOONG64 &&
+ // !V8_TARGET_ARCH_RISCV64
void InstructionSelector::VisitF64x2Qfma(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Qfms(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Qfma(Node* node) { UNIMPLEMENTED(); }
@@ -3104,45 +3053,24 @@ void InstructionSelector::VisitReturn(Node* ret) {
void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
BasicBlock* fbranch) {
- if (NeedsPoisoning(IsSafetyCheckOf(branch->op()))) {
- FlagsContinuation cont =
- FlagsContinuation::ForBranchAndPoison(kNotEqual, tbranch, fbranch);
- VisitWordCompareZero(branch, branch->InputAt(0), &cont);
- } else {
- FlagsContinuation cont =
- FlagsContinuation::ForBranch(kNotEqual, tbranch, fbranch);
- VisitWordCompareZero(branch, branch->InputAt(0), &cont);
- }
+ FlagsContinuation cont =
+ FlagsContinuation::ForBranch(kNotEqual, tbranch, fbranch);
+ VisitWordCompareZero(branch, branch->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeIf(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- if (NeedsPoisoning(p.is_safety_check())) {
- FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
- kNotEqual, p.kind(), p.reason(), node->id(), p.feedback(),
- node->InputAt(1));
- VisitWordCompareZero(node, node->InputAt(0), &cont);
- } else {
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kNotEqual, p.kind(), p.reason(), node->id(), p.feedback(),
- node->InputAt(1));
- VisitWordCompareZero(node, node->InputAt(0), &cont);
- }
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kNotEqual, p.kind(), p.reason(), node->id(), p.feedback(),
+ node->InputAt(1));
+ VisitWordCompareZero(node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitDeoptimizeUnless(Node* node) {
DeoptimizeParameters p = DeoptimizeParametersOf(node->op());
- if (NeedsPoisoning(p.is_safety_check())) {
- FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
- kEqual, p.kind(), p.reason(), node->id(), p.feedback(),
- node->InputAt(1));
- VisitWordCompareZero(node, node->InputAt(0), &cont);
- } else {
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->id(), p.feedback(),
- node->InputAt(1));
- VisitWordCompareZero(node, node->InputAt(0), &cont);
- }
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kEqual, p.kind(), p.reason(), node->id(), p.feedback(), node->InputAt(1));
+ VisitWordCompareZero(node, node->InputAt(0), &cont);
}
void InstructionSelector::VisitSelect(Node* node) {
@@ -3186,17 +3114,10 @@ void InstructionSelector::VisitDynamicCheckMapsWithDeoptUnless(Node* node) {
g.UseImmediate(n.slot()), g.UseImmediate(n.handler())});
}
- if (NeedsPoisoning(IsSafetyCheck::kCriticalSafetyCheck)) {
- FlagsContinuation cont = FlagsContinuation::ForDeoptimizeAndPoison(
- kEqual, p.kind(), p.reason(), node->id(), p.feedback(), n.frame_state(),
- dynamic_check_args.data(), static_cast<int>(dynamic_check_args.size()));
- VisitWordCompareZero(node, n.condition(), &cont);
- } else {
- FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
- kEqual, p.kind(), p.reason(), node->id(), p.feedback(), n.frame_state(),
- dynamic_check_args.data(), static_cast<int>(dynamic_check_args.size()));
- VisitWordCompareZero(node, n.condition(), &cont);
- }
+ FlagsContinuation cont = FlagsContinuation::ForDeoptimize(
+ kEqual, p.kind(), p.reason(), node->id(), p.feedback(), n.frame_state(),
+ dynamic_check_args.data(), static_cast<int>(dynamic_check_args.size()));
+ VisitWordCompareZero(node, n.condition(), &cont);
}
void InstructionSelector::VisitTrapIf(Node* node, TrapId trap_id) {
@@ -3409,18 +3330,6 @@ void InstructionSelector::SwapShuffleInputs(Node* node) {
}
#endif // V8_ENABLE_WEBASSEMBLY
-// static
-bool InstructionSelector::NeedsPoisoning(IsSafetyCheck safety_check) const {
- switch (poisoning_level_) {
- case PoisoningMitigationLevel::kDontPoison:
- return false;
- case PoisoningMitigationLevel::kPoisonAll:
- return safety_check != IsSafetyCheck::kNoSafetyCheck;
- case PoisoningMitigationLevel::kPoisonCriticalOnly:
- return safety_check == IsSafetyCheck::kCriticalSafetyCheck;
- }
- UNREACHABLE();
-}
} // namespace compiler
} // namespace internal
} // namespace v8
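
The deletions above retire TurboFan's speculation-poisoning (Spectre
"untrusted code mitigations") support: the PoisoningMitigationLevel policy,
NeedsPoisoning(), and the poisoned branch/deoptimize continuations are gone,
so branches and deopts are always emitted in their plain form. The retired
idiom, in rough outline (a generic sketch of the mitigation pattern, not V8's
exact codegen):

    #include <cstddef>
    #include <cstdint>

    // After a bounds check, a mask that is all-ones on the architecturally
    // taken path and all-zeros under mis-speculation is ANDed into the
    // loaded value, so a speculative out-of-bounds read cannot leak data.
    uint64_t PoisonedLoad(const uint64_t* base, size_t index, size_t length,
                          uint64_t poison_mask /* 0 when mis-speculated */) {
      return index < length ? (base[index] & poison_mask) : 0;
    }
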
diff --git a/deps/v8/src/compiler/backend/instruction-selector.h b/deps/v8/src/compiler/backend/instruction-selector.h
index 11a329d1d6..b33de8e856 100644
--- a/deps/v8/src/compiler/backend/instruction-selector.h
+++ b/deps/v8/src/compiler/backend/instruction-selector.h
@@ -54,13 +54,6 @@ class FlagsContinuation final {
return FlagsContinuation(kFlags_branch, condition, true_block, false_block);
}
- static FlagsContinuation ForBranchAndPoison(FlagsCondition condition,
- BasicBlock* true_block,
- BasicBlock* false_block) {
- return FlagsContinuation(kFlags_branch_and_poison, condition, true_block,
- false_block);
- }
-
// Creates a new flags continuation for an eager deoptimization exit.
static FlagsContinuation ForDeoptimize(
FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason,
@@ -71,16 +64,6 @@ class FlagsContinuation final {
extra_args_count);
}
- // Creates a new flags continuation for an eager deoptimization exit.
- static FlagsContinuation ForDeoptimizeAndPoison(
- FlagsCondition condition, DeoptimizeKind kind, DeoptimizeReason reason,
- NodeId node_id, FeedbackSource const& feedback, Node* frame_state,
- InstructionOperand* extra_args = nullptr, int extra_args_count = 0) {
- return FlagsContinuation(kFlags_deoptimize_and_poison, condition, kind,
- reason, node_id, feedback, frame_state, extra_args,
- extra_args_count);
- }
-
// Creates a new flags continuation for a boolean value.
static FlagsContinuation ForSet(FlagsCondition condition, Node* result) {
return FlagsContinuation(condition, result);
@@ -98,16 +81,8 @@ class FlagsContinuation final {
}
bool IsNone() const { return mode_ == kFlags_none; }
- bool IsBranch() const {
- return mode_ == kFlags_branch || mode_ == kFlags_branch_and_poison;
- }
- bool IsDeoptimize() const {
- return mode_ == kFlags_deoptimize || mode_ == kFlags_deoptimize_and_poison;
- }
- bool IsPoisoned() const {
- return mode_ == kFlags_branch_and_poison ||
- mode_ == kFlags_deoptimize_and_poison;
- }
+ bool IsBranch() const { return mode_ == kFlags_branch; }
+ bool IsDeoptimize() const { return mode_ == kFlags_deoptimize; }
bool IsSet() const { return mode_ == kFlags_set; }
bool IsTrap() const { return mode_ == kFlags_trap; }
bool IsSelect() const { return mode_ == kFlags_select; }
@@ -226,7 +201,7 @@ class FlagsContinuation final {
condition_(condition),
true_block_(true_block),
false_block_(false_block) {
- DCHECK(mode == kFlags_branch || mode == kFlags_branch_and_poison);
+ DCHECK(mode == kFlags_branch);
DCHECK_NOT_NULL(true_block);
DCHECK_NOT_NULL(false_block);
}
@@ -245,7 +220,7 @@ class FlagsContinuation final {
frame_state_or_result_(frame_state),
extra_args_(extra_args),
extra_args_count_(extra_args_count) {
- DCHECK(mode == kFlags_deoptimize || mode == kFlags_deoptimize_and_poison);
+ DCHECK(mode == kFlags_deoptimize);
DCHECK_NOT_NULL(frame_state);
}
@@ -338,8 +313,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
: kDisableScheduling,
EnableRootsRelativeAddressing enable_roots_relative_addressing =
kDisableRootsRelativeAddressing,
- PoisoningMitigationLevel poisoning_level =
- PoisoningMitigationLevel::kDontPoison,
EnableTraceTurboJson trace_turbo = kDisableTraceTurboJson);
// Visit code for the entire graph with the included schedule.
@@ -443,8 +416,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
static MachineOperatorBuilder::AlignmentRequirements AlignmentRequirements();
- bool NeedsPoisoning(IsSafetyCheck safety_check) const;
-
// ===========================================================================
// ============ Architecture-independent graph covering methods. =============
// ===========================================================================
@@ -681,8 +652,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
void VisitWordCompareZero(Node* user, Node* value, FlagsContinuation* cont);
- void EmitWordPoisonOnSpeculation(Node* node);
-
void EmitPrepareArguments(ZoneVector<compiler::PushParameter>* arguments,
const CallDescriptor* call_descriptor, Node* node);
void EmitPrepareResults(ZoneVector<compiler::PushParameter>* results,
@@ -797,7 +766,6 @@ class V8_EXPORT_PRIVATE InstructionSelector final {
FrameStateInput::Equal>
state_values_cache_;
- PoisoningMitigationLevel poisoning_level_;
Frame* frame_;
bool instruction_selection_failed_;
ZoneVector<std::pair<int, int>> instr_origins_;
diff --git a/deps/v8/src/compiler/backend/instruction.cc b/deps/v8/src/compiler/backend/instruction.cc
index 63ca78e060..0da8e054ae 100644
--- a/deps/v8/src/compiler/backend/instruction.cc
+++ b/deps/v8/src/compiler/backend/instruction.cc
@@ -410,12 +410,8 @@ std::ostream& operator<<(std::ostream& os, const FlagsMode& fm) {
return os;
case kFlags_branch:
return os << "branch";
- case kFlags_branch_and_poison:
- return os << "branch_and_poison";
case kFlags_deoptimize:
return os << "deoptimize";
- case kFlags_deoptimize_and_poison:
- return os << "deoptimize_and_poison";
case kFlags_set:
return os << "set";
case kFlags_trap:
diff --git a/deps/v8/src/compiler/backend/instruction.h b/deps/v8/src/compiler/backend/instruction.h
index 204683c973..8698ed8a98 100644
--- a/deps/v8/src/compiler/backend/instruction.h
+++ b/deps/v8/src/compiler/backend/instruction.h
@@ -935,8 +935,7 @@ class V8_EXPORT_PRIVATE Instruction final {
bool IsDeoptimizeCall() const {
return arch_opcode() == ArchOpcode::kArchDeoptimize ||
- FlagsModeField::decode(opcode()) == kFlags_deoptimize ||
- FlagsModeField::decode(opcode()) == kFlags_deoptimize_and_poison;
+ FlagsModeField::decode(opcode()) == kFlags_deoptimize;
}
bool IsTrap() const {
diff --git a/deps/v8/src/compiler/backend/jump-threading.cc b/deps/v8/src/compiler/backend/jump-threading.cc
index e91b7e17d2..258d05955e 100644
--- a/deps/v8/src/compiler/backend/jump-threading.cc
+++ b/deps/v8/src/compiler/backend/jump-threading.cc
@@ -55,17 +55,6 @@ struct JumpThreadingState {
RpoNumber onstack() { return RpoNumber::FromInt(-2); }
};
-bool IsBlockWithBranchPoisoning(InstructionSequence* code,
- InstructionBlock* block) {
- if (block->PredecessorCount() != 1) return false;
- RpoNumber pred_rpo = (block->predecessors())[0];
- const InstructionBlock* pred = code->InstructionBlockAt(pred_rpo);
- if (pred->code_start() == pred->code_end()) return false;
- Instruction* instr = code->InstructionAt(pred->code_end() - 1);
- FlagsMode mode = FlagsModeField::decode(instr->opcode());
- return mode == kFlags_branch_and_poison;
-}
-
} // namespace
bool JumpThreading::ComputeForwarding(Zone* local_zone,
@@ -92,85 +81,80 @@ bool JumpThreading::ComputeForwarding(Zone* local_zone,
TRACE("jt [%d] B%d\n", static_cast<int>(stack.size()),
block->rpo_number().ToInt());
RpoNumber fw = block->rpo_number();
- if (!IsBlockWithBranchPoisoning(code, block)) {
- bool fallthru = true;
- for (int i = block->code_start(); i < block->code_end(); ++i) {
- Instruction* instr = code->InstructionAt(i);
- if (!instr->AreMovesRedundant()) {
- // can't skip instructions with non redundant moves.
- TRACE(" parallel move\n");
- fallthru = false;
- } else if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
- // can't skip instructions with flags continuations.
- TRACE(" flags\n");
- fallthru = false;
- } else if (instr->IsNop()) {
- // skip nops.
- TRACE(" nop\n");
- continue;
- } else if (instr->arch_opcode() == kArchJmp) {
- // try to forward the jump instruction.
- TRACE(" jmp\n");
- // if this block deconstructs the frame, we can't forward it.
- // TODO(mtrofin): we can still forward if we end up building
- // the frame at start. So we should move the decision of whether
- // to build a frame or not in the register allocator, and trickle it
- // here and to the code generator.
- if (frame_at_start || !(block->must_deconstruct_frame() ||
- block->must_construct_frame())) {
- fw = code->InputRpo(instr, 0);
- }
- fallthru = false;
- } else if (instr->IsRet()) {
- TRACE(" ret\n");
- if (fallthru) {
- CHECK_IMPLIES(block->must_construct_frame(),
- block->must_deconstruct_frame());
- // Only handle returns with immediate/constant operands, since
- // they must always be the same for all returns in a function.
- // Dynamic return values might use different registers at
- // different return sites and therefore cannot be shared.
- if (instr->InputAt(0)->IsImmediate()) {
- int32_t return_size = ImmediateOperand::cast(instr->InputAt(0))
- ->inline_int32_value();
- // Instructions can be shared only for blocks that share
- // the same |must_deconstruct_frame| attribute.
- if (block->must_deconstruct_frame()) {
- if (empty_deconstruct_frame_return_block ==
- RpoNumber::Invalid()) {
- empty_deconstruct_frame_return_block = block->rpo_number();
- empty_deconstruct_frame_return_size = return_size;
- } else if (empty_deconstruct_frame_return_size ==
- return_size) {
- fw = empty_deconstruct_frame_return_block;
- block->clear_must_deconstruct_frame();
- }
- } else {
- if (empty_no_deconstruct_frame_return_block ==
- RpoNumber::Invalid()) {
- empty_no_deconstruct_frame_return_block =
- block->rpo_number();
- empty_no_deconstruct_frame_return_size = return_size;
- } else if (empty_no_deconstruct_frame_return_size ==
- return_size) {
- fw = empty_no_deconstruct_frame_return_block;
- }
+ bool fallthru = true;
+ for (int i = block->code_start(); i < block->code_end(); ++i) {
+ Instruction* instr = code->InstructionAt(i);
+ if (!instr->AreMovesRedundant()) {
+        // can't skip instructions with non-redundant moves.
+ TRACE(" parallel move\n");
+ fallthru = false;
+ } else if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+ // can't skip instructions with flags continuations.
+ TRACE(" flags\n");
+ fallthru = false;
+ } else if (instr->IsNop()) {
+ // skip nops.
+ TRACE(" nop\n");
+ continue;
+ } else if (instr->arch_opcode() == kArchJmp) {
+ // try to forward the jump instruction.
+ TRACE(" jmp\n");
+ // if this block deconstructs the frame, we can't forward it.
+ // TODO(mtrofin): we can still forward if we end up building
+ // the frame at start. So we should move the decision of whether
+        // to build a frame or not into the register allocator, and trickle it
+ // here and to the code generator.
+ if (frame_at_start || !(block->must_deconstruct_frame() ||
+ block->must_construct_frame())) {
+ fw = code->InputRpo(instr, 0);
+ }
+ fallthru = false;
+ } else if (instr->IsRet()) {
+ TRACE(" ret\n");
+ if (fallthru) {
+ CHECK_IMPLIES(block->must_construct_frame(),
+ block->must_deconstruct_frame());
+ // Only handle returns with immediate/constant operands, since
+ // they must always be the same for all returns in a function.
+ // Dynamic return values might use different registers at
+ // different return sites and therefore cannot be shared.
+ if (instr->InputAt(0)->IsImmediate()) {
+ int32_t return_size = ImmediateOperand::cast(instr->InputAt(0))
+ ->inline_int32_value();
+ // Instructions can be shared only for blocks that share
+ // the same |must_deconstruct_frame| attribute.
+ if (block->must_deconstruct_frame()) {
+ if (empty_deconstruct_frame_return_block ==
+ RpoNumber::Invalid()) {
+ empty_deconstruct_frame_return_block = block->rpo_number();
+ empty_deconstruct_frame_return_size = return_size;
+ } else if (empty_deconstruct_frame_return_size == return_size) {
+ fw = empty_deconstruct_frame_return_block;
+ block->clear_must_deconstruct_frame();
+ }
+ } else {
+ if (empty_no_deconstruct_frame_return_block ==
+ RpoNumber::Invalid()) {
+ empty_no_deconstruct_frame_return_block = block->rpo_number();
+ empty_no_deconstruct_frame_return_size = return_size;
+ } else if (empty_no_deconstruct_frame_return_size ==
+ return_size) {
+ fw = empty_no_deconstruct_frame_return_block;
}
}
}
- fallthru = false;
- } else {
- // can't skip other instructions.
- TRACE(" other\n");
- fallthru = false;
}
- break;
- }
- if (fallthru) {
- int next = 1 + block->rpo_number().ToInt();
- if (next < code->InstructionBlockCount())
- fw = RpoNumber::FromInt(next);
+ fallthru = false;
+ } else {
+ // can't skip other instructions.
+ TRACE(" other\n");
+ fallthru = false;
}
+ break;
+ }
+ if (fallthru) {
+ int next = 1 + block->rpo_number().ToInt();
+ if (next < code->InstructionBlockCount()) fw = RpoNumber::FromInt(next);
}
state.Forward(fw);
}
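
With branch poisoning gone, the forwarding loop above now runs for every
block unconditionally: a block forwards to itself, to the target of a lone
kArchJmp, to a shareable empty return block, or to its fall-through
successor, and chains of such blocks collapse transitively. The core idea in
miniature (illustrative representation; the real pass uses the onstack()
marker above to cut cycles):

    #include <vector>

    // Follow jump-only blocks to the ultimate target; assumes no cycles.
    int UltimateTarget(const std::vector<int>& forward, int block) {
      while (forward[block] != block) block = forward[block];
      return block;
    }
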
@@ -225,7 +209,7 @@ void JumpThreading::ApplyForwarding(Zone* local_zone,
for (int i = block->code_start(); i < block->code_end(); ++i) {
Instruction* instr = code->InstructionAt(i);
FlagsMode mode = FlagsModeField::decode(instr->opcode());
- if (mode == kFlags_branch || mode == kFlags_branch_and_poison) {
+ if (mode == kFlags_branch) {
fallthru = false; // branches don't fall through to the next block.
} else if (instr->arch_opcode() == kArchJmp ||
instr->arch_opcode() == kArchRet) {
diff --git a/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc b/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc
new file mode 100644
index 0000000000..0397a36145
--- /dev/null
+++ b/deps/v8/src/compiler/backend/loong64/code-generator-loong64.cc
@@ -0,0 +1,2636 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/callable.h"
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/codegen/optimized-compilation-info.h"
+#include "src/compiler/backend/code-generator-impl.h"
+#include "src/compiler/backend/code-generator.h"
+#include "src/compiler/backend/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/osr.h"
+#include "src/heap/memory-chunk.h"
+
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-code-manager.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ tasm()->
+
+// TODO(LOONG_dev): consider renaming these macros.
+#define TRACE_MSG(msg) \
+ PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
+ __LINE__)
+
+#define TRACE_UNIMPL() \
+ PrintF("UNIMPLEMENTED code_generator_loong64: %s at line %d\n", \
+ __FUNCTION__, __LINE__)
+
+// Adds Loong64-specific methods to convert InstructionOperands.
+class Loong64OperandConverter final : public InstructionOperandConverter {
+ public:
+ Loong64OperandConverter(CodeGenerator* gen, Instruction* instr)
+ : InstructionOperandConverter(gen, instr) {}
+
+ FloatRegister OutputSingleRegister(size_t index = 0) {
+ return ToSingleRegister(instr_->OutputAt(index));
+ }
+
+ FloatRegister InputSingleRegister(size_t index) {
+ return ToSingleRegister(instr_->InputAt(index));
+ }
+
+ FloatRegister ToSingleRegister(InstructionOperand* op) {
+    // The Single (Float) and Double register namespaces are the same on
+    // LOONG64; both are typedefs of FPURegister.
+ return ToDoubleRegister(op);
+ }
+
+ Register InputOrZeroRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) {
+ DCHECK_EQ(0, InputInt32(index));
+ return zero_reg;
+ }
+ return InputRegister(index);
+ }
+
+ DoubleRegister InputOrZeroDoubleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputDoubleRegister(index);
+ }
+
+ DoubleRegister InputOrZeroSingleRegister(size_t index) {
+ if (instr_->InputAt(index)->IsImmediate()) return kDoubleRegZero;
+
+ return InputSingleRegister(index);
+ }
+
+ Operand InputImmediate(size_t index) {
+ Constant constant = ToConstant(instr_->InputAt(index));
+ switch (constant.type()) {
+ case Constant::kInt32:
+ return Operand(constant.ToInt32());
+ case Constant::kInt64:
+ return Operand(constant.ToInt64());
+ case Constant::kFloat32:
+ return Operand::EmbeddedNumber(constant.ToFloat32());
+ case Constant::kFloat64:
+ return Operand::EmbeddedNumber(constant.ToFloat64().value());
+ case Constant::kExternalReference:
+ case Constant::kCompressedHeapObject:
+ case Constant::kHeapObject:
+ break;
+ case Constant::kDelayedStringConstant:
+ return Operand::EmbeddedStringConstant(
+ constant.ToDelayedStringConstant());
+ case Constant::kRpoNumber:
+ UNREACHABLE(); // TODO(titzer): RPO immediates on loong64?
+ }
+ UNREACHABLE();
+ }
+
+ Operand InputOperand(size_t index) {
+ InstructionOperand* op = instr_->InputAt(index);
+ if (op->IsRegister()) {
+ return Operand(ToRegister(op));
+ }
+ return InputImmediate(index);
+ }
+
+ MemOperand MemoryOperand(size_t* first_index) {
+ const size_t index = *first_index;
+ switch (AddressingModeField::decode(instr_->opcode())) {
+ case kMode_None:
+ break;
+ case kMode_Root:
+ *first_index += 1;
+ return MemOperand(kRootRegister, InputInt32(index));
+ case kMode_MRI:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+ case kMode_MRR:
+ *first_index += 2;
+ return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
+ }
+ UNREACHABLE();
+ }
+
+ MemOperand MemoryOperand(size_t index = 0) { return MemoryOperand(&index); }
+
+ MemOperand ToMemOperand(InstructionOperand* op) const {
+ DCHECK_NOT_NULL(op);
+ DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
+ return SlotToMemOperand(AllocatedOperand::cast(op)->index());
+ }
+
+ MemOperand SlotToMemOperand(int slot) const {
+ FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
+ return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
+ }
+};
+
+static inline bool HasRegisterInput(Instruction* instr, size_t index) {
+ return instr->InputAt(index)->IsRegister();
+}
+
+namespace {
+
+class OutOfLineRecordWrite final : public OutOfLineCode {
+ public:
+ OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand offset,
+ Register value, RecordWriteMode mode,
+ StubCallMode stub_mode)
+ : OutOfLineCode(gen),
+ object_(object),
+ offset_(offset),
+ value_(value),
+ mode_(mode),
+#if V8_ENABLE_WEBASSEMBLY
+ stub_mode_(stub_mode),
+#endif // V8_ENABLE_WEBASSEMBLY
+ must_save_lr_(!gen->frame_access_state()->has_frame()),
+ zone_(gen->zone()) {
+ }
+
+ void Generate() final {
+ __ CheckPageFlag(value_, MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ exit());
+ RememberedSetAction const remembered_set_action =
+ mode_ > RecordWriteMode::kValueIsMap ? RememberedSetAction::kEmit
+ : RememberedSetAction::kOmit;
+ SaveFPRegsMode const save_fp_mode = frame()->DidAllocateDoubleRegisters()
+ ? SaveFPRegsMode::kSave
+ : SaveFPRegsMode::kIgnore;
+ if (must_save_lr_) {
+ // We need to save and restore ra if the frame was elided.
+ __ Push(ra);
+ }
+ if (mode_ == RecordWriteMode::kValueIsEphemeronKey) {
+ __ CallEphemeronKeyBarrier(object_, offset_, save_fp_mode);
+#if V8_ENABLE_WEBASSEMBLY
+ } else if (stub_mode_ == StubCallMode::kCallWasmRuntimeStub) {
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
+ __ CallRecordWriteStubSaveRegisters(object_, offset_,
+ remembered_set_action, save_fp_mode,
+ StubCallMode::kCallWasmRuntimeStub);
+#endif // V8_ENABLE_WEBASSEMBLY
+ } else {
+ __ CallRecordWriteStubSaveRegisters(object_, offset_,
+ remembered_set_action, save_fp_mode);
+ }
+ if (must_save_lr_) {
+ __ Pop(ra);
+ }
+ }
+
+ private:
+ Register const object_;
+ Operand const offset_;
+ Register const value_;
+ RecordWriteMode const mode_;
+#if V8_ENABLE_WEBASSEMBLY
+ StubCallMode const stub_mode_;
+#endif // V8_ENABLE_WEBASSEMBLY
+ bool must_save_lr_;
+ Zone* zone_;
+};
+
+#define CREATE_OOL_CLASS(ool_name, tasm_ool_name, T) \
+ class ool_name final : public OutOfLineCode { \
+ public: \
+ ool_name(CodeGenerator* gen, T dst, T src1, T src2) \
+ : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {} \
+ \
+ void Generate() final { __ tasm_ool_name(dst_, src1_, src2_); } \
+ \
+ private: \
+ T const dst_; \
+ T const src1_; \
+ T const src2_; \
+ }
+
+CREATE_OOL_CLASS(OutOfLineFloat32Max, Float32MaxOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat32Min, Float32MinOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat64Max, Float64MaxOutOfLine, FPURegister);
+CREATE_OOL_CLASS(OutOfLineFloat64Min, Float64MinOutOfLine, FPURegister);
+
+#undef CREATE_OOL_CLASS
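+
+// For reference, the first instantiation above expands to roughly:
+//
+//   class OutOfLineFloat32Max final : public OutOfLineCode {
+//    public:
+//     OutOfLineFloat32Max(CodeGenerator* gen, FPURegister dst,
+//                         FPURegister src1, FPURegister src2)
+//         : OutOfLineCode(gen), dst_(dst), src1_(src1), src2_(src2) {}
+//     void Generate() final { __ Float32MaxOutOfLine(dst_, src1_, src2_); }
+//    private:
+//     FPURegister const dst_, src1_, src2_;
+//   };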
+
+Condition FlagsConditionToConditionCmp(FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ return eq;
+ case kNotEqual:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedGreaterThanOrEqual:
+ return ge;
+ case kSignedLessThanOrEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kUnsignedLessThan:
+ return lo;
+ case kUnsignedGreaterThanOrEqual:
+ return hs;
+ case kUnsignedLessThanOrEqual:
+ return ls;
+ case kUnsignedGreaterThan:
+ return hi;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ break;
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
+Condition FlagsConditionToConditionTst(FlagsCondition condition) {
+ switch (condition) {
+ case kNotEqual:
+ return ne;
+ case kEqual:
+ return eq;
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
+Condition FlagsConditionToConditionOvf(FlagsCondition condition) {
+ switch (condition) {
+ case kOverflow:
+ return ne;
+ case kNotOverflow:
+ return eq;
+ default:
+ break;
+ }
+ UNREACHABLE();
+}
+
+FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
+ FlagsCondition condition) {
+ switch (condition) {
+ case kEqual:
+ *predicate = true;
+ return CEQ;
+ case kNotEqual:
+ *predicate = false;
+ return CEQ;
+ case kUnsignedLessThan:
+ *predicate = true;
+ return CLT;
+ case kUnsignedGreaterThanOrEqual:
+ *predicate = false;
+ return CLT;
+ case kUnsignedLessThanOrEqual:
+ *predicate = true;
+ return CLE;
+ case kUnsignedGreaterThan:
+ *predicate = false;
+ return CLE;
+ case kUnorderedEqual:
+ case kUnorderedNotEqual:
+ *predicate = true;
+ break;
+ default:
+ *predicate = true;
+ break;
+ }
+ UNREACHABLE();
+}
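+
+// Illustrative call site: the FPU has no direct "not equal" condition, so
+// kNotEqual is answered as CEQ with a false predicate, i.e. the consumer
+// branches on the *cleared* compare flag:
+//
+//   bool predicate;
+//   FPUCondition cc = FlagsConditionToConditionCmpFPU(&predicate, kNotEqual);
+//   // cc == CEQ, predicate == false.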
+
+} // namespace
+
+#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
+ do { \
+ __ asm_instr(i.OutputRegister(), i.MemoryOperand()); \
+ __ dbar(0); \
+ } while (0)
+
+// TODO(LOONG_dev): remove second dbar?
+#define ASSEMBLE_ATOMIC_STORE_INTEGER(asm_instr) \
+ do { \
+ __ dbar(0); \
+ __ asm_instr(i.InputOrZeroRegister(2), i.MemoryOperand()); \
+ __ dbar(0); \
+ } while (0)
+
+// Only used for sub_w and sub_d.
+#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \
+ do { \
+ Label binop; \
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ dbar(0); \
+ __ bind(&binop); \
+ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
+ __ bin_instr(i.TempRegister(1), i.OutputRegister(0), \
+ Operand(i.InputRegister(2))); \
+ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \
+ __ dbar(0); \
+ } while (0)
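+
+// The macro above emits the classic load-linked/store-conditional retry
+// loop, bracketed by dbar barriers. A C++ analogy (sketch only; std::atomic
+// stands in for the ll/sc pair):
+//
+//   #include <atomic>
+//   int32_t AtomicSub(std::atomic<int32_t>* cell, int32_t v) {
+//     int32_t old = cell->load();
+//     while (!cell->compare_exchange_weak(old, old - v)) {
+//       // another core intervened; reload and retry
+//     }
+//     return old;  // like i.OutputRegister(0) above
+//   }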
+
+// TODO(LOONG_dev): remove second dbar?
+#define ASSEMBLE_ATOMIC_BINOP_EXT(load_linked, store_conditional, sign_extend, \
+ size, bin_instr, representation) \
+ do { \
+ Label binop; \
+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (representation == 32) { \
+ __ andi(i.TempRegister(3), i.TempRegister(0), 0x3); \
+ } else { \
+ DCHECK_EQ(representation, 64); \
+ __ andi(i.TempRegister(3), i.TempRegister(0), 0x7); \
+ } \
+ __ Sub_d(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(3))); \
+ __ slli_w(i.TempRegister(3), i.TempRegister(3), 3); \
+ __ dbar(0); \
+ __ bind(&binop); \
+ __ load_linked(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(1), i.TempRegister(3), \
+ size, sign_extend); \
+ __ bin_instr(i.TempRegister(2), i.OutputRegister(0), \
+ Operand(i.InputRegister(2))); \
+ __ InsertBits(i.TempRegister(1), i.TempRegister(2), i.TempRegister(3), \
+ size); \
+ __ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&binop, eq, i.TempRegister(1), Operand(zero_reg)); \
+ __ dbar(0); \
+ } while (0)
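+
+// The _EXT variant works on the naturally aligned containing word: the low
+// address bits select the byte lane, and shifting them left by 3 yields the
+// bit offset fed to ExtractBits/InsertBits. The offset math, in brief
+// (sketch for the 32-bit representation case):
+//
+//   uintptr_t addr = base + index;      // TempRegister(0) before alignment
+//   uintptr_t lane = addr & 0x3;        // TempRegister(3)
+//   uintptr_t word = addr - lane;       // aligned word address
+//   unsigned bit_shift = lane * 8;      // lane << 3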
+
+// TODO(LOONG_dev): remove second dbar?
+#define ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT( \
+ load_linked, store_conditional, sign_extend, size, representation) \
+ do { \
+ Label exchange; \
+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (representation == 32) { \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
+ } else { \
+ DCHECK_EQ(representation, 64); \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x7); \
+ } \
+ __ Sub_d(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(1))); \
+ __ slli_w(i.TempRegister(1), i.TempRegister(1), 3); \
+ __ dbar(0); \
+ __ bind(&exchange); \
+ __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
+ size, sign_extend); \
+ __ InsertBits(i.TempRegister(2), i.InputRegister(2), i.TempRegister(1), \
+ size); \
+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exchange, eq, i.TempRegister(2), Operand(zero_reg)); \
+ __ dbar(0); \
+ } while (0)
+
+// TODO(LOONG_dev): remove second dbar?
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(load_linked, \
+ store_conditional) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ dbar(0); \
+ __ bind(&compareExchange); \
+ __ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&exit, ne, i.InputRegister(2), \
+ Operand(i.OutputRegister(0))); \
+ __ mov(i.TempRegister(2), i.InputRegister(3)); \
+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&compareExchange, eq, i.TempRegister(2), \
+ Operand(zero_reg)); \
+ __ bind(&exit); \
+ __ dbar(0); \
+ } while (0)
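+
+// C++ analogy for the loop above (sketch only): the store is attempted only
+// when the observed value matches the expected input, and the observed
+// value is returned either way.
+//
+//   int32_t CompareExchange(std::atomic<int32_t>* cell, int32_t expected,
+//                           int32_t desired) {
+//     int32_t old = expected;
+//     cell->compare_exchange_strong(old, desired);
+//     return old;  // i.OutputRegister(0): the value observed in memory
+//   }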
+
+// TODO(LOONG_dev): remove second dbar?
+#define ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT( \
+ load_linked, store_conditional, sign_extend, size, representation) \
+ do { \
+ Label compareExchange; \
+ Label exit; \
+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ if (representation == 32) { \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x3); \
+ } else { \
+ DCHECK_EQ(representation, 64); \
+ __ andi(i.TempRegister(1), i.TempRegister(0), 0x7); \
+ } \
+ __ Sub_d(i.TempRegister(0), i.TempRegister(0), \
+ Operand(i.TempRegister(1))); \
+ __ slli_w(i.TempRegister(1), i.TempRegister(1), 3); \
+ __ dbar(0); \
+ __ bind(&compareExchange); \
+ __ load_linked(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ ExtractBits(i.OutputRegister(0), i.TempRegister(2), i.TempRegister(1), \
+ size, sign_extend); \
+ __ ExtractBits(i.InputRegister(2), i.InputRegister(2), zero_reg, size, \
+ sign_extend); \
+ __ BranchShort(&exit, ne, i.InputRegister(2), \
+ Operand(i.OutputRegister(0))); \
+ __ InsertBits(i.TempRegister(2), i.InputRegister(3), i.TempRegister(1), \
+ size); \
+ __ store_conditional(i.TempRegister(2), MemOperand(i.TempRegister(0), 0)); \
+ __ BranchShort(&compareExchange, eq, i.TempRegister(2), \
+ Operand(zero_reg)); \
+ __ bind(&exit); \
+ __ dbar(0); \
+ } while (0)
+
+#define ASSEMBLE_IEEE754_BINOP(name) \
+ do { \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ UseScratchRegisterScope temps(tasm()); \
+ Register scratch = temps.Acquire(); \
+ __ PrepareCallCFunction(0, 2, scratch); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 2); \
+ } while (0)
+
+#define ASSEMBLE_IEEE754_UNOP(name) \
+ do { \
+ FrameScope scope(tasm(), StackFrame::MANUAL); \
+ UseScratchRegisterScope temps(tasm()); \
+ Register scratch = temps.Acquire(); \
+ __ PrepareCallCFunction(0, 1, scratch); \
+ __ CallCFunction(ExternalReference::ieee754_##name##_function(), 0, 1); \
+ } while (0)
+
+#define ASSEMBLE_F64X2_ARITHMETIC_BINOP(op) \
+ do { \
+ __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \
+ i.InputSimd128Register(1)); \
+ } while (0)
+
+void CodeGenerator::AssembleDeconstructFrame() {
+ __ mov(sp, fp);
+ __ Pop(ra, fp);
+}
+
+void CodeGenerator::AssemblePrepareTailCall() {
+ if (frame_access_state()->has_frame()) {
+ __ Ld_d(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+ __ Ld_d(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ }
+ frame_access_state()->SetFrameAccessToSP();
+}
+
+namespace {
+
+void AdjustStackPointerForTailCall(TurboAssembler* tasm,
+ FrameAccessState* state,
+ int new_slot_above_sp,
+ bool allow_shrinkage = true) {
+ int current_sp_offset = state->GetSPToFPSlotCount() +
+ StandardFrameConstants::kFixedSlotCountAboveFp;
+ int stack_slot_delta = new_slot_above_sp - current_sp_offset;
+ if (stack_slot_delta > 0) {
+ tasm->Sub_d(sp, sp, stack_slot_delta * kSystemPointerSize);
+ state->IncreaseSPDelta(stack_slot_delta);
+ } else if (allow_shrinkage && stack_slot_delta < 0) {
+ tasm->Add_d(sp, sp, -stack_slot_delta * kSystemPointerSize);
+ state->IncreaseSPDelta(stack_slot_delta);
+ }
+}
+
+} // namespace
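+
+// Worked example for AdjustStackPointerForTailCall: with a current offset of
+// 4 slots and first_unused_slot_offset == 6, stack_slot_delta == 2 and sp is
+// lowered by 2 * kSystemPointerSize; a negative delta instead raises sp when
+// shrinkage is allowed.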
+
+void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
+ int first_unused_slot_offset) {
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ first_unused_slot_offset, false);
+}
+
+void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
+ int first_unused_slot_offset) {
+ AdjustStackPointerForTailCall(tasm(), frame_access_state(),
+ first_unused_slot_offset);
+}
+
+// Check that {kJavaScriptCallCodeStartRegister} is correct.
+void CodeGenerator::AssembleCodeStartRegisterCheck() {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ ComputeCodeStartAddress(scratch);
+ __ Assert(eq, AbortReason::kWrongFunctionCodeStart,
+ kJavaScriptCallCodeStartRegister, Operand(scratch));
+}
+
+// Check if the code object is marked for deoptimization. If it is, then it
+// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we need
+// to:
+// 1. read from memory the word that contains that bit, which can be found in
+// the flags in the referenced {CodeDataContainer} object;
+// 2. test kMarkedForDeoptimizationBit in those flags; and
+//    3. if it is not zero, jump to the builtin.
+void CodeGenerator::BailoutIfDeoptimized() {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ int offset = Code::kCodeDataContainerOffset - Code::kHeaderSize;
+ __ Ld_d(scratch, MemOperand(kJavaScriptCallCodeStartRegister, offset));
+ __ Ld_w(scratch, FieldMemOperand(
+ scratch, CodeDataContainer::kKindSpecificFlagsOffset));
+ __ And(scratch, scratch, Operand(1 << Code::kMarkedForDeoptimizationBit));
+ __ Jump(BUILTIN_CODE(isolate(), CompileLazyDeoptimizedCode),
+ RelocInfo::CODE_TARGET, ne, scratch, Operand(zero_reg));
+}
+
+// Assembles an instruction after register allocation, producing machine code.
+CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
+ Instruction* instr) {
+ Loong64OperandConverter i(this, instr);
+ InstructionCode opcode = instr->opcode();
+ ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
+ switch (arch_opcode) {
+ case kArchCallCodeObject: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ __ Call(i.InputCode(0), RelocInfo::CODE_TARGET);
+ } else {
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ CallCodeObject(reg);
+ }
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchCallBuiltinPointer: {
+ DCHECK(!instr->InputAt(0)->IsImmediate());
+ Register builtin_index = i.InputRegister(0);
+ __ CallBuiltinByIndex(builtin_index);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+#if V8_ENABLE_WEBASSEMBLY
+ case kArchCallWasmFunction: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt64());
+ __ Call(wasm_code, constant.rmode());
+ } else {
+ __ Call(i.InputRegister(0));
+ }
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchTailCallWasm: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ Constant constant = i.ToConstant(instr->InputAt(0));
+ Address wasm_code = static_cast<Address>(constant.ToInt64());
+ __ Jump(wasm_code, constant.rmode());
+ } else {
+ __ Jump(i.InputRegister(0));
+ }
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ case kArchTailCallCodeObject: {
+ if (instr->InputAt(0)->IsImmediate()) {
+ __ Jump(i.InputCode(0), RelocInfo::CODE_TARGET);
+ } else {
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ JumpCodeObject(reg);
+ }
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+ case kArchTailCallAddress: {
+ CHECK(!instr->InputAt(0)->IsImmediate());
+ Register reg = i.InputRegister(0);
+ DCHECK_IMPLIES(
+ instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
+ reg == kJavaScriptCallCodeStartRegister);
+ __ Jump(reg);
+ frame_access_state()->ClearSPDelta();
+ frame_access_state()->SetFrameAccessToDefault();
+ break;
+ }
+ case kArchCallJSFunction: {
+ Register func = i.InputRegister(0);
+ if (FLAG_debug_code) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ // Check the function's context matches the context argument.
+ __ Ld_d(scratch, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ Assert(eq, AbortReason::kWrongFunctionContext, cp, Operand(scratch));
+ }
+ static_assert(kJavaScriptCallCodeStartRegister == a2, "ABI mismatch");
+ __ Ld_d(a2, FieldMemOperand(func, JSFunction::kCodeOffset));
+ __ CallCodeObject(a2);
+ RecordCallPosition(instr);
+ frame_access_state()->ClearSPDelta();
+ break;
+ }
+ case kArchPrepareCallCFunction: {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ int const num_parameters = MiscField::decode(instr->opcode());
+ __ PrepareCallCFunction(num_parameters, scratch);
+ // Frame alignment requires using FP-relative frame addressing.
+ frame_access_state()->SetFrameAccessToFP();
+ break;
+ }
+ case kArchSaveCallerRegisters: {
+ fp_mode_ =
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
+ // kReturnRegister0 should have been saved before entering the stub.
+ int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
+ DCHECK(IsAligned(bytes, kSystemPointerSize));
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
+ DCHECK(!caller_registers_saved_);
+ caller_registers_saved_ = true;
+ break;
+ }
+ case kArchRestoreCallerRegisters: {
+ DCHECK(fp_mode_ ==
+ static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
+ DCHECK(fp_mode_ == SaveFPRegsMode::kIgnore ||
+ fp_mode_ == SaveFPRegsMode::kSave);
+ // Don't overwrite the returned value.
+ int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(-(bytes / kSystemPointerSize));
+ DCHECK_EQ(0, frame_access_state()->sp_delta());
+ DCHECK(caller_registers_saved_);
+ caller_registers_saved_ = false;
+ break;
+ }
+ case kArchPrepareTailCall:
+ AssemblePrepareTailCall();
+ break;
+ case kArchCallCFunction: {
+ int const num_parameters = MiscField::decode(instr->opcode());
+#if V8_ENABLE_WEBASSEMBLY
+ Label start_call;
+ bool isWasmCapiFunction =
+ linkage()->GetIncomingDescriptor()->IsWasmCapiFunction();
+      // Byte distance from start_call to the return address.
+ int offset = __ root_array_available() ? 36 : 80; // 9 or 20 instrs
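+      // LOONG64 instructions are 4 bytes each, so 36 and 80 bytes correspond
+      // to the 9- and 20-instruction sequences noted above; the debug-code
+      // path below adds 12 bytes (3 instructions).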
+#endif // V8_ENABLE_WEBASSEMBLY
+#if V8_HOST_ARCH_LOONG64
+ if (FLAG_debug_code) {
+ offset += 12; // see CallCFunction
+ }
+#endif
+#if V8_ENABLE_WEBASSEMBLY
+ if (isWasmCapiFunction) {
+ __ bind(&start_call);
+ __ pcaddi(t7, -4);
+ __ St_d(t7, MemOperand(fp, WasmExitFrameConstants::kCallingPCOffset));
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ if (instr->InputAt(0)->IsImmediate()) {
+ ExternalReference ref = i.InputExternalReference(0);
+ __ CallCFunction(ref, num_parameters);
+ } else {
+ Register func = i.InputRegister(0);
+ __ CallCFunction(func, num_parameters);
+ }
+#if V8_ENABLE_WEBASSEMBLY
+ if (isWasmCapiFunction) {
+ CHECK_EQ(offset, __ SizeOfCodeGeneratedSince(&start_call));
+ RecordSafepoint(instr->reference_map());
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ frame_access_state()->SetFrameAccessToDefault();
+ // Ideally, we should decrement SP delta to match the change of stack
+ // pointer in CallCFunction. However, for certain architectures (e.g.
+      // ARM), there may be stricter alignment requirements, causing the old
+      // SP to be saved on the stack. In those cases, we cannot calculate the
+      // SP delta statically.
+ frame_access_state()->ClearSPDelta();
+ if (caller_registers_saved_) {
+ // Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
+ // Here, we assume the sequence to be:
+ // kArchSaveCallerRegisters;
+ // kArchCallCFunction;
+ // kArchRestoreCallerRegisters;
+ int bytes =
+ __ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
+ frame_access_state()->IncreaseSPDelta(bytes / kSystemPointerSize);
+ }
+ break;
+ }
+ case kArchJmp:
+ AssembleArchJump(i.InputRpo(0));
+ break;
+ case kArchBinarySearchSwitch:
+ AssembleArchBinarySearchSwitch(instr);
+ break;
+ case kArchTableSwitch:
+ AssembleArchTableSwitch(instr);
+ break;
+ case kArchAbortCSAAssert:
+ DCHECK(i.InputRegister(0) == a0);
+ {
+ // We don't actually want to generate a pile of code for this, so just
+ // claim there is a stack frame, without generating one.
+ FrameScope scope(tasm(), StackFrame::NONE);
+ __ Call(isolate()->builtins()->code_handle(Builtin::kAbortCSAAssert),
+ RelocInfo::CODE_TARGET);
+ }
+ __ stop();
+ break;
+ case kArchDebugBreak:
+ __ DebugBreak();
+ break;
+ case kArchComment:
+ __ RecordComment(reinterpret_cast<const char*>(i.InputInt64(0)));
+ break;
+ case kArchNop:
+ case kArchThrowTerminator:
+ // don't emit code for nops.
+ break;
+ case kArchDeoptimize: {
+ DeoptimizationExit* exit =
+ BuildTranslation(instr, -1, 0, 0, OutputFrameStateCombine::Ignore());
+ __ Branch(exit->label());
+ break;
+ }
+ case kArchRet:
+ AssembleReturn(instr->InputAt(0));
+ break;
+ case kArchStackPointerGreaterThan: {
+ Register lhs_register = sp;
+ uint32_t offset;
+ if (ShouldApplyOffsetToStackCheck(instr, &offset)) {
+ lhs_register = i.TempRegister(1);
+ __ Sub_d(lhs_register, sp, offset);
+ }
+ __ Sltu(i.TempRegister(0), i.InputRegister(0), lhs_register);
+ break;
+ }
+ case kArchStackCheckOffset:
+ __ Move(i.OutputRegister(), Smi::FromInt(GetStackCheckOffset()));
+ break;
+ case kArchFramePointer:
+ __ mov(i.OutputRegister(), fp);
+ break;
+ case kArchParentFramePointer:
+ if (frame_access_state()->has_frame()) {
+ __ Ld_d(i.OutputRegister(), MemOperand(fp, 0));
+ } else {
+ __ mov(i.OutputRegister(), fp);
+ }
+ break;
+ case kArchTruncateDoubleToI:
+ __ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
+ i.InputDoubleRegister(0), DetermineStubCallMode());
+ break;
+ case kArchStoreWithWriteBarrier: // Fall through.
+ case kArchAtomicStoreWithWriteBarrier: {
+ RecordWriteMode mode =
+ static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
+ AddressingMode addressing_mode =
+ AddressingModeField::decode(instr->opcode());
+ Register object = i.InputRegister(0);
+ Operand offset(zero_reg);
+ if (addressing_mode == kMode_MRI) {
+ offset = Operand(i.InputInt64(1));
+ } else {
+ DCHECK_EQ(addressing_mode, kMode_MRR);
+ offset = Operand(i.InputRegister(1));
+ }
+ Register value = i.InputRegister(2);
+
+ auto ool = zone()->New<OutOfLineRecordWrite>(
+ this, object, offset, value, mode, DetermineStubCallMode());
+ if (arch_opcode == kArchStoreWithWriteBarrier) {
+ if (addressing_mode == kMode_MRI) {
+ __ St_d(value, MemOperand(object, i.InputInt64(1)));
+ } else {
+ DCHECK_EQ(addressing_mode, kMode_MRR);
+ __ St_d(value, MemOperand(object, i.InputRegister(1)));
+ }
+ } else {
+ DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
+ DCHECK_EQ(addressing_mode, kMode_MRI);
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ Add_d(scratch, object, Operand(i.InputInt64(1)));
+ __ amswap_db_d(zero_reg, value, scratch);
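+        // amswap_db_d atomically swaps |value| into the word at [scratch];
+        // the old value is discarded into zero_reg, and the _db form adds
+        // full barrier semantics, yielding an atomic store.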
+ }
+ if (mode > RecordWriteMode::kValueIsPointer) {
+ __ JumpIfSmi(value, ool->exit());
+ }
+ __ CheckPageFlag(object, MemoryChunk::kPointersFromHereAreInterestingMask,
+ ne, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
+ case kArchStackSlot: {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ FrameOffset offset =
+ frame_access_state()->GetFrameOffset(i.InputInt32(0));
+ Register base_reg = offset.from_stack_pointer() ? sp : fp;
+ __ Add_d(i.OutputRegister(), base_reg, Operand(offset.offset()));
+ if (FLAG_debug_code) {
+        // Verify that the output register is properly aligned.
+ __ And(scratch, i.OutputRegister(), Operand(kSystemPointerSize - 1));
+ __ Assert(eq, AbortReason::kAllocationIsNotDoubleAligned, scratch,
+ Operand(zero_reg));
+ }
+ break;
+ }
+ case kIeee754Float64Acos:
+ ASSEMBLE_IEEE754_UNOP(acos);
+ break;
+ case kIeee754Float64Acosh:
+ ASSEMBLE_IEEE754_UNOP(acosh);
+ break;
+ case kIeee754Float64Asin:
+ ASSEMBLE_IEEE754_UNOP(asin);
+ break;
+ case kIeee754Float64Asinh:
+ ASSEMBLE_IEEE754_UNOP(asinh);
+ break;
+ case kIeee754Float64Atan:
+ ASSEMBLE_IEEE754_UNOP(atan);
+ break;
+ case kIeee754Float64Atanh:
+ ASSEMBLE_IEEE754_UNOP(atanh);
+ break;
+ case kIeee754Float64Atan2:
+ ASSEMBLE_IEEE754_BINOP(atan2);
+ break;
+ case kIeee754Float64Cos:
+ ASSEMBLE_IEEE754_UNOP(cos);
+ break;
+ case kIeee754Float64Cosh:
+ ASSEMBLE_IEEE754_UNOP(cosh);
+ break;
+ case kIeee754Float64Cbrt:
+ ASSEMBLE_IEEE754_UNOP(cbrt);
+ break;
+ case kIeee754Float64Exp:
+ ASSEMBLE_IEEE754_UNOP(exp);
+ break;
+ case kIeee754Float64Expm1:
+ ASSEMBLE_IEEE754_UNOP(expm1);
+ break;
+ case kIeee754Float64Log:
+ ASSEMBLE_IEEE754_UNOP(log);
+ break;
+ case kIeee754Float64Log1p:
+ ASSEMBLE_IEEE754_UNOP(log1p);
+ break;
+ case kIeee754Float64Log2:
+ ASSEMBLE_IEEE754_UNOP(log2);
+ break;
+ case kIeee754Float64Log10:
+ ASSEMBLE_IEEE754_UNOP(log10);
+ break;
+ case kIeee754Float64Pow:
+ ASSEMBLE_IEEE754_BINOP(pow);
+ break;
+ case kIeee754Float64Sin:
+ ASSEMBLE_IEEE754_UNOP(sin);
+ break;
+ case kIeee754Float64Sinh:
+ ASSEMBLE_IEEE754_UNOP(sinh);
+ break;
+ case kIeee754Float64Tan:
+ ASSEMBLE_IEEE754_UNOP(tan);
+ break;
+ case kIeee754Float64Tanh:
+ ASSEMBLE_IEEE754_UNOP(tanh);
+ break;
+ case kLoong64Add_w:
+ __ Add_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Add_d:
+ __ Add_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64AddOvf_d:
+ __ AddOverflow_d(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), t8);
+ break;
+ case kLoong64Sub_w:
+ __ Sub_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Sub_d:
+ __ Sub_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64SubOvf_d:
+ __ SubOverflow_d(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), t8);
+ break;
+ case kLoong64Mul_w:
+ __ Mul_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64MulOvf_w:
+ __ MulOverflow_w(i.OutputRegister(), i.InputRegister(0),
+ i.InputOperand(1), t8);
+ break;
+ case kLoong64Mulh_w:
+ __ Mulh_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Mulh_wu:
+ __ Mulh_wu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Mulh_d:
+ __ Mulh_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Div_w:
+ __ Div_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kLoong64Div_wu:
+ __ Div_wu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kLoong64Mod_w:
+ __ Mod_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Mod_wu:
+ __ Mod_wu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Mul_d:
+ __ Mul_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Div_d:
+ __ Div_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kLoong64Div_du:
+ __ Div_du(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ __ masknez(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ break;
+ case kLoong64Mod_d:
+ __ Mod_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Mod_du:
+ __ Mod_du(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Alsl_d:
+ DCHECK(instr->InputAt(2)->IsImmediate());
+ __ Alsl_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputInt8(2), t7);
+ break;
+ case kLoong64Alsl_w:
+ DCHECK(instr->InputAt(2)->IsImmediate());
+ __ Alsl_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+ i.InputInt8(2), t7);
+ break;
+ case kLoong64And:
+ case kLoong64And32:
+ __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Or:
+ case kLoong64Or32:
+ __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Nor:
+ case kLoong64Nor32:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Nor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ } else {
+ DCHECK_EQ(0, i.InputOperand(1).immediate());
+ __ Nor(i.OutputRegister(), i.InputRegister(0), zero_reg);
+ }
+ break;
+ case kLoong64Xor:
+ case kLoong64Xor32:
+ __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Clz_w:
+ __ clz_w(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kLoong64Clz_d:
+ __ clz_d(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kLoong64Sll_w:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sll_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ slli_w(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kLoong64Srl_w:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ srl_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ srli_w(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kLoong64Sra_w:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sra_w(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ srai_w(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kLoong64Bstrpick_w:
+ __ bstrpick_w(i.OutputRegister(), i.InputRegister(0),
+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
+ break;
+ case kLoong64Bstrins_w:
+ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
+ __ bstrins_w(i.OutputRegister(), zero_reg,
+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
+ } else {
+ __ bstrins_w(i.OutputRegister(), i.InputRegister(0),
+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
+ }
+ break;
+ case kLoong64Bstrpick_d: {
+ __ bstrpick_d(i.OutputRegister(), i.InputRegister(0),
+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
+ break;
+ }
+ case kLoong64Bstrins_d:
+ if (instr->InputAt(1)->IsImmediate() && i.InputInt8(1) == 0) {
+ __ bstrins_d(i.OutputRegister(), zero_reg,
+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
+ } else {
+ __ bstrins_d(i.OutputRegister(), i.InputRegister(0),
+ i.InputInt8(1) + i.InputInt8(2) - 1, i.InputInt8(1));
+ }
+ break;
+ case kLoong64Sll_d:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sll_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ slli_d(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kLoong64Srl_d:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ srl_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ srli_d(i.OutputRegister(), i.InputRegister(0),
+ static_cast<uint16_t>(imm));
+ }
+ break;
+ case kLoong64Sra_d:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sra_d(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int64_t imm = i.InputOperand(1).immediate();
+ __ srai_d(i.OutputRegister(), i.InputRegister(0), imm);
+ }
+ break;
+ case kLoong64Rotr_w:
+ __ Rotr_w(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Rotr_d:
+ __ Rotr_d(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kLoong64Tst:
+ __ And(t8, i.InputRegister(0), i.InputOperand(1));
+      // The comparison against zero is emitted by the branch or boolean
+      // materialization that consumes t8; no compare opcode is emitted here.
+ break;
+ case kLoong64Cmp:
+ // Pseudo-instruction used for cmp/branch. No opcode emitted here.
+ break;
+ case kLoong64Mov:
+ // TODO(LOONG_dev): Should we combine mov/li, or use separate instr?
+ // - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
+ if (HasRegisterInput(instr, 0)) {
+ __ mov(i.OutputRegister(), i.InputRegister(0));
+ } else {
+ __ li(i.OutputRegister(), i.InputOperand(0));
+ }
+ break;
+
+ case kLoong64Float32Cmp: {
+ FPURegister left = i.InputOrZeroSingleRegister(0);
+ FPURegister right = i.InputOrZeroSingleRegister(1);
+ bool predicate;
+ FPUCondition cc =
+ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
+
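+      // kDoubleRegZero is materialized lazily: it is loaded with 0.0 only the
+      // first time a comparison in this function needs it.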
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+
+ __ CompareF32(left, right, cc);
+ } break;
+ case kLoong64Float32Add:
+ __ fadd_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float32Sub:
+ __ fsub_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float32Mul:
+ __ fmul_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float32Div:
+ __ fdiv_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float32Abs:
+ __ fabs_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ case kLoong64Float32Neg:
+ __ Neg_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ case kLoong64Float32Sqrt: {
+ __ fsqrt_s(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kLoong64Float32Min: {
+ FPURegister dst = i.OutputSingleRegister();
+ FPURegister src1 = i.InputSingleRegister(0);
+ FPURegister src2 = i.InputSingleRegister(1);
+ auto ool = zone()->New<OutOfLineFloat32Min>(this, dst, src1, src2);
+ __ Float32Min(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
+ case kLoong64Float32Max: {
+ FPURegister dst = i.OutputSingleRegister();
+ FPURegister src1 = i.InputSingleRegister(0);
+ FPURegister src2 = i.InputSingleRegister(1);
+ auto ool = zone()->New<OutOfLineFloat32Max>(this, dst, src1, src2);
+ __ Float32Max(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
+ case kLoong64Float64Cmp: {
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ bool predicate;
+ FPUCondition cc =
+ FlagsConditionToConditionCmpFPU(&predicate, instr->flags_condition());
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+
+ __ CompareF64(left, right, cc);
+ } break;
+ case kLoong64Float64Add:
+ __ fadd_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float64Sub:
+ __ fsub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float64Mul:
+ // TODO(LOONG_dev): LOONG64 add special case: right op is -1.0, see arm
+ // port.
+ __ fmul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float64Div:
+ __ fdiv_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+ i.InputDoubleRegister(1));
+ break;
+ case kLoong64Float64Mod: {
+ // TODO(turbofan): implement directly.
+ FrameScope scope(tasm(), StackFrame::MANUAL);
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ CallCFunction(ExternalReference::mod_two_doubles_operation(), 0, 2);
+ break;
+ }
+ case kLoong64Float64Abs:
+ __ fabs_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64Float64Neg:
+ __ Neg_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64Float64Sqrt: {
+ __ fsqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kLoong64Float64Min: {
+ FPURegister dst = i.OutputDoubleRegister();
+ FPURegister src1 = i.InputDoubleRegister(0);
+ FPURegister src2 = i.InputDoubleRegister(1);
+ auto ool = zone()->New<OutOfLineFloat64Min>(this, dst, src1, src2);
+ __ Float64Min(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
+ case kLoong64Float64Max: {
+ FPURegister dst = i.OutputDoubleRegister();
+ FPURegister src1 = i.InputDoubleRegister(0);
+ FPURegister src2 = i.InputDoubleRegister(1);
+ auto ool = zone()->New<OutOfLineFloat64Max>(this, dst, src1, src2);
+ __ Float64Max(dst, src1, src2, ool->entry());
+ __ bind(ool->exit());
+ break;
+ }
+ case kLoong64Float64RoundDown: {
+ __ Floor_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kLoong64Float32RoundDown: {
+ __ Floor_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ }
+ case kLoong64Float64RoundTruncate: {
+ __ Trunc_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kLoong64Float32RoundTruncate: {
+ __ Trunc_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ }
+ case kLoong64Float64RoundUp: {
+ __ Ceil_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kLoong64Float32RoundUp: {
+ __ Ceil_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ }
+ case kLoong64Float64RoundTiesEven: {
+ __ Round_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ }
+ case kLoong64Float32RoundTiesEven: {
+ __ Round_s(i.OutputSingleRegister(), i.InputSingleRegister(0));
+ break;
+ }
+ case kLoong64Float64SilenceNaN:
+ __ FPUCanonicalizeNaN(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64Float64ToFloat32:
+ __ fcvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64Float32ToFloat64:
+ __ fcvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
+ break;
+ case kLoong64Int32ToFloat64: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ movgr2fr_w(scratch, i.InputRegister(0));
+ __ ffint_d_w(i.OutputDoubleRegister(), scratch);
+ break;
+ }
+ case kLoong64Int32ToFloat32: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ movgr2fr_w(scratch, i.InputRegister(0));
+ __ ffint_s_w(i.OutputDoubleRegister(), scratch);
+ break;
+ }
+ case kLoong64Uint32ToFloat32: {
+ __ Ffint_s_uw(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kLoong64Int64ToFloat32: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ movgr2fr_d(scratch, i.InputRegister(0));
+ __ ffint_s_l(i.OutputDoubleRegister(), scratch);
+ break;
+ }
+ case kLoong64Int64ToFloat64: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ movgr2fr_d(scratch, i.InputRegister(0));
+ __ ffint_d_l(i.OutputDoubleRegister(), scratch);
+ break;
+ }
+ case kLoong64Uint32ToFloat64: {
+ __ Ffint_d_uw(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kLoong64Uint64ToFloat64: {
+ __ Ffint_d_ul(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kLoong64Uint64ToFloat32: {
+ __ Ffint_s_ul(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ }
+ case kLoong64Float64ToInt32: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ ftintrz_w_d(scratch, i.InputDoubleRegister(0));
+ __ movfr2gr_s(i.OutputRegister(), scratch);
+ break;
+ }
+ case kLoong64Float32ToInt32: {
+ FPURegister scratch_d = kScratchDoubleReg;
+ bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode());
+ __ ftintrz_w_s(scratch_d, i.InputDoubleRegister(0));
+ __ movfr2gr_s(i.OutputRegister(), scratch_d);
+ if (set_overflow_to_min_i32) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
+ // because INT32_MIN allows easier out-of-bounds detection.
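+        // E.g. a saturated result of INT32_MAX: scratch wraps to INT32_MIN,
+        // slt yields 1, and the add wraps the output to INT32_MIN too; any
+        // smaller result leaves slt at 0 and the output unchanged.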
+ __ addi_w(scratch, i.OutputRegister(), 1);
+ __ slt(scratch, scratch, i.OutputRegister());
+ __ add_w(i.OutputRegister(), i.OutputRegister(), scratch);
+ }
+ break;
+ }
+ case kLoong64Float32ToInt64: {
+ FPURegister scratch_d = kScratchDoubleReg;
+
+ bool load_status = instr->OutputCount() > 1;
+ // Other arches use round to zero here, so we follow.
+ __ ftintrz_l_s(scratch_d, i.InputDoubleRegister(0));
+ __ movfr2gr_d(i.OutputRegister(), scratch_d);
+ if (load_status) {
+ Register output2 = i.OutputRegister(1);
+ __ movfcsr2gr(output2, FCSR2);
+ // Check for overflow and NaNs.
+ __ And(output2, output2,
+ kFCSROverflowCauseMask | kFCSRInvalidOpCauseMask);
+ __ Slt(output2, zero_reg, output2);
+ __ xori(output2, output2, 1);
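+        // output2 is now 1 if the conversion succeeded and 0 if it overflowed
+        // or the input was a NaN.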
+ }
+ break;
+ }
+ case kLoong64Float64ToInt64: {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ FPURegister scratch_d = kScratchDoubleReg;
+
+ bool set_overflow_to_min_i64 = MiscField::decode(instr->opcode());
+ bool load_status = instr->OutputCount() > 1;
+ // Other arches use round to zero here, so we follow.
+ __ ftintrz_l_d(scratch_d, i.InputDoubleRegister(0));
+ __ movfr2gr_d(i.OutputRegister(0), scratch_d);
+ if (load_status) {
+ Register output2 = i.OutputRegister(1);
+ __ movfcsr2gr(output2, FCSR2);
+ // Check for overflow and NaNs.
+ __ And(output2, output2,
+ kFCSROverflowCauseMask | kFCSRInvalidOpCauseMask);
+ __ Slt(output2, zero_reg, output2);
+ __ xori(output2, output2, 1);
+ }
+ if (set_overflow_to_min_i64) {
+ // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
+ // because INT64_MIN allows easier out-of-bounds detection.
+ __ addi_d(scratch, i.OutputRegister(), 1);
+ __ slt(scratch, scratch, i.OutputRegister());
+ __ add_d(i.OutputRegister(), i.OutputRegister(), scratch);
+ }
+ break;
+ }
+ case kLoong64Float64ToUint32: {
+ FPURegister scratch = kScratchDoubleReg;
+ __ Ftintrz_uw_d(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
+ break;
+ }
+ case kLoong64Float32ToUint32: {
+ FPURegister scratch = kScratchDoubleReg;
+ bool set_overflow_to_min_i32 = MiscField::decode(instr->opcode());
+ __ Ftintrz_uw_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch);
+ if (set_overflow_to_min_i32) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
+ // because 0 allows easier out-of-bounds detection.
+ __ addi_w(scratch, i.OutputRegister(), 1);
+ __ Movz(i.OutputRegister(), zero_reg, scratch);
+ }
+ break;
+ }
+ case kLoong64Float32ToUint64: {
+ FPURegister scratch = kScratchDoubleReg;
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Ftintrz_ul_s(i.OutputRegister(), i.InputDoubleRegister(0), scratch,
+ result);
+ break;
+ }
+ case kLoong64Float64ToUint64: {
+ FPURegister scratch = kScratchDoubleReg;
+ Register result = instr->OutputCount() > 1 ? i.OutputRegister(1) : no_reg;
+ __ Ftintrz_ul_d(i.OutputRegister(0), i.InputDoubleRegister(0), scratch,
+ result);
+ break;
+ }
+ case kLoong64BitcastDL:
+ __ movfr2gr_d(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64BitcastLD:
+ __ movgr2fr_d(i.OutputDoubleRegister(), i.InputRegister(0));
+ break;
+ case kLoong64Float64ExtractLowWord32:
+ __ FmoveLow(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64Float64ExtractHighWord32:
+ __ movfrh2gr_s(i.OutputRegister(), i.InputDoubleRegister(0));
+ break;
+ case kLoong64Float64InsertLowWord32:
+ __ FmoveLow(i.OutputDoubleRegister(), i.InputRegister(1));
+ break;
+ case kLoong64Float64InsertHighWord32:
+ __ movgr2frh_w(i.OutputDoubleRegister(), i.InputRegister(1));
+ break;
+ // ... more basic instructions ...
+
+ case kLoong64Ext_w_b:
+ __ ext_w_b(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kLoong64Ext_w_h:
+ __ ext_w_h(i.OutputRegister(), i.InputRegister(0));
+ break;
+ case kLoong64Ld_bu:
+ __ Ld_bu(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64Ld_b:
+ __ Ld_b(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64St_b:
+ __ St_b(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kLoong64Ld_hu:
+ __ Ld_hu(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64Ld_h:
+ __ Ld_h(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64St_h:
+ __ St_h(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kLoong64Ld_w:
+ __ Ld_w(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64Ld_wu:
+ __ Ld_wu(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64Ld_d:
+ __ Ld_d(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kLoong64St_w:
+ __ St_w(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kLoong64St_d:
+ __ St_d(i.InputOrZeroRegister(2), i.MemoryOperand());
+ break;
+ case kLoong64Fld_s: {
+ __ Fld_s(i.OutputSingleRegister(), i.MemoryOperand());
+ break;
+ }
+ case kLoong64Fst_s: {
+ size_t index = 0;
+ MemOperand operand = i.MemoryOperand(&index);
+ FPURegister ft = i.InputOrZeroSingleRegister(index);
+ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+
+ __ Fst_s(ft, operand);
+ break;
+ }
+ case kLoong64Fld_d:
+ __ Fld_d(i.OutputDoubleRegister(), i.MemoryOperand());
+ break;
+ case kLoong64Fst_d: {
+ FPURegister ft = i.InputOrZeroDoubleRegister(2);
+ if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+
+ __ Fst_d(ft, i.MemoryOperand());
+ break;
+ }
+ case kLoong64Dbar: {
+ __ dbar(0);
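+      // dbar with hint 0 is a full barrier: all prior memory accesses
+      // complete before any later ones are issued.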
+ break;
+ }
+ case kLoong64Push:
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ Fst_d(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+ __ Sub_d(sp, sp, Operand(kDoubleSize));
+ frame_access_state()->IncreaseSPDelta(kDoubleSize / kSystemPointerSize);
+ } else {
+ __ Push(i.InputRegister(0));
+ frame_access_state()->IncreaseSPDelta(1);
+ }
+ break;
+ case kLoong64Peek: {
+ int reverse_slot = i.InputInt32(0);
+ int offset =
+ FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
+ if (instr->OutputAt(0)->IsFPRegister()) {
+ LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
+ if (op->representation() == MachineRepresentation::kFloat64) {
+ __ Fld_d(i.OutputDoubleRegister(), MemOperand(fp, offset));
+ } else if (op->representation() == MachineRepresentation::kFloat32) {
+ __ Fld_s(i.OutputSingleRegister(0), MemOperand(fp, offset));
+ } else {
+ DCHECK_EQ(MachineRepresentation::kSimd128, op->representation());
+ abort();
+ }
+ } else {
+ __ Ld_d(i.OutputRegister(0), MemOperand(fp, offset));
+ }
+ break;
+ }
+ case kLoong64StackClaim: {
+ __ Sub_d(sp, sp, Operand(i.InputInt32(0)));
+ frame_access_state()->IncreaseSPDelta(i.InputInt32(0) /
+ kSystemPointerSize);
+ break;
+ }
+ case kLoong64Poke: {
+ if (instr->InputAt(0)->IsFPRegister()) {
+ __ Fst_d(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
+ } else {
+ __ St_d(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
+ }
+ break;
+ }
+ case kLoong64ByteSwap64: {
+ __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 8);
+ break;
+ }
+ case kLoong64ByteSwap32: {
+ __ ByteSwapSigned(i.OutputRegister(0), i.InputRegister(0), 4);
+ break;
+ }
+ case kAtomicLoadInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_b);
+ break;
+ case kAtomicLoadUint8:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_bu);
+ break;
+ case kAtomicLoadInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_h);
+ break;
+ case kAtomicLoadUint16:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_hu);
+ break;
+ case kAtomicLoadWord32:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_w);
+ break;
+ case kLoong64Word64AtomicLoadUint32:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_wu);
+ break;
+ case kLoong64Word64AtomicLoadUint64:
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld_d);
+ break;
+ case kAtomicStoreWord8:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(St_b);
+ break;
+ case kAtomicStoreWord16:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(St_h);
+ break;
+ case kAtomicStoreWord32:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(St_w);
+ break;
+ case kLoong64StoreCompressTagged:
+ case kLoong64Word64AtomicStoreWord64:
+ ASSEMBLE_ATOMIC_STORE_INTEGER(St_d);
+ break;
+ case kAtomicExchangeInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 8, 32);
+ break;
+ case kAtomicExchangeUint8:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 8, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 8, 64);
+ break;
+ }
+ break;
+ case kAtomicExchangeInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 16, 32);
+ break;
+ case kAtomicExchangeUint16:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 16, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 16, 64);
+ break;
+ }
+ break;
+ case kAtomicExchangeWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amswap_db_w(i.OutputRegister(0), i.InputRegister(2),
+ i.TempRegister(0));
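+          // amswap_db_w atomically swaps the word at base + index (computed
+          // into the temp above) with InputRegister(2) and returns the old
+          // value; the _db form adds the required barrier semantics.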
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 32, 64);
+ break;
+ }
+ break;
+ case kLoong64Word64AtomicExchangeUint64:
+ __ add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amswap_db_d(i.OutputRegister(0), i.InputRegister(2),
+ i.TempRegister(0));
+ break;
+ case kAtomicCompareExchangeInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 8, 32);
+ break;
+ case kAtomicCompareExchangeUint8:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 8,
+ 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 8,
+ 64);
+ break;
+ }
+ break;
+ case kAtomicCompareExchangeInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, true, 16, 32);
+ break;
+ case kAtomicCompareExchangeUint16:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_w, Sc_w, false, 16,
+ 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 16,
+ 64);
+ break;
+ }
+ break;
+ case kAtomicCompareExchangeWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ slli_w(i.InputRegister(2), i.InputRegister(2), 0);
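+          // A zero-amount slli_w just sign-extends the expected value to 64
+          // bits so that it compares equal to the word loaded by Ll_w.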
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll_w, Sc_w);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll_d, Sc_d, false, 32,
+ 64);
+ break;
+ }
+ break;
+ case kLoong64Word64AtomicCompareExchangeUint64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll_d, Sc_d);
+ break;
+ case kAtomicAddWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amadd_db_w(i.OutputRegister(0), i.InputRegister(2),
+ i.TempRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 32, Add_d, 64);
+ break;
+ }
+ break;
+ case kAtomicSubWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_BINOP(Ll_w, Sc_w, Sub_w);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 32, Sub_d, 64);
+ break;
+ }
+ break;
+ case kAtomicAndWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amand_db_w(i.OutputRegister(0), i.InputRegister(2),
+ i.TempRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 32, And, 64);
+ break;
+ }
+ break;
+ case kAtomicOrWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amor_db_w(i.OutputRegister(0), i.InputRegister(2),
+ i.TempRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 32, Or, 64);
+ break;
+ }
+ break;
+ case kAtomicXorWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amxor_db_w(i.OutputRegister(0), i.InputRegister(2),
+ i.TempRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 32, Xor, 64);
+ break;
+ }
+ break;
+#define ATOMIC_BINOP_CASE(op, inst32, inst64) \
+ case kAtomic##op##Int8: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, true, 8, inst32, 32); \
+ break; \
+ case kAtomic##op##Uint8: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, false, 8, inst32, 32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 8, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Int16: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, true, 16, inst32, 32); \
+ break; \
+ case kAtomic##op##Uint16: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_w, Sc_w, false, 16, inst32, 32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll_d, Sc_d, false, 16, inst64, 64); \
+ break; \
+ } \
+ break;
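+      // Each expansion covers the four narrow (8- and 16-bit, signed and
+      // unsigned) cases of one operation, picking Ll_w/Sc_w or Ll_d/Sc_d
+      // ll/sc loops based on the decoded atomic width.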
+ ATOMIC_BINOP_CASE(Add, Add_w, Add_d)
+ ATOMIC_BINOP_CASE(Sub, Sub_w, Sub_d)
+ ATOMIC_BINOP_CASE(And, And, And)
+ ATOMIC_BINOP_CASE(Or, Or, Or)
+ ATOMIC_BINOP_CASE(Xor, Xor, Xor)
+#undef ATOMIC_BINOP_CASE
+
+ case kLoong64Word64AtomicAddUint64:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amadd_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0));
+ break;
+ case kLoong64Word64AtomicSubUint64:
+ ASSEMBLE_ATOMIC_BINOP(Ll_d, Sc_d, Sub_d);
+ break;
+ case kLoong64Word64AtomicAndUint64:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amand_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0));
+ break;
+ case kLoong64Word64AtomicOrUint64:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amor_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0));
+ break;
+ case kLoong64Word64AtomicXorUint64:
+ __ Add_d(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1));
+ __ amxor_db_d(i.OutputRegister(0), i.InputRegister(2), i.TempRegister(0));
+ break;
+ case kLoong64S128Const:
+ case kLoong64S128Zero:
+ case kLoong64I32x4Splat:
+ case kLoong64I32x4ExtractLane:
+ case kLoong64I32x4Add:
+ case kLoong64I32x4ReplaceLane:
+ case kLoong64I32x4Sub:
+ case kLoong64F64x2Abs:
+ default:
+ break;
+ }
+ return kSuccess;
+}
+
+#define UNSUPPORTED_COND(opcode, condition) \
+ StdoutStream{} << "Unsupported " << #opcode << " condition: \"" << condition \
+ << "\""; \
+ UNIMPLEMENTED();
+
+void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
+ Instruction* instr, FlagsCondition condition,
+ Label* tlabel, Label* flabel, bool fallthru) {
+#undef __
+#define __ tasm->
+ Loong64OperandConverter i(gen, instr);
+
+ Condition cc = kNoCondition;
+  // LOONG64 does not have condition code flags, so compare and branch are
+  // implemented differently from other architectures. The compare operations
+  // emit loong64 pseudo-instructions, which are handled here by branch
+  // instructions that do the actual comparison. It is essential that the
+  // input registers to the compare pseudo-op are not modified before this
+  // branch op, as they are tested here.
+
+ if (instr->arch_opcode() == kLoong64Tst) {
+ cc = FlagsConditionToConditionTst(condition);
+ __ Branch(tlabel, cc, t8, Operand(zero_reg));
+ } else if (instr->arch_opcode() == kLoong64Add_d ||
+ instr->arch_opcode() == kLoong64Sub_d) {
+ UseScratchRegisterScope temps(tasm);
+ Register scratch = temps.Acquire();
+ Register scratch2 = temps.Acquire();
+ cc = FlagsConditionToConditionOvf(condition);
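+    // A 32-bit Add_d/Sub_d overflowed iff bits 63..32 of the 64-bit result
+    // are not a sign-extension of bit 31: compare the arithmetically shifted
+    // upper word with the replicated sign bit of the lower word.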
+ __ srai_d(scratch, i.OutputRegister(), 32);
+ __ srai_w(scratch2, i.OutputRegister(), 31);
+ __ Branch(tlabel, cc, scratch2, Operand(scratch));
+ } else if (instr->arch_opcode() == kLoong64AddOvf_d ||
+ instr->arch_opcode() == kLoong64SubOvf_d) {
+ switch (condition) {
+ // Overflow occurs if overflow register is negative
+ case kOverflow:
+ __ Branch(tlabel, lt, t8, Operand(zero_reg));
+ break;
+ case kNotOverflow:
+ __ Branch(tlabel, ge, t8, Operand(zero_reg));
+ break;
+ default:
+ UNSUPPORTED_COND(instr->arch_opcode(), condition);
+ }
+ } else if (instr->arch_opcode() == kLoong64MulOvf_w) {
+ // Overflow occurs if overflow register is not zero
+ switch (condition) {
+ case kOverflow:
+ __ Branch(tlabel, ne, t8, Operand(zero_reg));
+ break;
+ case kNotOverflow:
+ __ Branch(tlabel, eq, t8, Operand(zero_reg));
+ break;
+ default:
+ UNSUPPORTED_COND(kLoong64MulOvf_w, condition);
+ }
+ } else if (instr->arch_opcode() == kLoong64Cmp) {
+ cc = FlagsConditionToConditionCmp(condition);
+ __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
+ } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
+ cc = FlagsConditionToConditionCmp(condition);
+ DCHECK((cc == ls) || (cc == hi));
+ if (cc == ls) {
+ __ xori(i.TempRegister(0), i.TempRegister(0), 1);
+ }
+ __ Branch(tlabel, ne, i.TempRegister(0), Operand(zero_reg));
+ } else if (instr->arch_opcode() == kLoong64Float32Cmp ||
+ instr->arch_opcode() == kLoong64Float64Cmp) {
+ bool predicate;
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
+ if (predicate) {
+ __ BranchTrueF(tlabel);
+ } else {
+ __ BranchFalseF(tlabel);
+ }
+ } else {
+ PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
+ instr->arch_opcode());
+ UNIMPLEMENTED();
+ }
+ if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
+#undef __
+#define __ tasm()->
+}
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+ Label* tlabel = branch->true_label;
+ Label* flabel = branch->false_label;
+
+ AssembleBranchToLabels(this, tasm(), instr, branch->condition, tlabel, flabel,
+ branch->fallthru);
+}
+
+#undef UNSUPPORTED_COND
+
+void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
+ BranchInfo* branch) {
+ AssembleArchBranch(instr, branch);
+}
+
+void CodeGenerator::AssembleArchJump(RpoNumber target) {
+ if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
+}
+
+#if V8_ENABLE_WEBASSEMBLY
+void CodeGenerator::AssembleArchTrap(Instruction* instr,
+ FlagsCondition condition) {
+ class OutOfLineTrap final : public OutOfLineCode {
+ public:
+ OutOfLineTrap(CodeGenerator* gen, Instruction* instr)
+ : OutOfLineCode(gen), instr_(instr), gen_(gen) {}
+ void Generate() final {
+ Loong64OperandConverter i(gen_, instr_);
+ TrapId trap_id =
+ static_cast<TrapId>(i.InputInt32(instr_->InputCount() - 1));
+ GenerateCallToTrap(trap_id);
+ }
+
+ private:
+ void GenerateCallToTrap(TrapId trap_id) {
+ if (trap_id == TrapId::kInvalid) {
+ // We cannot test calls to the runtime in cctest/test-run-wasm.
+ // Therefore we emit a call to C here instead of a call to the runtime.
+ // We use the context register as the scratch register, because we do
+ // not have a context here.
+ __ PrepareCallCFunction(0, 0, cp);
+ __ CallCFunction(
+ ExternalReference::wasm_call_trap_callback_for_testing(), 0);
+ __ LeaveFrame(StackFrame::WASM);
+ auto call_descriptor = gen_->linkage()->GetIncomingDescriptor();
+ int pop_count = static_cast<int>(call_descriptor->ParameterSlotCount());
+ pop_count += (pop_count & 1); // align
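+      // Slots are 8 bytes, so round an odd count up to keep sp 16-byte
+      // aligned, as the ABI requires.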
+ __ Drop(pop_count);
+ __ Ret();
+ } else {
+ gen_->AssembleSourcePosition(instr_);
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched when the code
+ // is added to the native module and copied into wasm code space.
+ __ Call(static_cast<Address>(trap_id), RelocInfo::WASM_STUB_CALL);
+ ReferenceMap* reference_map =
+ gen_->zone()->New<ReferenceMap>(gen_->zone());
+ gen_->RecordSafepoint(reference_map);
+ if (FLAG_debug_code) {
+ __ stop();
+ }
+ }
+ }
+ Instruction* instr_;
+ CodeGenerator* gen_;
+ };
+ auto ool = zone()->New<OutOfLineTrap>(this, instr);
+ Label* tlabel = ool->entry();
+ AssembleBranchToLabels(this, tasm(), instr, condition, tlabel, nullptr, true);
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+ FlagsCondition condition) {
+ Loong64OperandConverter i(this, instr);
+
+ // Materialize a full 32-bit 1 or 0 value. The result register is always the
+ // last output of the instruction.
+ DCHECK_NE(0u, instr->OutputCount());
+ Register result = i.OutputRegister(instr->OutputCount() - 1);
+ Condition cc = kNoCondition;
+  // Loong64 does not have condition code flags, so compare and branch are
+  // implemented differently from other architectures. The compare operations
+  // emit loong64 pseudo-instructions, which are checked and handled here.
+
+ if (instr->arch_opcode() == kLoong64Tst) {
+ cc = FlagsConditionToConditionTst(condition);
+ if (cc == eq) {
+ __ Sltu(result, t8, 1);
+ } else {
+ __ Sltu(result, zero_reg, t8);
+ }
+ return;
+ } else if (instr->arch_opcode() == kLoong64Add_d ||
+ instr->arch_opcode() == kLoong64Sub_d) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ cc = FlagsConditionToConditionOvf(condition);
+ // Check for overflow creates 1 or 0 for result.
+ __ srli_d(scratch, i.OutputRegister(), 63);
+ __ srli_w(result, i.OutputRegister(), 31);
+ __ xor_(result, scratch, result);
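+    // result = (bit 63 of the result) ^ (bit 31 of the lower word), which is
+    // 1 exactly when the 32-bit operation overflowed.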
+ if (cc == eq) // Toggle result for not overflow.
+ __ xori(result, result, 1);
+ return;
+ } else if (instr->arch_opcode() == kLoong64AddOvf_d ||
+ instr->arch_opcode() == kLoong64SubOvf_d) {
+ // Overflow occurs if overflow register is negative
+ __ slt(result, t8, zero_reg);
+ } else if (instr->arch_opcode() == kLoong64MulOvf_w) {
+ // Overflow occurs if overflow register is not zero
+ __ Sgtu(result, t8, zero_reg);
+ } else if (instr->arch_opcode() == kLoong64Cmp) {
+ cc = FlagsConditionToConditionCmp(condition);
+ switch (cc) {
+ case eq:
+ case ne: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ if (instr->InputAt(1)->IsImmediate()) {
+ if (is_int12(-right.immediate())) {
+ if (right.immediate() == 0) {
+ if (cc == eq) {
+ __ Sltu(result, left, 1);
+ } else {
+ __ Sltu(result, zero_reg, left);
+ }
+ } else {
+ __ Add_d(result, left, Operand(-right.immediate()));
+ if (cc == eq) {
+ __ Sltu(result, result, 1);
+ } else {
+ __ Sltu(result, zero_reg, result);
+ }
+ }
+ } else {
+ __ Xor(result, left, Operand(right));
+ if (cc == eq) {
+ __ Sltu(result, result, 1);
+ } else {
+ __ Sltu(result, zero_reg, result);
+ }
+ }
+ } else {
+ __ Xor(result, left, right);
+ if (cc == eq) {
+ __ Sltu(result, result, 1);
+ } else {
+ __ Sltu(result, zero_reg, result);
+ }
+ }
+ } break;
+ case lt:
+ case ge: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ __ Slt(result, left, right);
+ if (cc == ge) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case gt:
+ case le: {
+ Register left = i.InputRegister(1);
+ Operand right = i.InputOperand(0);
+ __ Slt(result, left, right);
+ if (cc == le) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case lo:
+ case hs: {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ __ Sltu(result, left, right);
+ if (cc == hs) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ case hi:
+ case ls: {
+ Register left = i.InputRegister(1);
+ Operand right = i.InputOperand(0);
+ __ Sltu(result, left, right);
+ if (cc == ls) {
+ __ xori(result, result, 1);
+ }
+ } break;
+ default:
+ UNREACHABLE();
+ }
+ return;
+ } else if (instr->arch_opcode() == kLoong64Float64Cmp ||
+ instr->arch_opcode() == kLoong64Float32Cmp) {
+ FPURegister left = i.InputOrZeroDoubleRegister(0);
+ FPURegister right = i.InputOrZeroDoubleRegister(1);
+ if ((left == kDoubleRegZero || right == kDoubleRegZero) &&
+ !__ IsDoubleZeroRegSet()) {
+ __ Move(kDoubleRegZero, 0.0);
+ }
+ bool predicate;
+ FlagsConditionToConditionCmpFPU(&predicate, condition);
+ {
+ __ movcf2gr(result, FCC0);
+ if (!predicate) {
+ __ xori(result, result, 1);
+ }
+ }
+ return;
+ } else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
+ cc = FlagsConditionToConditionCmp(condition);
+ DCHECK((cc == ls) || (cc == hi));
+ if (cc == ls) {
+ __ xori(i.OutputRegister(), i.TempRegister(0), 1);
+ }
+ return;
+ } else {
+    PrintF("AssembleArchBoolean Unimplemented arch_opcode: %d\n",
+           instr->arch_opcode());
+ TRACE_UNIMPL();
+ UNIMPLEMENTED();
+ }
+}
+
+void CodeGenerator::AssembleArchBinarySearchSwitch(Instruction* instr) {
+ Loong64OperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ std::vector<std::pair<int32_t, Label*>> cases;
+ for (size_t index = 2; index < instr->InputCount(); index += 2) {
+ cases.push_back({i.InputInt32(index + 0), GetLabel(i.InputRpo(index + 1))});
+ }
+ AssembleArchBinarySearchSwitchRange(input, i.InputRpo(1), cases.data(),
+ cases.data() + cases.size());
+}
+
+void CodeGenerator::AssembleArchTableSwitch(Instruction* instr) {
+ Loong64OperandConverter i(this, instr);
+ Register input = i.InputRegister(0);
+ size_t const case_count = instr->InputCount() - 2;
+
+ __ Branch(GetLabel(i.InputRpo(1)), hs, input, Operand(case_count));
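+  // hs is an unsigned >= comparison, so out-of-range inputs branch to the
+  // default block and in-range inputs index the inline jump table.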
+ __ GenerateSwitchTable(input, case_count, [&i, this](size_t index) {
+ return GetLabel(i.InputRpo(index + 2));
+ });
+}
+
+void CodeGenerator::AssembleArchSelect(Instruction* instr,
+ FlagsCondition condition) {
+ UNIMPLEMENTED();
+}
+
+void CodeGenerator::FinishFrame(Frame* frame) {
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ int count = base::bits::CountPopulation(saves_fpu);
+ DCHECK_EQ(kNumCalleeSavedFPU, count);
+ frame->AllocateSavedCalleeRegisterSlots(count *
+ (kDoubleSize / kSystemPointerSize));
+ }
+
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ int count = base::bits::CountPopulation(saves);
+ frame->AllocateSavedCalleeRegisterSlots(count);
+ }
+}
+
+void CodeGenerator::AssembleConstructFrame() {
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+
+ if (frame_access_state()->has_frame()) {
+ if (call_descriptor->IsCFunctionCall()) {
+#if V8_ENABLE_WEBASSEMBLY
+ if (info()->GetOutputStackFrameType() == StackFrame::C_WASM_ENTRY) {
+ __ StubPrologue(StackFrame::C_WASM_ENTRY);
+ // Reserve stack space for saving the c_entry_fp later.
+ __ Sub_d(sp, sp, Operand(kSystemPointerSize));
+#else
+ // For balance.
+ if (false) {
+#endif // V8_ENABLE_WEBASSEMBLY
+ } else {
+ __ Push(ra, fp);
+ __ mov(fp, sp);
+ }
+ } else if (call_descriptor->IsJSFunctionCall()) {
+ __ Prologue();
+ } else {
+ __ StubPrologue(info()->GetOutputStackFrameType());
+#if V8_ENABLE_WEBASSEMBLY
+ if (call_descriptor->IsWasmFunctionCall()) {
+ __ Push(kWasmInstanceRegister);
+ } else if (call_descriptor->IsWasmImportWrapper() ||
+ call_descriptor->IsWasmCapiFunction()) {
+ // Wasm import wrappers are passed a tuple in the place of the instance.
+ // Unpack the tuple into the instance and the target callable.
+ // This must be done here in the codegen because it cannot be expressed
+ // properly in the graph.
+ __ Ld_d(kJSFunctionRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue2Offset));
+ __ Ld_d(kWasmInstanceRegister,
+ FieldMemOperand(kWasmInstanceRegister, Tuple2::kValue1Offset));
+ __ Push(kWasmInstanceRegister);
+ if (call_descriptor->IsWasmCapiFunction()) {
+ // Reserve space for saving the PC later.
+ __ Sub_d(sp, sp, Operand(kSystemPointerSize));
+ }
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ }
+ }
+
+ int required_slots =
+ frame()->GetTotalFrameSlotCount() - frame()->GetFixedSlotCount();
+
+ if (info()->is_osr()) {
+ // TurboFan OSR-compiled functions cannot be entered directly.
+ __ Abort(AbortReason::kShouldNotDirectlyEnterOsrFunction);
+
+ // Unoptimized code jumps directly to this entrypoint while the unoptimized
+ // frame is still on the stack. Optimized code uses OSR values directly from
+ // the unoptimized frame. Thus, all that needs to be done is to allocate the
+ // remaining stack slots.
+ __ RecordComment("-- OSR entrypoint --");
+ osr_pc_offset_ = __ pc_offset();
+ required_slots -= osr_helper()->UnoptimizedFrameSlots();
+ }
+
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+
+ if (required_slots > 0) {
+ DCHECK(frame_access_state()->has_frame());
+#if V8_ENABLE_WEBASSEMBLY
+ if (info()->IsWasm() && required_slots * kSystemPointerSize > 4 * KB) {
+ // For WebAssembly functions with big frames we have to do the stack
+ // overflow check before we construct the frame. Otherwise we may not
+ // have enough space on the stack to call the runtime for the stack
+ // overflow.
+ Label done;
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ if (required_slots * kSystemPointerSize < FLAG_stack_size * KB) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ Ld_d(scratch, FieldMemOperand(
+ kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset));
+ __ Ld_d(scratch, MemOperand(scratch, 0));
+ __ Add_d(scratch, scratch,
+ Operand(required_slots * kSystemPointerSize));
+ __ Branch(&done, uge, sp, Operand(scratch));
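+        // Continue at done if sp still exceeds the real stack limit after
+        // reserving the whole frame; otherwise fall through to the trap call.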
+ }
+
+ __ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+ // The call does not return, hence we can ignore any references and just
+ // define an empty safepoint.
+ ReferenceMap* reference_map = zone()->New<ReferenceMap>(zone());
+ RecordSafepoint(reference_map);
+ if (FLAG_debug_code) {
+ __ stop();
+ }
+
+ __ bind(&done);
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+ }
+
+ const int returns = frame()->GetReturnSlotCount();
+
+ // Skip callee-saved and return slots, which are pushed below.
+ required_slots -= base::bits::CountPopulation(saves);
+ required_slots -= base::bits::CountPopulation(saves_fpu);
+ required_slots -= returns;
+ if (required_slots > 0) {
+ __ Sub_d(sp, sp, Operand(required_slots * kSystemPointerSize));
+ }
+
+ if (saves_fpu != 0) {
+ // Save callee-saved FPU registers.
+ __ MultiPushFPU(saves_fpu);
+ DCHECK_EQ(kNumCalleeSavedFPU, base::bits::CountPopulation(saves_fpu));
+ }
+
+ if (saves != 0) {
+ // Save callee-saved registers.
+ __ MultiPush(saves);
+ }
+
+ if (returns != 0) {
+ // Create space for returns.
+ __ Sub_d(sp, sp, Operand(returns * kSystemPointerSize));
+ }
+}
+
+void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
+ auto call_descriptor = linkage()->GetIncomingDescriptor();
+
+ const int returns = frame()->GetReturnSlotCount();
+ if (returns != 0) {
+ __ Add_d(sp, sp, Operand(returns * kSystemPointerSize));
+ }
+
+ // Restore GP registers.
+ const RegList saves = call_descriptor->CalleeSavedRegisters();
+ if (saves != 0) {
+ __ MultiPop(saves);
+ }
+
+ // Restore FPU registers.
+ const RegList saves_fpu = call_descriptor->CalleeSavedFPRegisters();
+ if (saves_fpu != 0) {
+ __ MultiPopFPU(saves_fpu);
+ }
+
+ Loong64OperandConverter g(this, nullptr);
+
+ const int parameter_slots =
+ static_cast<int>(call_descriptor->ParameterSlotCount());
+
+  // {additional_pop_count} is only greater than zero if {parameter_slots} == 0.
+ // Check RawMachineAssembler::PopAndReturn.
+ if (parameter_slots != 0) {
+ if (additional_pop_count->IsImmediate()) {
+ DCHECK_EQ(g.ToConstant(additional_pop_count).ToInt32(), 0);
+ } else if (FLAG_debug_code) {
+ __ Assert(eq, AbortReason::kUnexpectedAdditionalPopValue,
+ g.ToRegister(additional_pop_count),
+ Operand(static_cast<int64_t>(0)));
+ }
+ }
+
+ // Functions with JS linkage have at least one parameter (the receiver).
+ // If {parameter_slots} == 0, it means it is a builtin with
+ // kDontAdaptArgumentsSentinel, which takes care of JS arguments popping
+ // itself.
+ const bool drop_jsargs = frame_access_state()->has_frame() &&
+ call_descriptor->IsJSFunctionCall() &&
+ parameter_slots != 0;
+
+ if (call_descriptor->IsCFunctionCall()) {
+ AssembleDeconstructFrame();
+ } else if (frame_access_state()->has_frame()) {
+    // Canonicalize JSFunction return sites for now unless they have a
+    // variable number of stack slot pops.
+ if (additional_pop_count->IsImmediate() &&
+ g.ToConstant(additional_pop_count).ToInt32() == 0) {
+ if (return_label_.is_bound()) {
+ __ Branch(&return_label_);
+ return;
+ } else {
+ __ bind(&return_label_);
+ }
+ }
+ if (drop_jsargs) {
+ // Get the actual argument count
+ __ Ld_d(t0, MemOperand(fp, StandardFrameConstants::kArgCOffset));
+ }
+ AssembleDeconstructFrame();
+ }
+ if (drop_jsargs) {
+    // We must pop all arguments from the stack (including the receiver). This
+    // number of arguments is given by max(1 + argc_reg, parameter_slots).
+ __ Add_d(t0, t0, Operand(1)); // Also pop the receiver.
+ if (parameter_slots > 1) {
+ __ li(t1, parameter_slots);
+ __ slt(t2, t0, t1);
+ __ Movn(t0, t1, t2);
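+      // t0 = max(argc + 1, parameter_slots): slt sets t2 iff t0 < t1, and
+      // Movn then replaces t0 with t1.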
+ }
+ __ slli_d(t0, t0, kSystemPointerSizeLog2);
+ __ add_d(sp, sp, t0);
+ } else if (additional_pop_count->IsImmediate()) {
+ int additional_count = g.ToConstant(additional_pop_count).ToInt32();
+ __ Drop(parameter_slots + additional_count);
+ } else {
+ Register pop_reg = g.ToRegister(additional_pop_count);
+ __ Drop(parameter_slots);
+ __ slli_d(pop_reg, pop_reg, kSystemPointerSizeLog2);
+ __ add_d(sp, sp, pop_reg);
+ }
+ __ Ret();
+}
+
+void CodeGenerator::FinishCode() {}
+
+void CodeGenerator::PrepareForDeoptimizationExits(
+ ZoneDeque<DeoptimizationExit*>* exits) {}
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+ InstructionOperand* destination) {
+ Loong64OperandConverter g(this, nullptr);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ __ mov(g.ToRegister(destination), src);
+ } else {
+ __ St_d(src, g.ToMemOperand(destination));
+ }
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsRegister() || destination->IsStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsRegister()) {
+ __ Ld_d(g.ToRegister(destination), src);
+ } else {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ Ld_d(scratch, src);
+ __ St_d(scratch, g.ToMemOperand(destination));
+ }
+ } else if (source->IsConstant()) {
+ Constant src = g.ToConstant(source);
+ if (destination->IsRegister() || destination->IsStackSlot()) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ Register dst =
+ destination->IsRegister() ? g.ToRegister(destination) : scratch;
+ switch (src.type()) {
+ case Constant::kInt32:
+ __ li(dst, Operand(src.ToInt32()));
+ break;
+ case Constant::kFloat32:
+ __ li(dst, Operand::EmbeddedNumber(src.ToFloat32()));
+ break;
+ case Constant::kInt64:
+#if V8_ENABLE_WEBASSEMBLY
+ if (RelocInfo::IsWasmReference(src.rmode()))
+ __ li(dst, Operand(src.ToInt64(), src.rmode()));
+ else
+#endif // V8_ENABLE_WEBASSEMBLY
+ __ li(dst, Operand(src.ToInt64()));
+ break;
+ case Constant::kFloat64:
+ __ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
+ break;
+ case Constant::kExternalReference:
+ __ li(dst, src.ToExternalReference());
+ break;
+ case Constant::kDelayedStringConstant:
+ __ li(dst, src.ToDelayedStringConstant());
+ break;
+ case Constant::kHeapObject: {
+ Handle<HeapObject> src_object = src.ToHeapObject();
+ RootIndex index;
+ if (IsMaterializableFromRoot(src_object, &index)) {
+ __ LoadRoot(dst, index);
+ } else {
+ __ li(dst, src_object);
+ }
+ break;
+ }
+ case Constant::kCompressedHeapObject:
+ UNREACHABLE();
+ case Constant::kRpoNumber:
+ UNREACHABLE(); // TODO(titzer): loading RPO numbers on LOONG64.
+ }
+ if (destination->IsStackSlot()) __ St_d(dst, g.ToMemOperand(destination));
+ } else if (src.type() == Constant::kFloat32) {
+ if (destination->IsFPStackSlot()) {
+ MemOperand dst = g.ToMemOperand(destination);
+ if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
+ __ St_d(zero_reg, dst);
+ } else {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ __ li(scratch, Operand(bit_cast<int32_t>(src.ToFloat32())));
+ __ St_d(scratch, dst);
+ }
+ } else {
+ DCHECK(destination->IsFPRegister());
+ FloatRegister dst = g.ToSingleRegister(destination);
+ __ Move(dst, src.ToFloat32());
+ }
+ } else {
+ DCHECK_EQ(Constant::kFloat64, src.type());
+ DoubleRegister dst = destination->IsFPRegister()
+ ? g.ToDoubleRegister(destination)
+ : kScratchDoubleReg;
+ __ Move(dst, src.ToFloat64().value());
+ if (destination->IsFPStackSlot()) {
+ __ Fst_d(dst, g.ToMemOperand(destination));
+ }
+ }
+ } else if (source->IsFPRegister()) {
+ FPURegister src = g.ToDoubleRegister(source);
+ if (destination->IsFPRegister()) {
+ FPURegister dst = g.ToDoubleRegister(destination);
+ __ Move(dst, src);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ __ Fst_d(src, g.ToMemOperand(destination));
+ }
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
+ MemOperand src = g.ToMemOperand(source);
+ if (destination->IsFPRegister()) {
+ __ Fld_d(g.ToDoubleRegister(destination), src);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ FPURegister temp = kScratchDoubleReg;
+ __ Fld_d(temp, src);
+ __ Fst_d(temp, g.ToMemOperand(destination));
+ }
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+ InstructionOperand* destination) {
+ Loong64OperandConverter g(this, nullptr);
+ // Dispatch on the source and destination operand kinds. Not all
+ // combinations are possible.
+ if (source->IsRegister()) {
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ // Register-register.
+ Register src = g.ToRegister(source);
+ if (destination->IsRegister()) {
+ Register dst = g.ToRegister(destination);
+ __ Move(scratch, src);
+ __ Move(src, dst);
+ __ Move(dst, scratch);
+ } else {
+ DCHECK(destination->IsStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ mov(scratch, src);
+ __ Ld_d(src, dst);
+ __ St_d(scratch, dst);
+ }
+ } else if (source->IsStackSlot()) {
+ DCHECK(destination->IsStackSlot());
+    // TODO(LOONG_dev): LOONG64 optimize scratch register usage: since the Ld
+    // instruction may itself need a scratch register, we should not use both
+    // scratch registers from UseScratchRegisterScope here.
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ FPURegister scratch_d = kScratchDoubleReg;
+ MemOperand src = g.ToMemOperand(source);
+ MemOperand dst = g.ToMemOperand(destination);
+ __ Ld_d(scratch, src);
+ __ Fld_d(scratch_d, dst);
+ __ St_d(scratch, dst);
+ __ Fst_d(scratch_d, src);
+ } else if (source->IsFPRegister()) {
+ FPURegister scratch_d = kScratchDoubleReg;
+ FPURegister src = g.ToDoubleRegister(source);
+ if (destination->IsFPRegister()) {
+ FPURegister dst = g.ToDoubleRegister(destination);
+ __ Move(scratch_d, src);
+ __ Move(src, dst);
+ __ Move(dst, scratch_d);
+ } else {
+ DCHECK(destination->IsFPStackSlot());
+ MemOperand dst = g.ToMemOperand(destination);
+ __ Move(scratch_d, src);
+ __ Fld_d(src, dst);
+ __ Fst_d(scratch_d, dst);
+ }
+ } else if (source->IsFPStackSlot()) {
+ DCHECK(destination->IsFPStackSlot());
+ UseScratchRegisterScope temps(tasm());
+ Register scratch = temps.Acquire();
+ MemOperand src0 = g.ToMemOperand(source);
+ MemOperand src1(src0.base(), src0.offset() + kIntSize);
+ MemOperand dst0 = g.ToMemOperand(destination);
+ MemOperand dst1(dst0.base(), dst0.offset() + kIntSize);
+ FPURegister scratch_d = kScratchDoubleReg;
+    __ Fld_d(scratch_d, dst0);  // Save destination in scratch_d.
+ __ Ld_w(scratch, src0); // Then use scratch to copy source to destination.
+ __ St_w(scratch, dst0);
+ __ Ld_w(scratch, src1);
+ __ St_w(scratch, dst1);
+ __ Fst_d(scratch_d, src0);
+ } else {
+ // No other combinations are possible.
+ UNREACHABLE();
+ }
+}
+
+void CodeGenerator::AssembleJumpTable(Label** targets, size_t target_count) {
+  // On LOONG64 we emit the jump tables inline.
+ UNREACHABLE();
+}
+
+#undef ASSEMBLE_ATOMIC_LOAD_INTEGER
+#undef ASSEMBLE_ATOMIC_STORE_INTEGER
+#undef ASSEMBLE_ATOMIC_BINOP
+#undef ASSEMBLE_ATOMIC_BINOP_EXT
+#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER
+#undef ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT
+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER
+#undef ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT
+#undef ASSEMBLE_IEEE754_BINOP
+#undef ASSEMBLE_IEEE754_UNOP
+
+#undef TRACE_MSG
+#undef TRACE_UNIMPL
+#undef __
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h b/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h
new file mode 100644
index 0000000000..f31818cac2
--- /dev/null
+++ b/deps/v8/src/compiler/backend/loong64/instruction-codes-loong64.h
@@ -0,0 +1,397 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_BACKEND_LOONG64_INSTRUCTION_CODES_LOONG64_H_
+#define V8_COMPILER_BACKEND_LOONG64_INSTRUCTION_CODES_LOONG64_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// LOONG64-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
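+// Opcodes are declared through a V(Name) list macro so that the ArchOpcode
+// enum and its mnemonic strings can both be generated from this one table.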
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(Loong64Add_d) \
+ V(Loong64Add_w) \
+ V(Loong64AddOvf_d) \
+ V(Loong64Sub_d) \
+ V(Loong64Sub_w) \
+ V(Loong64SubOvf_d) \
+ V(Loong64Mul_d) \
+ V(Loong64MulOvf_w) \
+ V(Loong64Mulh_d) \
+ V(Loong64Mulh_w) \
+ V(Loong64Mulh_wu) \
+ V(Loong64Mul_w) \
+ V(Loong64Div_d) \
+ V(Loong64Div_w) \
+ V(Loong64Div_du) \
+ V(Loong64Div_wu) \
+ V(Loong64Mod_d) \
+ V(Loong64Mod_w) \
+ V(Loong64Mod_du) \
+ V(Loong64Mod_wu) \
+ V(Loong64And) \
+ V(Loong64And32) \
+ V(Loong64Or) \
+ V(Loong64Or32) \
+ V(Loong64Nor) \
+ V(Loong64Nor32) \
+ V(Loong64Xor) \
+ V(Loong64Xor32) \
+ V(Loong64Alsl_d) \
+ V(Loong64Alsl_w) \
+ V(Loong64Sll_d) \
+ V(Loong64Sll_w) \
+ V(Loong64Srl_d) \
+ V(Loong64Srl_w) \
+ V(Loong64Sra_d) \
+ V(Loong64Sra_w) \
+ V(Loong64Rotr_d) \
+ V(Loong64Rotr_w) \
+ V(Loong64Bstrpick_d) \
+ V(Loong64Bstrpick_w) \
+ V(Loong64Bstrins_d) \
+ V(Loong64Bstrins_w) \
+ V(Loong64ByteSwap64) \
+ V(Loong64ByteSwap32) \
+ V(Loong64Clz_d) \
+ V(Loong64Clz_w) \
+ V(Loong64Mov) \
+ V(Loong64Tst) \
+ V(Loong64Cmp) \
+ V(Loong64Float32Cmp) \
+ V(Loong64Float32Add) \
+ V(Loong64Float32Sub) \
+ V(Loong64Float32Mul) \
+ V(Loong64Float32Div) \
+ V(Loong64Float32Abs) \
+ V(Loong64Float32Neg) \
+ V(Loong64Float32Sqrt) \
+ V(Loong64Float32Max) \
+ V(Loong64Float32Min) \
+ V(Loong64Float32ToFloat64) \
+ V(Loong64Float32RoundDown) \
+ V(Loong64Float32RoundUp) \
+ V(Loong64Float32RoundTruncate) \
+ V(Loong64Float32RoundTiesEven) \
+ V(Loong64Float32ToInt32) \
+ V(Loong64Float32ToInt64) \
+ V(Loong64Float32ToUint32) \
+ V(Loong64Float32ToUint64) \
+ V(Loong64Float64Cmp) \
+ V(Loong64Float64Add) \
+ V(Loong64Float64Sub) \
+ V(Loong64Float64Mul) \
+ V(Loong64Float64Div) \
+ V(Loong64Float64Mod) \
+ V(Loong64Float64Abs) \
+ V(Loong64Float64Neg) \
+ V(Loong64Float64Sqrt) \
+ V(Loong64Float64Max) \
+ V(Loong64Float64Min) \
+ V(Loong64Float64ToFloat32) \
+ V(Loong64Float64RoundDown) \
+ V(Loong64Float64RoundUp) \
+ V(Loong64Float64RoundTruncate) \
+ V(Loong64Float64RoundTiesEven) \
+ V(Loong64Float64ToInt32) \
+ V(Loong64Float64ToInt64) \
+ V(Loong64Float64ToUint32) \
+ V(Loong64Float64ToUint64) \
+ V(Loong64Int32ToFloat32) \
+ V(Loong64Int32ToFloat64) \
+ V(Loong64Int64ToFloat32) \
+ V(Loong64Int64ToFloat64) \
+ V(Loong64Uint32ToFloat32) \
+ V(Loong64Uint32ToFloat64) \
+ V(Loong64Uint64ToFloat32) \
+ V(Loong64Uint64ToFloat64) \
+ V(Loong64Float64ExtractLowWord32) \
+ V(Loong64Float64ExtractHighWord32) \
+ V(Loong64Float64InsertLowWord32) \
+ V(Loong64Float64InsertHighWord32) \
+ V(Loong64BitcastDL) \
+ V(Loong64BitcastLD) \
+ V(Loong64Float64SilenceNaN) \
+ V(Loong64Ld_b) \
+ V(Loong64Ld_bu) \
+ V(Loong64St_b) \
+ V(Loong64Ld_h) \
+ V(Loong64Ld_hu) \
+ V(Loong64St_h) \
+ V(Loong64Ld_w) \
+ V(Loong64Ld_wu) \
+ V(Loong64St_w) \
+ V(Loong64Ld_d) \
+ V(Loong64St_d) \
+ V(Loong64Fld_s) \
+ V(Loong64Fst_s) \
+ V(Loong64Fld_d) \
+ V(Loong64Fst_d) \
+ V(Loong64Push) \
+ V(Loong64Peek) \
+ V(Loong64Poke) \
+ V(Loong64StackClaim) \
+ V(Loong64Ext_w_b) \
+ V(Loong64Ext_w_h) \
+ V(Loong64Dbar) \
+ V(Loong64S128Const) \
+ V(Loong64S128Zero) \
+ V(Loong64S128AllOnes) \
+ V(Loong64I32x4Splat) \
+ V(Loong64I32x4ExtractLane) \
+ V(Loong64I32x4ReplaceLane) \
+ V(Loong64I32x4Add) \
+ V(Loong64I32x4Sub) \
+ V(Loong64F64x2Abs) \
+ V(Loong64F64x2Neg) \
+ V(Loong64F32x4Splat) \
+ V(Loong64F32x4ExtractLane) \
+ V(Loong64F32x4ReplaceLane) \
+ V(Loong64F32x4SConvertI32x4) \
+ V(Loong64F32x4UConvertI32x4) \
+ V(Loong64I32x4Mul) \
+ V(Loong64I32x4MaxS) \
+ V(Loong64I32x4MinS) \
+ V(Loong64I32x4Eq) \
+ V(Loong64I32x4Ne) \
+ V(Loong64I32x4Shl) \
+ V(Loong64I32x4ShrS) \
+ V(Loong64I32x4ShrU) \
+ V(Loong64I32x4MaxU) \
+ V(Loong64I32x4MinU) \
+ V(Loong64F64x2Sqrt) \
+ V(Loong64F64x2Add) \
+ V(Loong64F64x2Sub) \
+ V(Loong64F64x2Mul) \
+ V(Loong64F64x2Div) \
+ V(Loong64F64x2Min) \
+ V(Loong64F64x2Max) \
+ V(Loong64F64x2Eq) \
+ V(Loong64F64x2Ne) \
+ V(Loong64F64x2Lt) \
+ V(Loong64F64x2Le) \
+ V(Loong64F64x2Splat) \
+ V(Loong64F64x2ExtractLane) \
+ V(Loong64F64x2ReplaceLane) \
+ V(Loong64F64x2Pmin) \
+ V(Loong64F64x2Pmax) \
+ V(Loong64F64x2Ceil) \
+ V(Loong64F64x2Floor) \
+ V(Loong64F64x2Trunc) \
+ V(Loong64F64x2NearestInt) \
+ V(Loong64F64x2ConvertLowI32x4S) \
+ V(Loong64F64x2ConvertLowI32x4U) \
+ V(Loong64F64x2PromoteLowF32x4) \
+ V(Loong64I64x2Splat) \
+ V(Loong64I64x2ExtractLane) \
+ V(Loong64I64x2ReplaceLane) \
+ V(Loong64I64x2Add) \
+ V(Loong64I64x2Sub) \
+ V(Loong64I64x2Mul) \
+ V(Loong64I64x2Neg) \
+ V(Loong64I64x2Shl) \
+ V(Loong64I64x2ShrS) \
+ V(Loong64I64x2ShrU) \
+ V(Loong64I64x2BitMask) \
+ V(Loong64I64x2Eq) \
+ V(Loong64I64x2Ne) \
+ V(Loong64I64x2GtS) \
+ V(Loong64I64x2GeS) \
+ V(Loong64I64x2Abs) \
+ V(Loong64I64x2SConvertI32x4Low) \
+ V(Loong64I64x2SConvertI32x4High) \
+ V(Loong64I64x2UConvertI32x4Low) \
+ V(Loong64I64x2UConvertI32x4High) \
+ V(Loong64ExtMulLow) \
+ V(Loong64ExtMulHigh) \
+ V(Loong64ExtAddPairwise) \
+ V(Loong64F32x4Abs) \
+ V(Loong64F32x4Neg) \
+ V(Loong64F32x4Sqrt) \
+ V(Loong64F32x4RecipApprox) \
+ V(Loong64F32x4RecipSqrtApprox) \
+ V(Loong64F32x4Add) \
+ V(Loong64F32x4Sub) \
+ V(Loong64F32x4Mul) \
+ V(Loong64F32x4Div) \
+ V(Loong64F32x4Max) \
+ V(Loong64F32x4Min) \
+ V(Loong64F32x4Eq) \
+ V(Loong64F32x4Ne) \
+ V(Loong64F32x4Lt) \
+ V(Loong64F32x4Le) \
+ V(Loong64F32x4Pmin) \
+ V(Loong64F32x4Pmax) \
+ V(Loong64F32x4Ceil) \
+ V(Loong64F32x4Floor) \
+ V(Loong64F32x4Trunc) \
+ V(Loong64F32x4NearestInt) \
+ V(Loong64F32x4DemoteF64x2Zero) \
+ V(Loong64I32x4SConvertF32x4) \
+ V(Loong64I32x4UConvertF32x4) \
+ V(Loong64I32x4Neg) \
+ V(Loong64I32x4GtS) \
+ V(Loong64I32x4GeS) \
+ V(Loong64I32x4GtU) \
+ V(Loong64I32x4GeU) \
+ V(Loong64I32x4Abs) \
+ V(Loong64I32x4BitMask) \
+ V(Loong64I32x4DotI16x8S) \
+ V(Loong64I32x4TruncSatF64x2SZero) \
+ V(Loong64I32x4TruncSatF64x2UZero) \
+ V(Loong64I16x8Splat) \
+ V(Loong64I16x8ExtractLaneU) \
+ V(Loong64I16x8ExtractLaneS) \
+ V(Loong64I16x8ReplaceLane) \
+ V(Loong64I16x8Neg) \
+ V(Loong64I16x8Shl) \
+ V(Loong64I16x8ShrS) \
+ V(Loong64I16x8ShrU) \
+ V(Loong64I16x8Add) \
+ V(Loong64I16x8AddSatS) \
+ V(Loong64I16x8Sub) \
+ V(Loong64I16x8SubSatS) \
+ V(Loong64I16x8Mul) \
+ V(Loong64I16x8MaxS) \
+ V(Loong64I16x8MinS) \
+ V(Loong64I16x8Eq) \
+ V(Loong64I16x8Ne) \
+ V(Loong64I16x8GtS) \
+ V(Loong64I16x8GeS) \
+ V(Loong64I16x8AddSatU) \
+ V(Loong64I16x8SubSatU) \
+ V(Loong64I16x8MaxU) \
+ V(Loong64I16x8MinU) \
+ V(Loong64I16x8GtU) \
+ V(Loong64I16x8GeU) \
+ V(Loong64I16x8RoundingAverageU) \
+ V(Loong64I16x8Abs) \
+ V(Loong64I16x8BitMask) \
+ V(Loong64I16x8Q15MulRSatS) \
+ V(Loong64I8x16Splat) \
+ V(Loong64I8x16ExtractLaneU) \
+ V(Loong64I8x16ExtractLaneS) \
+ V(Loong64I8x16ReplaceLane) \
+ V(Loong64I8x16Neg) \
+ V(Loong64I8x16Shl) \
+ V(Loong64I8x16ShrS) \
+ V(Loong64I8x16Add) \
+ V(Loong64I8x16AddSatS) \
+ V(Loong64I8x16Sub) \
+ V(Loong64I8x16SubSatS) \
+ V(Loong64I8x16MaxS) \
+ V(Loong64I8x16MinS) \
+ V(Loong64I8x16Eq) \
+ V(Loong64I8x16Ne) \
+ V(Loong64I8x16GtS) \
+ V(Loong64I8x16GeS) \
+ V(Loong64I8x16ShrU) \
+ V(Loong64I8x16AddSatU) \
+ V(Loong64I8x16SubSatU) \
+ V(Loong64I8x16MaxU) \
+ V(Loong64I8x16MinU) \
+ V(Loong64I8x16GtU) \
+ V(Loong64I8x16GeU) \
+ V(Loong64I8x16RoundingAverageU) \
+ V(Loong64I8x16Abs) \
+ V(Loong64I8x16Popcnt) \
+ V(Loong64I8x16BitMask) \
+ V(Loong64S128And) \
+ V(Loong64S128Or) \
+ V(Loong64S128Xor) \
+ V(Loong64S128Not) \
+ V(Loong64S128Select) \
+ V(Loong64S128AndNot) \
+ V(Loong64I64x2AllTrue) \
+ V(Loong64I32x4AllTrue) \
+ V(Loong64I16x8AllTrue) \
+ V(Loong64I8x16AllTrue) \
+ V(Loong64V128AnyTrue) \
+ V(Loong64S32x4InterleaveRight) \
+ V(Loong64S32x4InterleaveLeft) \
+ V(Loong64S32x4PackEven) \
+ V(Loong64S32x4PackOdd) \
+ V(Loong64S32x4InterleaveEven) \
+ V(Loong64S32x4InterleaveOdd) \
+ V(Loong64S32x4Shuffle) \
+ V(Loong64S16x8InterleaveRight) \
+ V(Loong64S16x8InterleaveLeft) \
+ V(Loong64S16x8PackEven) \
+ V(Loong64S16x8PackOdd) \
+ V(Loong64S16x8InterleaveEven) \
+ V(Loong64S16x8InterleaveOdd) \
+ V(Loong64S16x4Reverse) \
+ V(Loong64S16x2Reverse) \
+ V(Loong64S8x16InterleaveRight) \
+ V(Loong64S8x16InterleaveLeft) \
+ V(Loong64S8x16PackEven) \
+ V(Loong64S8x16PackOdd) \
+ V(Loong64S8x16InterleaveEven) \
+ V(Loong64S8x16InterleaveOdd) \
+ V(Loong64I8x16Shuffle) \
+ V(Loong64I8x16Swizzle) \
+ V(Loong64S8x16Concat) \
+ V(Loong64S8x8Reverse) \
+ V(Loong64S8x4Reverse) \
+ V(Loong64S8x2Reverse) \
+ V(Loong64S128LoadSplat) \
+ V(Loong64S128Load8x8S) \
+ V(Loong64S128Load8x8U) \
+ V(Loong64S128Load16x4S) \
+ V(Loong64S128Load16x4U) \
+ V(Loong64S128Load32x2S) \
+ V(Loong64S128Load32x2U) \
+ V(Loong64S128Load32Zero) \
+ V(Loong64S128Load64Zero) \
+ V(Loong64LoadLane) \
+ V(Loong64StoreLane) \
+ V(Loong64I32x4SConvertI16x8Low) \
+ V(Loong64I32x4SConvertI16x8High) \
+ V(Loong64I32x4UConvertI16x8Low) \
+ V(Loong64I32x4UConvertI16x8High) \
+ V(Loong64I16x8SConvertI8x16Low) \
+ V(Loong64I16x8SConvertI8x16High) \
+ V(Loong64I16x8SConvertI32x4) \
+ V(Loong64I16x8UConvertI32x4) \
+ V(Loong64I16x8UConvertI8x16Low) \
+ V(Loong64I16x8UConvertI8x16High) \
+ V(Loong64I8x16SConvertI16x8) \
+ V(Loong64I8x16UConvertI16x8) \
+ V(Loong64StoreCompressTagged) \
+ V(Loong64Word64AtomicLoadUint32) \
+ V(Loong64Word64AtomicLoadUint64) \
+ V(Loong64Word64AtomicStoreWord64) \
+ V(Loong64Word64AtomicAddUint64) \
+ V(Loong64Word64AtomicSubUint64) \
+ V(Loong64Word64AtomicAndUint64) \
+ V(Loong64Word64AtomicOrUint64) \
+ V(Loong64Word64AtomicXorUint64) \
+ V(Loong64Word64AtomicExchangeUint64) \
+ V(Loong64Word64AtomicCompareExchangeUint64)
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+ V(MRI) /* [%r0 + K] */ \
+ V(MRR) /* [%r0 + %r1] */ \
+ V(Root) /* [%rr + K] */
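+
+// For illustration, a minimal sketch (using helpers from the instruction
+// selector, not new API): an MRI-mode load is emitted as
+//   Emit(kLoong64Ld_w | AddressingModeField::encode(kMode_MRI),
+//        g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+// and the code generator recovers the mode again via
+//   AddressingModeField::decode(instr->opcode()).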
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_BACKEND_LOONG64_INSTRUCTION_CODES_LOONG64_H_
diff --git a/deps/v8/src/compiler/backend/loong64/instruction-scheduler-loong64.cc b/deps/v8/src/compiler/backend/loong64/instruction-scheduler-loong64.cc
new file mode 100644
index 0000000000..3cfec9c403
--- /dev/null
+++ b/deps/v8/src/compiler/backend/loong64/instruction-scheduler-loong64.cc
@@ -0,0 +1,26 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/macro-assembler.h"
+#include "src/compiler/backend/instruction-scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// TODO(LOONG_dev): LOONG64 Support instruction scheduler.
+bool InstructionScheduler::SchedulerSupported() { return false; }
+
+int InstructionScheduler::GetTargetInstructionFlags(
+ const Instruction* instr) const {
+ UNREACHABLE();
+}
+
+int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
+ UNREACHABLE();
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc
new file mode 100644
index 0000000000..454bfa9986
--- /dev/null
+++ b/deps/v8/src/compiler/backend/loong64/instruction-selector-loong64.cc
@@ -0,0 +1,3124 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bits.h"
+#include "src/base/platform/wrappers.h"
+#include "src/codegen/machine-type.h"
+#include "src/compiler/backend/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define TRACE_UNIMPL() \
+ PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
+
+#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
+
+// Adds loong64-specific methods for generating InstructionOperands.
+class Loong64OperandGenerator final : public OperandGenerator {
+ public:
+ explicit Loong64OperandGenerator(InstructionSelector* selector)
+ : OperandGenerator(selector) {}
+
+ InstructionOperand UseOperand(Node* node, InstructionCode opcode) {
+ if (CanBeImmediate(node, opcode)) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
+ // Use the zero register if the node has the immediate value zero, otherwise
+ // assign a register.
+ InstructionOperand UseRegisterOrImmediateZero(Node* node) {
+ if ((IsIntegerConstant(node) && (GetIntegerConstantValue(node) == 0)) ||
+ (IsFloatConstant(node) &&
+ (bit_cast<int64_t>(GetFloatConstantValue(node)) == 0))) {
+ return UseImmediate(node);
+ }
+ return UseRegister(node);
+ }
+
+ bool IsIntegerConstant(Node* node) {
+ return (node->opcode() == IrOpcode::kInt32Constant) ||
+ (node->opcode() == IrOpcode::kInt64Constant);
+ }
+
+ int64_t GetIntegerConstantValue(Node* node) {
+ if (node->opcode() == IrOpcode::kInt32Constant) {
+ return OpParameter<int32_t>(node->op());
+ }
+ DCHECK_EQ(IrOpcode::kInt64Constant, node->opcode());
+ return OpParameter<int64_t>(node->op());
+ }
+
+ bool IsFloatConstant(Node* node) {
+ return (node->opcode() == IrOpcode::kFloat32Constant) ||
+ (node->opcode() == IrOpcode::kFloat64Constant);
+ }
+
+ double GetFloatConstantValue(Node* node) {
+ if (node->opcode() == IrOpcode::kFloat32Constant) {
+ return OpParameter<float>(node->op());
+ }
+ DCHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
+ return OpParameter<double>(node->op());
+ }
+
+ bool CanBeImmediate(Node* node, InstructionCode mode) {
+ return IsIntegerConstant(node) &&
+ CanBeImmediate(GetIntegerConstantValue(node), mode);
+ }
+
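+ // For reference, the predicate ranges used below: is_uint5/is_uint6 accept
+ // [0, 31] / [0, 63] (shift amounts are taken modulo the operand width);
+ // is_uint12 accepts [0, 4095], so e.g. 0xFFF fits while 0x1000 does not;
+ // is_int12 and is_int16 accept [-2048, 2047] and [-32768, 32767].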
+ bool CanBeImmediate(int64_t value, InstructionCode opcode) {
+ switch (ArchOpcodeField::decode(opcode)) {
+ case kLoong64Sll_w:
+ case kLoong64Srl_w:
+ case kLoong64Sra_w:
+ return is_uint5(value);
+ case kLoong64Sll_d:
+ case kLoong64Srl_d:
+ case kLoong64Sra_d:
+ return is_uint6(value);
+ case kLoong64And:
+ case kLoong64And32:
+ case kLoong64Or:
+ case kLoong64Or32:
+ case kLoong64Xor:
+ case kLoong64Xor32:
+ case kLoong64Tst:
+ return is_uint12(value);
+ case kLoong64Ld_b:
+ case kLoong64Ld_bu:
+ case kLoong64St_b:
+ case kLoong64Ld_h:
+ case kLoong64Ld_hu:
+ case kLoong64St_h:
+ case kLoong64Ld_w:
+ case kLoong64Ld_wu:
+ case kLoong64St_w:
+ case kLoong64Ld_d:
+ case kLoong64St_d:
+ case kLoong64Fld_s:
+ case kLoong64Fst_s:
+ case kLoong64Fld_d:
+ case kLoong64Fst_d:
+ return is_int16(value);
+ default:
+ return is_int12(value);
+ }
+ }
+
+ private:
+ bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
+ TRACE_UNIMPL();
+ return false;
+ }
+};
+
+static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+static void VisitRRI(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ int32_t imm = OpParameter<int32_t>(node->op());
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm));
+}
+
+static void VisitSimdShift(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ if (g.IsIntegerConstant(node->InputAt(1))) {
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseImmediate(node->InputAt(1)));
+ } else {
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+ }
+}
+
+static void VisitRRIR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ int32_t imm = OpParameter<int32_t>(node->op());
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseImmediate(imm),
+ g.UseRegister(node->InputAt(1)));
+}
+
+static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)));
+}
+
+static void VisitUniqueRRR(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)));
+}
+
+void VisitRRRR(InstructionSelector* selector, ArchOpcode opcode, Node* node) {
+ Loong64OperandGenerator g(selector);
+ selector->Emit(
+ opcode, g.DefineSameAsFirst(node), g.UseRegister(node->InputAt(0)),
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2)));
+}
+
+static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
+ Node* node) {
+ Loong64OperandGenerator g(selector);
+ selector->Emit(opcode, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)),
+ g.UseOperand(node->InputAt(1), opcode));
+}
+
+struct ExtendingLoadMatcher {
+ ExtendingLoadMatcher(Node* node, InstructionSelector* selector)
+ : matches_(false), selector_(selector), base_(nullptr), immediate_(0) {
+ Initialize(node);
+ }
+
+ bool Matches() const { return matches_; }
+
+ Node* base() const {
+ DCHECK(Matches());
+ return base_;
+ }
+ int64_t immediate() const {
+ DCHECK(Matches());
+ return immediate_;
+ }
+ ArchOpcode opcode() const {
+ DCHECK(Matches());
+ return opcode_;
+ }
+
+ private:
+ bool matches_;
+ InstructionSelector* selector_;
+ Node* base_;
+ int64_t immediate_;
+ ArchOpcode opcode_;
+
+ void Initialize(Node* node) {
+ Int64BinopMatcher m(node);
+ // When loading a 64-bit value and then shifting it right by 32, we can
+ // instead load and sign-extend just the upper 4 bytes.
+ // This happens, for example, when we're loading and untagging SMIs.
+ DCHECK(m.IsWord64Sar());
+ if (m.left().IsLoad() && m.right().Is(32) &&
+ selector_->CanCover(m.node(), m.left().node())) {
+ DCHECK_EQ(selector_->GetEffectLevel(node),
+ selector_->GetEffectLevel(m.left().node()));
+ MachineRepresentation rep =
+ LoadRepresentationOf(m.left().node()->op()).representation();
+ DCHECK_EQ(3, ElementSizeLog2Of(rep));
+ if (rep != MachineRepresentation::kTaggedSigned &&
+ rep != MachineRepresentation::kTaggedPointer &&
+ rep != MachineRepresentation::kTagged &&
+ rep != MachineRepresentation::kWord64) {
+ return;
+ }
+
+ Loong64OperandGenerator g(selector_);
+ Node* load = m.left().node();
+ Node* offset = load->InputAt(1);
+ base_ = load->InputAt(0);
+ opcode_ = kLoong64Ld_w;
+ if (g.CanBeImmediate(offset, opcode_)) {
+ immediate_ = g.GetIntegerConstantValue(offset) + 4;
+ matches_ = g.CanBeImmediate(immediate_, kLoong64Ld_w);
+ }
+ }
+ }
+};
+
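+// Worked example for the matcher above (LOONG64 is little-endian): for
+//   y = Word64Sar(Load[base + k], 32)
+// a single sign-extending 32-bit load of the high word,
+//   y = Ld_w [base + k + 4],
+// replaces Ld_d followed by a shift; hence the "+ 4" in Initialize().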
+bool TryEmitExtendingLoad(InstructionSelector* selector, Node* node,
+ Node* output_node) {
+ ExtendingLoadMatcher m(node, selector);
+ Loong64OperandGenerator g(selector);
+ if (m.Matches()) {
+ InstructionOperand inputs[2];
+ inputs[0] = g.UseRegister(m.base());
+ InstructionCode opcode =
+ m.opcode() | AddressingModeField::encode(kMode_MRI);
+ DCHECK(is_int32(m.immediate()));
+ inputs[1] = g.TempImmediate(static_cast<int32_t>(m.immediate()));
+ InstructionOperand outputs[] = {g.DefineAsRegister(output_node)};
+ selector->Emit(opcode, arraysize(outputs), outputs, arraysize(inputs),
+ inputs);
+ return true;
+ }
+ return false;
+}
+
+bool TryMatchImmediate(InstructionSelector* selector,
+ InstructionCode* opcode_return, Node* node,
+ size_t* input_count_return, InstructionOperand* inputs) {
+ Loong64OperandGenerator g(selector);
+ if (g.CanBeImmediate(node, *opcode_return)) {
+ *opcode_return |= AddressingModeField::encode(kMode_MRI);
+ inputs[0] = g.UseImmediate(node);
+ *input_count_return = 1;
+ return true;
+ }
+ return false;
+}
+
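+// Emit a binary operation, preferring an immediate operand where the opcode
+// allows it. For commutative operations a reverse opcode is supplied so that
+// a constant on the left can still use the immediate slot; e.g. (a sketch)
+// Int32Add(#40, x) becomes Add_w dst, x, #40 instead of first materializing
+// 40 in a register.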
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, bool has_reverse_opcode,
+ InstructionCode reverse_opcode,
+ FlagsContinuation* cont) {
+ Loong64OperandGenerator g(selector);
+ Int32BinopMatcher m(node);
+ InstructionOperand inputs[2];
+ size_t input_count = 0;
+ InstructionOperand outputs[1];
+ size_t output_count = 0;
+
+ if (TryMatchImmediate(selector, &opcode, m.right().node(), &input_count,
+ &inputs[1])) {
+ inputs[0] = g.UseRegister(m.left().node());
+ input_count++;
+ } else if (has_reverse_opcode &&
+ TryMatchImmediate(selector, &reverse_opcode, m.left().node(),
+ &input_count, &inputs[1])) {
+ inputs[0] = g.UseRegister(m.right().node());
+ opcode = reverse_opcode;
+ input_count++;
+ } else {
+ inputs[input_count++] = g.UseRegister(m.left().node());
+ inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
+ }
+
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ DCHECK_NE(0u, input_count);
+ DCHECK_EQ(1u, output_count);
+ DCHECK_GE(arraysize(inputs), input_count);
+ DCHECK_GE(arraysize(outputs), output_count);
+
+ selector->EmitWithContinuation(opcode, output_count, outputs, input_count,
+ inputs, cont);
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, bool has_reverse_opcode,
+ InstructionCode reverse_opcode) {
+ FlagsContinuation cont;
+ VisitBinop(selector, node, opcode, has_reverse_opcode, reverse_opcode, &cont);
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
+ VisitBinop(selector, node, opcode, false, kArchNop, cont);
+}
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+ InstructionCode opcode) {
+ VisitBinop(selector, node, opcode, false, kArchNop);
+}
+
+void InstructionSelector::VisitStackSlot(Node* node) {
+ StackSlotRepresentation rep = StackSlotRepresentationOf(node->op());
+ int alignment = rep.alignment();
+ int slot = frame_->AllocateSpillSlot(rep.size(), alignment);
+ OperandGenerator g(this);
+
+ Emit(kArchStackSlot, g.DefineAsRegister(node),
+ sequence()->AddImmediate(Constant(slot)), 0, nullptr);
+}
+
+void InstructionSelector::VisitAbortCSAAssert(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), a0));
+}
+
+void EmitLoad(InstructionSelector* selector, Node* node, InstructionCode opcode,
+ Node* output = nullptr) {
+ Loong64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
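+ // Fast path: a load from an external reference that is addressable relative
+ // to the roots register folds base and index into one int32 offset and uses
+ // the Root addressing mode, i.e. a single [%rr + K] access.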
+ ExternalReferenceMatcher m(base);
+ if (m.HasResolvedValue() && g.IsIntegerConstant(index) &&
+ selector->CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
+ ptrdiff_t const delta =
+ g.GetIntegerConstantValue(index) +
+ TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+ selector->isolate(), m.ResolvedValue());
+ // Check that the delta is a 32-bit integer due to the limitations of
+ // immediate operands.
+ if (is_int32(delta)) {
+ opcode |= AddressingModeField::encode(kMode_Root);
+ selector->Emit(opcode,
+ g.DefineAsRegister(output == nullptr ? node : output),
+ g.UseImmediate(static_cast<int32_t>(delta)));
+ return;
+ }
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(output == nullptr ? node : output),
+ g.UseRegister(base), g.UseImmediate(index));
+ } else {
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.DefineAsRegister(output == nullptr ? node : output),
+ g.UseRegister(base), g.UseRegister(index));
+ }
+}
+
+void InstructionSelector::VisitStoreLane(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitLoadLane(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitLoadTransform(Node* node) {
+ LoadTransformParameters params = LoadTransformParametersOf(node->op());
+
+ InstructionCode opcode = kArchNop;
+ switch (params.transformation) {
+ // TODO(LOONG_dev): LOONG64 S128 LoadSplat
+ case LoadTransformation::kS128Load8Splat:
+ opcode = kLoong64S128LoadSplat;
+ break;
+ case LoadTransformation::kS128Load16Splat:
+ opcode = kLoong64S128LoadSplat;
+ break;
+ case LoadTransformation::kS128Load32Splat:
+ opcode = kLoong64S128LoadSplat;
+ break;
+ case LoadTransformation::kS128Load64Splat:
+ opcode = kLoong64S128LoadSplat;
+ break;
+ case LoadTransformation::kS128Load8x8S:
+ opcode = kLoong64S128Load8x8S;
+ break;
+ case LoadTransformation::kS128Load8x8U:
+ opcode = kLoong64S128Load8x8U;
+ break;
+ case LoadTransformation::kS128Load16x4S:
+ opcode = kLoong64S128Load16x4S;
+ break;
+ case LoadTransformation::kS128Load16x4U:
+ opcode = kLoong64S128Load16x4U;
+ break;
+ case LoadTransformation::kS128Load32x2S:
+ opcode = kLoong64S128Load32x2S;
+ break;
+ case LoadTransformation::kS128Load32x2U:
+ opcode = kLoong64S128Load32x2U;
+ break;
+ case LoadTransformation::kS128Load32Zero:
+ opcode = kLoong64S128Load32Zero;
+ break;
+ case LoadTransformation::kS128Load64Zero:
+ opcode = kLoong64S128Load64Zero;
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+
+ EmitLoad(this, node, opcode);
+}
+
+void InstructionSelector::VisitLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+
+ InstructionCode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kFloat32:
+ opcode = kLoong64Fld_s;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kLoong64Fld_d;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kLoong64Ld_bu : kLoong64Ld_b;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kLoong64Ld_hu : kLoong64Ld_h;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kLoong64Ld_w;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kLoong64Ld_d;
+ break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
+ case MachineRepresentation::kNone:
+ case MachineRepresentation::kSimd128:
+ UNREACHABLE();
+ }
+
+ EmitLoad(this, node, opcode);
+}
+
+void InstructionSelector::VisitProtectedLoad(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitStore(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ MachineRepresentation rep = store_rep.representation();
+
+ if (FLAG_enable_unconditional_write_barriers && CanBeTaggedPointer(rep)) {
+ write_barrier_kind = kFullWriteBarrier;
+ }
+
+ // TODO(loong64): This could likely be handled more cleanly.
+ if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
+ DCHECK(CanBeTaggedPointer(rep));
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ // OutOfLineRecordWrite uses the index in an arithmetic instruction, so the
+ // index must fit the arithmetic immediate range (checked here via
+ // kLoong64Add_d), not just the wider load/store range.
+ if (g.CanBeImmediate(index, kLoong64Add_d)) {
+ inputs[input_count++] = g.UseImmediate(index);
+ addressing_mode = kMode_MRI;
+ } else {
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ addressing_mode = kMode_MRR;
+ }
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode =
+ WriteBarrierKindToRecordWriteMode(write_barrier_kind);
+ InstructionCode code = kArchStoreWithWriteBarrier;
+ code |= AddressingModeField::encode(addressing_mode);
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ Emit(code, 0, nullptr, input_count, inputs);
+ } else {
+ ArchOpcode opcode;
+ switch (rep) {
+ case MachineRepresentation::kFloat32:
+ opcode = kLoong64Fst_s;
+ break;
+ case MachineRepresentation::kFloat64:
+ opcode = kLoong64Fst_d;
+ break;
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = kLoong64St_b;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = kLoong64St_h;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kLoong64St_w;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged: // Fall through.
+ case MachineRepresentation::kWord64:
+ opcode = kLoong64St_d;
+ break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed: // Fall through.
+ case MachineRepresentation::kMapWord: // Fall through.
+ case MachineRepresentation::kNone:
+ case MachineRepresentation::kSimd128:
+ UNREACHABLE();
+ }
+
+ ExternalReferenceMatcher m(base);
+ if (m.HasResolvedValue() && g.IsIntegerConstant(index) &&
+ CanAddressRelativeToRootsRegister(m.ResolvedValue())) {
+ ptrdiff_t const delta =
+ g.GetIntegerConstantValue(index) +
+ TurboAssemblerBase::RootRegisterOffsetForExternalReference(
+ isolate(), m.ResolvedValue());
+ // Check that the delta is a 32-bit integer due to the limitations of
+ // immediate operands.
+ if (is_int32(delta)) {
+ Emit(opcode | AddressingModeField::encode(kMode_Root), g.NoOutput(),
+ g.UseImmediate(static_cast<int32_t>(delta)), g.UseImmediate(0),
+ g.UseRegisterOrImmediateZero(value));
+ return;
+ }
+ }
+
+ if (g.CanBeImmediate(index, opcode)) {
+ Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
+ g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
+ } else {
+ Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
+ g.UseRegister(base), g.UseRegister(index),
+ g.UseRegisterOrImmediateZero(value));
+ }
+ }
+}
+
+void InstructionSelector::VisitProtectedStore(Node* node) {
+ // TODO(eholk)
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitWord32And(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Shr() && CanCover(node, m.left().node()) &&
+ m.right().HasResolvedValue()) {
+ uint32_t mask = m.right().ResolvedValue();
+ uint32_t mask_width = base::bits::CountPopulation(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ // The mask must be contiguous, and occupy the least-significant bits.
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+
+ // Select Bstrpick_w for And(Shr(x, imm), mask) where the mask is in the
+ // least significant bits.
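+ // E.g. Word32And(Word32Shr(x, 8), 0xFFFF) becomes
+ //   Bstrpick_w(dst, x, lsb = 8, mask_width = 16),
+ // extracting bits [23:8] of x.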
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue()) {
+ // Any shift value can match; int32 shifts use `value % 32`.
+ uint32_t lsb = mleft.right().ResolvedValue() & 0x1F;
+
+ // Bstrpick_w cannot extract bits past the register size, however since
+ // shifting the original value would have introduced some zeros we can
+ // still use Bstrpick_w with a smaller mask and the remaining bits will
+ // be zeros.
+ if (lsb + mask_width > 32) mask_width = 32 - lsb;
+
+ Emit(kLoong64Bstrpick_w, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ // Other cases fall through to the normal And operation.
+ }
+ }
+ if (m.right().HasResolvedValue()) {
+ uint32_t mask = m.right().ResolvedValue();
+ uint32_t shift = base::bits::CountPopulation(~mask);
+ uint32_t msb = base::bits::CountLeadingZeros32(~mask);
+ if (shift != 0 && shift != 32 && msb + shift == 32) {
+ // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
+ // and avoid loading the inverted mask as a constant.
+ Emit(kLoong64Bstrins_w, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.TempImmediate(0),
+ g.TempImmediate(shift));
+ return;
+ }
+ }
+ VisitBinop(this, node, kLoong64And32, true, kLoong64And32);
+}
+
+void InstructionSelector::VisitWord64And(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64Shr() && CanCover(node, m.left().node()) &&
+ m.right().HasResolvedValue()) {
+ uint64_t mask = m.right().ResolvedValue();
+ uint32_t mask_width = base::bits::CountPopulation(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+ // The mask must be contiguous, and occupy the least-significant bits.
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
+
+ // Select Bstrpick_d for And(Shr(x, imm), mask) where the mask is in the
+ // least significant bits.
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue()) {
+ // Any shift value can match; int64 shifts use `value % 64`.
+ uint32_t lsb =
+ static_cast<uint32_t>(mleft.right().ResolvedValue() & 0x3F);
+
+ // Bstrpick_d cannot extract bits past the register size, however since
+ // shifting the original value would have introduced some zeros we can
+ // still use Bstrpick_d with a smaller mask and the remaining bits will
+ // be zeros.
+ if (lsb + mask_width > 64) mask_width = 64 - lsb;
+
+ if (lsb == 0 && mask_width == 64) {
+ Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(mleft.left().node()));
+ } else {
+ Emit(kLoong64Bstrpick_d, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(static_cast<int32_t>(mask_width)));
+ }
+ return;
+ }
+ // Other cases fall through to the normal And operation.
+ }
+ }
+ if (m.right().HasResolvedValue()) {
+ uint64_t mask = m.right().ResolvedValue();
+ uint32_t shift = base::bits::CountPopulation(~mask);
+ uint32_t msb = base::bits::CountLeadingZeros64(~mask);
+ if (shift != 0 && shift < 32 && msb + shift == 64) {
+ // Insert zeros for (x >> K) << K => x & ~(2^K - 1) expression reduction
+ // and avoid loading the inverted mask as a constant. The shift < 32 limit
+ // mirrors the MIPS port, whose Dins instruction cannot insert bits past
+ // the word size.
+ Emit(kLoong64Bstrins_d, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.TempImmediate(0),
+ g.TempImmediate(shift));
+ return;
+ }
+ }
+ VisitBinop(this, node, kLoong64And, true, kLoong64And);
+}
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+ VisitBinop(this, node, kLoong64Or32, true, kLoong64Or32);
+}
+
+void InstructionSelector::VisitWord64Or(Node* node) {
+ VisitBinop(this, node, kLoong64Or, true, kLoong64Or);
+}
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Or() && CanCover(node, m.left().node()) &&
+ m.right().Is(-1)) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (!mleft.right().HasResolvedValue()) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Nor32, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().Is(-1)) {
+ // Use Nor for bit negation and eliminate constant loading for xori.
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Nor32, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()), g.TempImmediate(0));
+ return;
+ }
+ VisitBinop(this, node, kLoong64Xor32, true, kLoong64Xor32);
+}
+
+void InstructionSelector::VisitWord64Xor(Node* node) {
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64Or() && CanCover(node, m.left().node()) &&
+ m.right().Is(-1)) {
+ Int64BinopMatcher mleft(m.left().node());
+ if (!mleft.right().HasResolvedValue()) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Nor, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(mleft.right().node()));
+ return;
+ }
+ }
+ if (m.right().Is(-1)) {
+ // Use Nor for bit negation and eliminate constant loading for xori.
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Nor, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(0));
+ return;
+ }
+ VisitBinop(this, node, kLoong64Xor, true, kLoong64Xor);
+}
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 31)) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher mleft(m.left().node());
+ // Match Word32Shl(Word32And(x, mask), imm) to Sll_w where the mask is
+ // contiguous and the shift immediate is non-zero.
+ if (mleft.right().HasResolvedValue()) {
+ uint32_t mask = mleft.right().ResolvedValue();
+ uint32_t mask_width = base::bits::CountPopulation(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 32)) {
+ uint32_t shift = m.right().ResolvedValue();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros32(mask));
+ DCHECK_NE(0u, shift);
+ if ((shift + mask_width) >= 32) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kLoong64Sll_w, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ }
+ }
+ VisitRRO(this, kLoong64Sll_w, node);
+}
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32And() && m.right().HasResolvedValue()) {
+ uint32_t lsb = m.right().ResolvedValue() & 0x1F;
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue() &&
+ mleft.right().ResolvedValue() != 0) {
+ // Select Bstrpick_w for Shr(And(x, mask), imm) where the result of the
+ // mask is shifted into the least-significant bits.
+ uint32_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
+ unsigned mask_width = base::bits::CountPopulation(mask);
+ unsigned mask_msb = base::bits::CountLeadingZeros32(mask);
+ if ((mask_msb + mask_width + lsb) == 32) {
+ Loong64OperandGenerator g(this);
+ DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(mask));
+ Emit(kLoong64Bstrpick_w, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ }
+ }
+ VisitRRO(this, kLoong64Srl_w, node);
+}
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+ Int32BinopMatcher m(node);
+ if (m.left().IsWord32Shl() && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (m.right().HasResolvedValue() && mleft.right().HasResolvedValue()) {
+ Loong64OperandGenerator g(this);
+ uint32_t sar = m.right().ResolvedValue();
+ uint32_t shl = mleft.right().ResolvedValue();
+ if ((sar == shl) && (sar == 16)) {
+ Emit(kLoong64Ext_w_h, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ } else if ((sar == shl) && (sar == 24)) {
+ Emit(kLoong64Ext_w_b, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()));
+ return;
+ } else if ((sar == shl) && (sar == 32)) {
+ Emit(kLoong64Sll_w, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+ return;
+ }
+ }
+ }
+ VisitRRO(this, kLoong64Sra_w, node);
+}
+
+void InstructionSelector::VisitWord64Shl(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
+ m.right().IsInRange(32, 63) && CanCover(node, m.left().node())) {
+ // There's no need to sign/zero-extend to 64-bit if we shift out the upper
+ // 32 bits anyway.
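+ // E.g. Word64Shl(ChangeInt32ToInt64(x), 40) only keeps bits coming from
+ // the low 24 bits of x, so the extension is dead and x is shifted
+ // directly.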
+ Emit(kLoong64Sll_d, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()->InputAt(0)),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ if (m.left().IsWord64And() && CanCover(node, m.left().node()) &&
+ m.right().IsInRange(1, 63)) {
+ // Match Word64Shl(Word64And(x, mask), imm) to Sll_d where the mask is
+ // contiguous and the shift immediate is non-zero.
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue()) {
+ uint64_t mask = mleft.right().ResolvedValue();
+ uint32_t mask_width = base::bits::CountPopulation(mask);
+ uint32_t mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_width != 0) && (mask_msb + mask_width == 64)) {
+ uint64_t shift = m.right().ResolvedValue();
+ DCHECK_EQ(0u, base::bits::CountTrailingZeros64(mask));
+ DCHECK_NE(0u, shift);
+
+ if ((shift + mask_width) >= 64) {
+ // If the mask is contiguous and reaches or extends beyond the top
+ // bit, only the shift is needed.
+ Emit(kLoong64Sll_d, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ }
+ }
+ VisitRRO(this, kLoong64Sll_d, node);
+}
+
+void InstructionSelector::VisitWord64Shr(Node* node) {
+ Int64BinopMatcher m(node);
+ if (m.left().IsWord64And() && m.right().HasResolvedValue()) {
+ uint32_t lsb = m.right().ResolvedValue() & 0x3F;
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue() &&
+ mleft.right().ResolvedValue() != 0) {
+ // Select Bstrpick_d for Shr(And(x, mask), imm) where the result of the
+ // mask is shifted into the least-significant bits.
+ uint64_t mask = (mleft.right().ResolvedValue() >> lsb) << lsb;
+ unsigned mask_width = base::bits::CountPopulation(mask);
+ unsigned mask_msb = base::bits::CountLeadingZeros64(mask);
+ if ((mask_msb + mask_width + lsb) == 64) {
+ Loong64OperandGenerator g(this);
+ DCHECK_EQ(lsb, base::bits::CountTrailingZeros64(mask));
+ Emit(kLoong64Bstrpick_d, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+ g.TempImmediate(mask_width));
+ return;
+ }
+ }
+ }
+ VisitRRO(this, kLoong64Srl_d, node);
+}
+
+void InstructionSelector::VisitWord64Sar(Node* node) {
+ if (TryEmitExtendingLoad(this, node, node)) return;
+ VisitRRO(this, kLoong64Sra_d, node);
+}
+
+void InstructionSelector::VisitWord32Rol(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64Rol(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+ VisitRRO(this, kLoong64Rotr_w, node);
+}
+
+void InstructionSelector::VisitWord64Ror(Node* node) {
+ VisitRRO(this, kLoong64Rotr_d, node);
+}
+
+void InstructionSelector::VisitWord32ReverseBits(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64ReverseBits(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32ReverseBytes(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64ByteSwap32, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitWord64ReverseBytes(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64ByteSwap64, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSimd128ReverseBytes(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitWord32Clz(Node* node) {
+ VisitRR(this, kLoong64Clz_w, node);
+}
+
+void InstructionSelector::VisitWord64Clz(Node* node) {
+ VisitRR(this, kLoong64Clz_d, node);
+}
+
+void InstructionSelector::VisitWord32Ctz(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64Ctz(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord32Popcnt(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitWord64Popcnt(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+
+ // Select Alsl_w for (left + (left_of_right << imm)).
+ if (m.right().opcode() == IrOpcode::kWord32Shl &&
+ CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
+ Int32BinopMatcher mright(m.right().node());
+ if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
+ int32_t shift_value =
+ static_cast<int32_t>(mright.right().ResolvedValue());
+ if (shift_value > 0 && shift_value <= 31) {
+ Emit(kLoong64Alsl_w, g.DefineAsRegister(node),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(m.left().node()), g.TempImmediate(shift_value));
+ return;
+ }
+ }
+ }
+
+ // Select Alsl_w for ((left_of_left << imm) + right).
+ if (m.left().opcode() == IrOpcode::kWord32Shl &&
+ CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
+ Int32BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
+ int32_t shift_value = static_cast<int32_t>(mleft.right().ResolvedValue());
+ if (shift_value > 0 && shift_value <= 31) {
+ Emit(kLoong64Alsl_w, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(m.right().node()), g.TempImmediate(shift_value));
+ return;
+ }
+ }
+ }
+
+ VisitBinop(this, node, kLoong64Add_w, true, kLoong64Add_w);
+}
+
+void InstructionSelector::VisitInt64Add(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+
+ // Select Alsl_d for (left + (left_of_right << imm)).
+ if (m.right().opcode() == IrOpcode::kWord64Shl &&
+ CanCover(node, m.left().node()) && CanCover(node, m.right().node())) {
+ Int64BinopMatcher mright(m.right().node());
+ if (mright.right().HasResolvedValue() && !m.left().HasResolvedValue()) {
+ int32_t shift_value =
+ static_cast<int32_t>(mright.right().ResolvedValue());
+ if (shift_value > 0 && shift_value <= 31) {
+ Emit(kLoong64Alsl_d, g.DefineAsRegister(node),
+ g.UseRegister(mright.left().node()),
+ g.UseRegister(m.left().node()), g.TempImmediate(shift_value));
+ return;
+ }
+ }
+ }
+
+ // Select Alsl_d for ((left_of_left << imm) + right).
+ if (m.left().opcode() == IrOpcode::kWord64Shl &&
+ CanCover(node, m.right().node()) && CanCover(node, m.left().node())) {
+ Int64BinopMatcher mleft(m.left().node());
+ if (mleft.right().HasResolvedValue() && !m.right().HasResolvedValue()) {
+ int32_t shift_value = static_cast<int32_t>(mleft.right().ResolvedValue());
+ if (shift_value > 0 && shift_value <= 31) {
+ Emit(kLoong64Alsl_d, g.DefineAsRegister(node),
+ g.UseRegister(mleft.left().node()),
+ g.UseRegister(m.right().node()), g.TempImmediate(shift_value));
+ return;
+ }
+ }
+ }
+
+ VisitBinop(this, node, kLoong64Add_d, true, kLoong64Add_d);
+}
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+ VisitBinop(this, node, kLoong64Sub_w);
+}
+
+void InstructionSelector::VisitInt64Sub(Node* node) {
+ VisitBinop(this, node, kLoong64Sub_d);
+}
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
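+ // Strength-reduce multiplication by a constant (a sketch):
+ //   x * 8 => Sll_w(x, 3)
+ //   x * 9 => Alsl_w(x, x, 3), i.e. (x << 3) + x
+ //   x * 7 => Sub_w(Sll_w(x, 3), x), i.e. (x << 3) - x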
+ if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
+ uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
+ if (base::bits::IsPowerOfTwo(value)) {
+ Emit(kLoong64Sll_w | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
+ return;
+ }
+ if (base::bits::IsPowerOfTwo(value - 1) && value - 1 > 0 &&
+ value - 1 <= 31) {
+ Emit(kLoong64Alsl_w, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1)));
+ return;
+ }
+ if (base::bits::IsPowerOfTwo(value + 1)) {
+ InstructionOperand temp = g.TempRegister();
+ Emit(kLoong64Sll_w | AddressingModeField::encode(kMode_None), temp,
+ g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
+ Emit(kLoong64Sub_w | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
+ return;
+ }
+ }
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher leftInput(left), rightInput(right);
+ if (leftInput.right().Is(32) && rightInput.right().Is(32)) {
+ // Combine untagging shifts with Mulh_d.
+ Emit(kLoong64Mulh_d, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
+ VisitRRR(this, kLoong64Mul_w, node);
+}
+
+void InstructionSelector::VisitInt32MulHigh(Node* node) {
+ VisitRRR(this, kLoong64Mulh_w, node);
+}
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+ VisitRRR(this, kLoong64Mulh_wu, node);
+}
+
+void InstructionSelector::VisitInt64Mul(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ if (m.right().HasResolvedValue() && m.right().ResolvedValue() > 0) {
+ uint32_t value = static_cast<uint32_t>(m.right().ResolvedValue());
+ if (base::bits::IsPowerOfTwo(value)) {
+ Emit(kLoong64Sll_d | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value)));
+ return;
+ }
+ if (base::bits::IsPowerOfTwo(value - 1) && value - 1 > 0 &&
+ value - 1 <= 31) {
+ // The Alsl_d macro handles shift amounts that fall out of range.
+ Emit(kLoong64Alsl_d, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()), g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value - 1)));
+ return;
+ }
+ if (base::bits::IsPowerOfTwo(value + 1)) {
+ InstructionOperand temp = g.TempRegister();
+ Emit(kLoong64Sll_d | AddressingModeField::encode(kMode_None), temp,
+ g.UseRegister(m.left().node()),
+ g.TempImmediate(base::bits::WhichPowerOfTwo(value + 1)));
+ Emit(kLoong64Sub_d | AddressingModeField::encode(kMode_None),
+ g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
+ return;
+ }
+ }
+ Emit(kLoong64Mul_d, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher rightInput(right), leftInput(left);
+ if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
+ // Combine both shifted operands with Div_d.
+ Emit(kLoong64Div_d, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
+ Emit(kLoong64Div_w, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitUint32Div(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Emit(kLoong64Div_wu, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ if (CanCover(node, left) && CanCover(node, right)) {
+ if (left->opcode() == IrOpcode::kWord64Sar &&
+ right->opcode() == IrOpcode::kWord64Sar) {
+ Int64BinopMatcher rightInput(right), leftInput(left);
+ if (rightInput.right().Is(32) && leftInput.right().Is(32)) {
+ // Combine both shifted operands with Mod_d.
+ Emit(kLoong64Mod_d, g.DefineSameAsFirst(node),
+ g.UseRegister(leftInput.left().node()),
+ g.UseRegister(rightInput.left().node()));
+ return;
+ }
+ }
+ }
+ Emit(kLoong64Mod_w, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitUint32Mod(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int32BinopMatcher m(node);
+ Emit(kLoong64Mod_wu, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitInt64Div(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kLoong64Div_d, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitUint64Div(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kLoong64Div_du, g.DefineSameAsFirst(node),
+ g.UseRegister(m.left().node()), g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitInt64Mod(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kLoong64Mod_d, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitUint64Mod(Node* node) {
+ Loong64OperandGenerator g(this);
+ Int64BinopMatcher m(node);
+ Emit(kLoong64Mod_du, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+ g.UseRegister(m.right().node()));
+}
+
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+ VisitRR(this, kLoong64Float32ToFloat64, node);
+}
+
+void InstructionSelector::VisitRoundInt32ToFloat32(Node* node) {
+ VisitRR(this, kLoong64Int32ToFloat32, node);
+}
+
+void InstructionSelector::VisitRoundUint32ToFloat32(Node* node) {
+ VisitRR(this, kLoong64Uint32ToFloat32, node);
+}
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+ VisitRR(this, kLoong64Int32ToFloat64, node);
+}
+
+void InstructionSelector::VisitChangeInt64ToFloat64(Node* node) {
+ VisitRR(this, kLoong64Int64ToFloat64, node);
+}
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+ VisitRR(this, kLoong64Uint32ToFloat64, node);
+}
+
+void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionCode opcode = kLoong64Float32ToInt32;
+ TruncateKind kind = OpParameter<TruncateKind>(node->op());
+ if (kind == TruncateKind::kSetOverflowToMin) {
+ opcode |= MiscField::encode(true);
+ }
+ Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitTruncateFloat32ToUint32(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionCode opcode = kLoong64Float32ToUint32;
+ TruncateKind kind = OpParameter<TruncateKind>(node->op());
+ if (kind == TruncateKind::kSetOverflowToMin) {
+ opcode |= MiscField::encode(true);
+ }
+ Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ // TODO(LOONG_dev): LOONG64 Match ChangeFloat64ToInt32(Float64Round##OP) to
+ // corresponding instruction which does rounding and conversion to
+ // integer format.
+ if (CanCover(node, value)) {
+ if (value->opcode() == IrOpcode::kChangeFloat32ToFloat64) {
+ Node* next = value->InputAt(0);
+ if (!CanCover(value, next)) {
+ // Match float32 -> float64 -> int32 representation change path.
+ Emit(kLoong64Float32ToInt32, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ }
+ }
+ VisitRR(this, kLoong64Float64ToInt32, node);
+}
+
+void InstructionSelector::VisitChangeFloat64ToInt64(Node* node) {
+ VisitRR(this, kLoong64Float64ToInt64, node);
+}
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+ VisitRR(this, kLoong64Float64ToUint32, node);
+}
+
+void InstructionSelector::VisitChangeFloat64ToUint64(Node* node) {
+ VisitRR(this, kLoong64Float64ToUint64, node);
+}
+
+void InstructionSelector::VisitTruncateFloat64ToUint32(Node* node) {
+ VisitRR(this, kLoong64Float64ToUint32, node);
+}
+
+void InstructionSelector::VisitTruncateFloat64ToInt64(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionCode opcode = kLoong64Float64ToInt64;
+ TruncateKind kind = OpParameter<TruncateKind>(node->op());
+ if (kind == TruncateKind::kSetOverflowToMin) {
+ opcode |= MiscField::encode(true);
+ }
+ Emit(opcode, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
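+// The TryTruncate* operations optionally produce a second projection that
+// signals whether the conversion succeeded; when present, it is defined as
+// an extra register output of the same instruction.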
+void InstructionSelector::VisitTryTruncateFloat32ToInt64(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ this->Emit(kLoong64Float32ToInt64, output_count, outputs, 1, inputs);
+}
+
+void InstructionSelector::VisitTryTruncateFloat64ToInt64(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kLoong64Float64ToInt64, output_count, outputs, 1, inputs);
+}
+
+void InstructionSelector::VisitTryTruncateFloat32ToUint64(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kLoong64Float32ToUint64, output_count, outputs, 1, inputs);
+}
+
+void InstructionSelector::VisitTryTruncateFloat64ToUint64(Node* node) {
+ Loong64OperandGenerator g(this);
+
+ InstructionOperand inputs[] = {g.UseRegister(node->InputAt(0))};
+ InstructionOperand outputs[2];
+ size_t output_count = 0;
+ outputs[output_count++] = g.DefineAsRegister(node);
+
+ Node* success_output = NodeProperties::FindProjection(node, 1);
+ if (success_output) {
+ outputs[output_count++] = g.DefineAsRegister(success_output);
+ }
+
+ Emit(kLoong64Float64ToUint64, output_count, outputs, 1, inputs);
+}
+
+void InstructionSelector::VisitBitcastWord32ToWord64(Node* node) {
+ UNIMPLEMENTED();
+}
+
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+#ifdef USE_SIMULATOR
+ Node* value = node->InputAt(0);
+ if ((value->opcode() == IrOpcode::kLoad ||
+ value->opcode() == IrOpcode::kLoadImmutable) &&
+ CanCover(node, value)) {
+ // Generate sign-extending load.
+ LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+ InstructionCode opcode = kArchNop;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8:
+ opcode = load_rep.IsUnsigned() ? kLoong64Ld_bu : kLoong64Ld_b;
+ break;
+ case MachineRepresentation::kWord16:
+ opcode = load_rep.IsUnsigned() ? kLoong64Ld_hu : kLoong64Ld_h;
+ break;
+ case MachineRepresentation::kWord32:
+ opcode = kLoong64Ld_w;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ EmitLoad(this, value, opcode, node);
+ } else {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Sll_w, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.TempImmediate(0));
+ }
+#else
+ EmitIdentity(node);
+#endif
+}
+
+bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
+ DCHECK_NE(node->opcode(), IrOpcode::kPhi);
+ switch (node->opcode()) {
+ // Comparisons only emit 0/1, so the upper 32 bits must be zero.
+ case IrOpcode::kWord32Equal:
+ case IrOpcode::kInt32LessThan:
+ case IrOpcode::kInt32LessThanOrEqual:
+ case IrOpcode::kUint32LessThan:
+ case IrOpcode::kUint32LessThanOrEqual:
+ return true;
+ case IrOpcode::kWord32And: {
+ Int32BinopMatcher m(node);
+ if (m.right().HasResolvedValue()) {
+ uint32_t mask = m.right().ResolvedValue();
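+ // With bit 31 of the mask clear, the result's sign bit is clear, so
+ // its sign-extension leaves the upper 32 bits zero.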
+ return is_uint31(mask);
+ }
+ return false;
+ }
+ case IrOpcode::kWord32Shr: {
+ Int32BinopMatcher m(node);
+ if (m.right().HasResolvedValue()) {
+ uint8_t sa = m.right().ResolvedValue() & 0x1f;
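+ // A non-zero logical right shift clears bit 31, so the sign-extended
+ // 32-bit result has zeros in its upper 32 bits.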
+ return sa > 0;
+ }
+ return false;
+ }
+ case IrOpcode::kLoad:
+ case IrOpcode::kLoadImmutable: {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ if (load_rep.IsUnsigned()) {
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kBit: // Fall through.
+ case MachineRepresentation::kWord8: // Fall through.
+ case MachineRepresentation::kWord16:
+ return true;
+ default:
+ return false;
+ }
+ }
+ return false;
+ }
+ default:
+ return false;
+ }
+}
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+
+ if (value->opcode() == IrOpcode::kLoad) {
+ LoadRepresentation load_rep = LoadRepresentationOf(value->op());
+ if (load_rep.IsUnsigned() &&
+ load_rep.representation() == MachineRepresentation::kWord32) {
+ EmitLoad(this, value, kLoong64Ld_wu, node);
+ return;
+ }
+ }
+ if (ZeroExtendsWord32ToWord64(value)) {
+ EmitIdentity(node);
+ return;
+ }
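+ // Otherwise zero-extend explicitly: Bstrpick_d with lsb = 0 and width = 32
+ // copies the low 32 bits and clears the upper word.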
+ Emit(kLoong64Bstrpick_d, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.TempImmediate(0),
+ g.TempImmediate(32));
+}
+
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+ if (CanCover(node, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord64Sar: {
+ if (CanCoverTransitively(node, value, value->InputAt(0)) &&
+ TryEmitExtendingLoad(this, value, node)) {
+ return;
+ } else {
+ Int64BinopMatcher m(value);
+ if (m.right().IsInRange(32, 63)) {
+ // After Smi untagging no further truncation is needed; combine the
+ // shift and the truncate into a single Sra_d.
+ Emit(kLoong64Sra_d, g.DefineAsRegister(node),
+ g.UseRegister(m.left().node()),
+ g.UseImmediate(m.right().node()));
+ return;
+ }
+ }
+ break;
+ }
+ default:
+ break;
+ }
+ }
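+  // Truncation only needs the low 32 bits; sll.w by 0 re-canonicalizes
+  // the register to a sign-extended 32-bit value.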
+ Emit(kLoong64Sll_w, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(0));
+}
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* value = node->InputAt(0);
+  // Match TruncateFloat64ToFloat32(ChangeInt32ToFloat64) to the
+  // corresponding instruction.
+ if (CanCover(node, value) &&
+ value->opcode() == IrOpcode::kChangeInt32ToFloat64) {
+ Emit(kLoong64Int32ToFloat32, g.DefineAsRegister(node),
+ g.UseRegister(value->InputAt(0)));
+ return;
+ }
+ VisitRR(this, kLoong64Float64ToFloat32, node);
+}
+
+void InstructionSelector::VisitTruncateFloat64ToWord32(Node* node) {
+ VisitRR(this, kArchTruncateDoubleToI, node);
+}
+
+void InstructionSelector::VisitRoundFloat64ToInt32(Node* node) {
+ VisitRR(this, kLoong64Float64ToInt32, node);
+}
+
+void InstructionSelector::VisitRoundInt64ToFloat32(Node* node) {
+ VisitRR(this, kLoong64Int64ToFloat32, node);
+}
+
+void InstructionSelector::VisitRoundInt64ToFloat64(Node* node) {
+ VisitRR(this, kLoong64Int64ToFloat64, node);
+}
+
+void InstructionSelector::VisitRoundUint64ToFloat32(Node* node) {
+ VisitRR(this, kLoong64Uint64ToFloat32, node);
+}
+
+void InstructionSelector::VisitRoundUint64ToFloat64(Node* node) {
+ VisitRR(this, kLoong64Uint64ToFloat64, node);
+}
+
+void InstructionSelector::VisitBitcastFloat32ToInt32(Node* node) {
+ VisitRR(this, kLoong64Float64ExtractLowWord32, node);
+}
+
+void InstructionSelector::VisitBitcastFloat64ToInt64(Node* node) {
+ VisitRR(this, kLoong64BitcastDL, node);
+}
+
+void InstructionSelector::VisitBitcastInt32ToFloat32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Float64InsertLowWord32, g.DefineAsRegister(node),
+ ImmediateOperand(ImmediateOperand::INLINE_INT32, 0),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitBitcastInt64ToFloat64(Node* node) {
+ VisitRR(this, kLoong64BitcastLD, node);
+}
+
+void InstructionSelector::VisitFloat32Add(Node* node) {
+ VisitRRR(this, kLoong64Float32Add, node);
+}
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+ VisitRRR(this, kLoong64Float64Add, node);
+}
+
+void InstructionSelector::VisitFloat32Sub(Node* node) {
+ VisitRRR(this, kLoong64Float32Sub, node);
+}
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+ VisitRRR(this, kLoong64Float64Sub, node);
+}
+
+void InstructionSelector::VisitFloat32Mul(Node* node) {
+ VisitRRR(this, kLoong64Float32Mul, node);
+}
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+ VisitRRR(this, kLoong64Float64Mul, node);
+}
+
+void InstructionSelector::VisitFloat32Div(Node* node) {
+ VisitRRR(this, kLoong64Float32Div, node);
+}
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+ VisitRRR(this, kLoong64Float64Div, node);
+}
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
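+  // There is no FP remainder instruction; Float64Mod is lowered to a call
+  // into the C runtime, hence the fixed FP argument/result registers and
+  // MarkAsCall().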
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Float64Mod, g.DefineAsFixed(node, f0),
+ g.UseFixed(node->InputAt(0), f0), g.UseFixed(node->InputAt(1), f1))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat32Max(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Float32Max, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat64Max(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Float64Max, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat32Min(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Float32Min, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat64Min(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Float64Min, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+void InstructionSelector::VisitFloat32Abs(Node* node) {
+ VisitRR(this, kLoong64Float32Abs, node);
+}
+
+void InstructionSelector::VisitFloat64Abs(Node* node) {
+ VisitRR(this, kLoong64Float64Abs, node);
+}
+
+void InstructionSelector::VisitFloat32Sqrt(Node* node) {
+ VisitRR(this, kLoong64Float32Sqrt, node);
+}
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+ VisitRR(this, kLoong64Float64Sqrt, node);
+}
+
+void InstructionSelector::VisitFloat32RoundDown(Node* node) {
+ VisitRR(this, kLoong64Float32RoundDown, node);
+}
+
+void InstructionSelector::VisitFloat64RoundDown(Node* node) {
+ VisitRR(this, kLoong64Float64RoundDown, node);
+}
+
+void InstructionSelector::VisitFloat32RoundUp(Node* node) {
+ VisitRR(this, kLoong64Float32RoundUp, node);
+}
+
+void InstructionSelector::VisitFloat64RoundUp(Node* node) {
+ VisitRR(this, kLoong64Float64RoundUp, node);
+}
+
+void InstructionSelector::VisitFloat32RoundTruncate(Node* node) {
+ VisitRR(this, kLoong64Float32RoundTruncate, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+ VisitRR(this, kLoong64Float64RoundTruncate, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitFloat32RoundTiesEven(Node* node) {
+ VisitRR(this, kLoong64Float32RoundTiesEven, node);
+}
+
+void InstructionSelector::VisitFloat64RoundTiesEven(Node* node) {
+ VisitRR(this, kLoong64Float64RoundTiesEven, node);
+}
+
+void InstructionSelector::VisitFloat32Neg(Node* node) {
+ VisitRR(this, kLoong64Float32Neg, node);
+}
+
+void InstructionSelector::VisitFloat64Neg(Node* node) {
+ VisitRR(this, kLoong64Float64Neg, node);
+}
+
+void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
+ InstructionCode opcode) {
+ Loong64OperandGenerator g(this);
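+  // IEEE754 binops (e.g. Float64Atan2, Float64Pow) are likewise lowered
+  // to runtime calls with a fixed FP calling convention.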
+ Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f0),
+ g.UseFixed(node->InputAt(1), f1))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::VisitFloat64Ieee754Unop(Node* node,
+ InstructionCode opcode) {
+ Loong64OperandGenerator g(this);
+ Emit(opcode, g.DefineAsFixed(node, f0), g.UseFixed(node->InputAt(0), f0))
+ ->MarkAsCall();
+}
+
+void InstructionSelector::EmitPrepareArguments(
+ ZoneVector<PushParameter>* arguments, const CallDescriptor* call_descriptor,
+ Node* node) {
+ Loong64OperandGenerator g(this);
+
+ // Prepare for C function call.
+ if (call_descriptor->IsCFunctionCall()) {
+ Emit(kArchPrepareCallCFunction | MiscField::encode(static_cast<int>(
+ call_descriptor->ParameterCount())),
+ 0, nullptr, 0, nullptr);
+
+ // Poke any stack arguments.
+ int slot = 0;
+ for (PushParameter input : (*arguments)) {
+ Emit(kLoong64Poke, g.NoOutput(), g.UseRegister(input.node),
+ g.TempImmediate(slot << kSystemPointerSizeLog2));
+ ++slot;
+ }
+ } else {
+ int push_count = static_cast<int>(call_descriptor->ParameterSlotCount());
+ if (push_count > 0) {
+      // Calculate the needed stack space.
+ int stack_size = 0;
+ for (PushParameter input : (*arguments)) {
+ if (input.node) {
+ stack_size += input.location.GetSizeInPointers();
+ }
+ }
+ Emit(kLoong64StackClaim, g.NoOutput(),
+ g.TempImmediate(stack_size << kSystemPointerSizeLog2));
+ }
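+    // Poke each argument into its slot in the claimed stack area.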
+ for (size_t n = 0; n < arguments->size(); ++n) {
+ PushParameter input = (*arguments)[n];
+ if (input.node) {
+ Emit(kLoong64Poke, g.NoOutput(), g.UseRegister(input.node),
+ g.TempImmediate(static_cast<int>(n << kSystemPointerSizeLog2)));
+ }
+ }
+ }
+}
+
+void InstructionSelector::EmitPrepareResults(
+ ZoneVector<PushParameter>* results, const CallDescriptor* call_descriptor,
+ Node* node) {
+ Loong64OperandGenerator g(this);
+
+ for (PushParameter output : *results) {
+ if (!output.location.IsCallerFrameSlot()) continue;
+ // Skip any alignment holes in nodes.
+ if (output.node != nullptr) {
+ DCHECK(!call_descriptor->IsCFunctionCall());
+ if (output.location.GetType() == MachineType::Float32()) {
+ MarkAsFloat32(output.node);
+ } else if (output.location.GetType() == MachineType::Float64()) {
+ MarkAsFloat64(output.node);
+ } else if (output.location.GetType() == MachineType::Simd128()) {
+ abort();
+ }
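+      // Read the result back from its caller-frame return slot.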
+ int offset = call_descriptor->GetOffsetToReturns();
+ int reverse_slot = -output.location.GetLocation() - offset;
+ Emit(kLoong64Peek, g.DefineAsRegister(output.node),
+ g.UseImmediate(reverse_slot));
+ }
+ }
+}
+
+bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
+
+void InstructionSelector::VisitUnalignedLoad(Node* node) { UNREACHABLE(); }
+
+void InstructionSelector::VisitUnalignedStore(Node* node) { UNREACHABLE(); }
+
+namespace {
+
+// Shared routine for multiple compare operations.
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+                  InstructionOperand left, InstructionOperand right,
+                  FlagsContinuation* cont) {
+ selector->EmitWithContinuation(opcode, left, right, cont);
+}
+
+// Shared routine for multiple float32 compare operations.
+void VisitFloat32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ Loong64OperandGenerator g(selector);
+ Float32BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kLoong64Float32Cmp, lhs, rhs, cont);
+}
+
+// Shared routine for multiple float64 compare operations.
+void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ Loong64OperandGenerator g(selector);
+ Float64BinopMatcher m(node);
+ InstructionOperand lhs, rhs;
+
+ lhs = m.left().IsZero() ? g.UseImmediate(m.left().node())
+ : g.UseRegister(m.left().node());
+ rhs = m.right().IsZero() ? g.UseImmediate(m.right().node())
+ : g.UseRegister(m.right().node());
+ VisitCompare(selector, kLoong64Float64Cmp, lhs, rhs, cont);
+}
+
+// Shared routine for multiple word compare operations.
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont,
+ bool commutative) {
+ Loong64OperandGenerator g(selector);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+
+ // Match immediates on left or right side of comparison.
+ if (g.CanBeImmediate(right, opcode)) {
+ if (opcode == kLoong64Tst) {
+ if (left->opcode() == IrOpcode::kTruncateInt64ToInt32) {
+ VisitCompare(selector, opcode, g.UseRegister(left->InputAt(0)),
+ g.UseImmediate(right), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ }
+ } else {
+ switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
+ break;
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseImmediate(right), cont);
+ break;
+ default:
+ VisitCompare(selector, opcode, g.UseRegister(left),
+ g.UseRegister(right), cont);
+ }
+ }
+ } else if (g.CanBeImmediate(left, opcode)) {
+ if (!commutative) cont->Commute();
+ if (opcode == kLoong64Tst) {
+ VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+ cont);
+ } else {
+ switch (cont->condition()) {
+ case kEqual:
+ case kNotEqual:
+ if (cont->IsSet()) {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
+ break;
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseImmediate(left), cont);
+ break;
+ default:
+ VisitCompare(selector, opcode, g.UseRegister(right),
+ g.UseRegister(left), cont);
+ }
+ }
+ } else {
+ VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+ cont);
+ }
+}
+
+void VisitOptimizedWord32Compare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode,
+ FlagsContinuation* cont) {
+  // TODO(LOONG_dev): LOONG64: add a check for debug mode.
+ VisitWordCompare(selector, node, opcode, cont, false);
+}
+
+#ifdef USE_SIMULATOR
+// Shared routine for multiple word compare operations.
+void VisitFullWord32Compare(InstructionSelector* selector, Node* node,
+ InstructionCode opcode, FlagsContinuation* cont) {
+ Loong64OperandGenerator g(selector);
+ InstructionOperand leftOp = g.TempRegister();
+ InstructionOperand rightOp = g.TempRegister();
+
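+  // Shift both operands left by 32 so that only their low 32 bits take
+  // part in the 64-bit comparison, regardless of what the upper bits
+  // contain.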
+ selector->Emit(kLoong64Sll_d, leftOp, g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(32));
+ selector->Emit(kLoong64Sll_d, rightOp, g.UseRegister(node->InputAt(1)),
+ g.TempImmediate(32));
+
+ VisitCompare(selector, opcode, leftOp, rightOp, cont);
+}
+#endif
+
+void VisitWord32Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+  // LOONG64 has no Word32 compare instructions; it relies on values in
+  // registers being correctly sign-extended and compares them with the
+  // Word64 comparison instructions.
+#ifdef USE_SIMULATOR
+  // When calling a host function in the simulator, an int32 return value
+  // is not sign-extended to int64, because the simulator cannot tell
+  // whether the function returns an int32 or an int64. In that case a
+  // full word32 compare is needed.
+ if (node->InputAt(0)->opcode() == IrOpcode::kCall ||
+ node->InputAt(1)->opcode() == IrOpcode::kCall) {
+ VisitFullWord32Compare(selector, node, kLoong64Cmp, cont);
+ return;
+ }
+#endif
+ VisitOptimizedWord32Compare(selector, node, kLoong64Cmp, cont);
+}
+
+void VisitWord64Compare(InstructionSelector* selector, Node* node,
+ FlagsContinuation* cont) {
+ VisitWordCompare(selector, node, kLoong64Cmp, cont, false);
+}
+
+void EmitWordCompareZero(InstructionSelector* selector, Node* value,
+ FlagsContinuation* cont) {
+ Loong64OperandGenerator g(selector);
+ selector->EmitWithContinuation(kLoong64Cmp, g.UseRegister(value),
+ g.TempImmediate(0), cont);
+}
+
+void VisitAtomicLoad(InstructionSelector* selector, Node* node,
+ AtomicWidth width) {
+ Loong64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+
+ // The memory order is ignored.
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ InstructionCode code;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+ code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+ code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ code = (width == AtomicWidth::kWord32) ? kAtomicLoadWord32
+ : kLoong64Word64AtomicLoadUint32;
+ break;
+ case MachineRepresentation::kWord64:
+ code = kLoong64Word64AtomicLoadUint64;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ DCHECK_EQ(kTaggedSize, 8);
+ code = kLoong64Word64AtomicLoadUint64;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
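+  // Fold an immediate index into the addressing mode if possible;
+  // otherwise materialize base + index into a scratch register first.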
+ if (g.CanBeImmediate(index, code)) {
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
+ g.DefineAsRegister(node), g.UseRegister(base),
+ g.UseImmediate(index));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ selector->Emit(kLoong64Add_d | AddressingModeField::encode(kMode_None),
+ addr_reg, g.UseRegister(index), g.UseRegister(base));
+ // Emit desired load opcode, using temp addr_reg.
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
+ g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+ }
+}
+
+void VisitAtomicStore(InstructionSelector* selector, Node* node,
+ AtomicWidth width) {
+ Loong64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ // The memory order is ignored.
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_params.write_barrier_kind();
+ MachineRepresentation rep = store_params.representation();
+
+ if (FLAG_enable_unconditional_write_barriers &&
+ CanBeTaggedOrCompressedPointer(rep)) {
+ write_barrier_kind = kFullWriteBarrier;
+ }
+
+ InstructionCode code;
+
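+  // Tagged atomic stores that need a write barrier use the generic arch
+  // opcode; all other stores select a width-specific store opcode below.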
+ if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
+ DCHECK(CanBeTaggedPointer(rep));
+ DCHECK_EQ(kTaggedSize, 8);
+
+ RecordWriteMode record_write_mode =
+ WriteBarrierKindToRecordWriteMode(write_barrier_kind);
+ code = kArchAtomicStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ } else {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ code = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ code = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ code = kAtomicStoreWord32;
+ break;
+ case MachineRepresentation::kWord64:
+ DCHECK_EQ(width, AtomicWidth::kWord64);
+ code = kLoong64Word64AtomicStoreWord64;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ DCHECK_EQ(kTaggedSize, 8);
+ code = kLoong64StoreCompressTagged;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ if (g.CanBeImmediate(index, code)) {
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
+ g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ selector->Emit(kLoong64Add_d | AddressingModeField::encode(kMode_None),
+ addr_reg, g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
+ g.NoOutput(), addr_reg, g.TempImmediate(0),
+ g.UseRegisterOrImmediateZero(value));
+ }
+}
+
+void VisitAtomicExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, AtomicWidth width) {
+ Loong64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
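+  // All inputs and the output live in unique registers: the code
+  // generator expands the exchange into a retry loop and must not have
+  // operands clobbered by the scratch temps.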
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temp[3];
+ temp[0] = g.TempRegister();
+ temp[1] = g.TempRegister();
+ temp[2] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
+ selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
+}
+
+void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, AtomicWidth width) {
+ Loong64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* old_value = node->InputAt(2);
+ Node* new_value = node->InputAt(3);
+
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[4];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(old_value);
+ inputs[input_count++] = g.UseUniqueRegister(new_value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temp[3];
+ temp[0] = g.TempRegister();
+ temp[1] = g.TempRegister();
+ temp[2] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
+ selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
+}
+
+void VisitAtomicBinop(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, AtomicWidth width) {
+ Loong64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+
+ AddressingMode addressing_mode = kMode_MRI;
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ InstructionOperand outputs[1];
+ outputs[0] = g.UseUniqueRegister(node);
+ InstructionOperand temps[4];
+ temps[0] = g.TempRegister();
+ temps[1] = g.TempRegister();
+ temps[2] = g.TempRegister();
+ temps[3] = g.TempRegister();
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
+ selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
+}
+
+} // namespace
+
+void InstructionSelector::VisitStackPointerGreaterThan(
+ Node* node, FlagsContinuation* cont) {
+ StackCheckKind kind = StackCheckKindOf(node->op());
+ InstructionCode opcode =
+ kArchStackPointerGreaterThan | MiscField::encode(static_cast<int>(kind));
+
+ Loong64OperandGenerator g(this);
+
+ // No outputs.
+ InstructionOperand* const outputs = nullptr;
+ const int output_count = 0;
+
+ // TempRegister(0) is used to store the comparison result.
+ // Applying an offset to this stack check requires a temp register. Offsets
+ // are only applied to the first stack check. If applying an offset, we must
+ // ensure the input and temp registers do not alias, thus kUniqueRegister.
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ const int temp_count = (kind == StackCheckKind::kJSFunctionEntry ? 2 : 1);
+ const auto register_mode = (kind == StackCheckKind::kJSFunctionEntry)
+ ? OperandGenerator::kUniqueRegister
+ : OperandGenerator::kRegister;
+
+ Node* const value = node->InputAt(0);
+ InstructionOperand inputs[] = {g.UseRegisterWithMode(value, register_mode)};
+ static constexpr int input_count = arraysize(inputs);
+
+ EmitWithContinuation(opcode, output_count, outputs, input_count, inputs,
+ temp_count, temps, cont);
+}
+
+// Shared routine for word comparisons against zero.
+void InstructionSelector::VisitWordCompareZero(Node* user, Node* value,
+ FlagsContinuation* cont) {
+ // Try to combine with comparisons against 0 by simply inverting the branch.
+ while (CanCover(user, value)) {
+ if (value->opcode() == IrOpcode::kWord32Equal) {
+ Int32BinopMatcher m(value);
+ if (!m.right().Is(0)) break;
+ user = value;
+ value = m.left().node();
+ } else if (value->opcode() == IrOpcode::kWord64Equal) {
+ Int64BinopMatcher m(value);
+ if (!m.right().Is(0)) break;
+ user = value;
+ value = m.left().node();
+ } else {
+ break;
+ }
+
+ cont->Negate();
+ }
+
+ if (CanCover(user, value)) {
+ switch (value->opcode()) {
+ case IrOpcode::kWord32Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kInt32LessThan:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kInt32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kUint32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kUint32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord32Compare(this, value, cont);
+ case IrOpcode::kWord64Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kInt64LessThan:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kInt64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kUint64LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kUint64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitWord64Compare(this, value, cont);
+ case IrOpcode::kFloat32Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat32Compare(this, value, cont);
+ case IrOpcode::kFloat32LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitFloat32Compare(this, value, cont);
+ case IrOpcode::kFloat32LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitFloat32Compare(this, value, cont);
+ case IrOpcode::kFloat64Equal:
+ cont->OverwriteAndNegateIfEqual(kEqual);
+ return VisitFloat64Compare(this, value, cont);
+ case IrOpcode::kFloat64LessThan:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+ return VisitFloat64Compare(this, value, cont);
+ case IrOpcode::kFloat64LessThanOrEqual:
+ cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+ return VisitFloat64Compare(this, value, cont);
+ case IrOpcode::kProjection:
+ // Check if this is the overflow output projection of an
+ // <Operation>WithOverflow node.
+ if (ProjectionIndexOf(value->op()) == 1u) {
+          // We cannot combine the <Operation>WithOverflow with this branch
+          // unless the 0th projection (the use of the actual value of the
+          // <Operation>) is either nullptr, which means there's no use of
+          // the actual value, or was already defined, which means it is
+          // scheduled *AFTER* this branch.
+ Node* const node = value->InputAt(0);
+ Node* const result = NodeProperties::FindProjection(node, 0);
+ if (result == nullptr || IsDefined(result)) {
+ switch (node->opcode()) {
+ case IrOpcode::kInt32AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kLoong64Add_d, cont);
+ case IrOpcode::kInt32SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kLoong64Sub_d, cont);
+ case IrOpcode::kInt32MulWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kLoong64MulOvf_w, cont);
+ case IrOpcode::kInt64AddWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kLoong64AddOvf_d, cont);
+ case IrOpcode::kInt64SubWithOverflow:
+ cont->OverwriteAndNegateIfEqual(kOverflow);
+ return VisitBinop(this, node, kLoong64SubOvf_d, cont);
+ default:
+ break;
+ }
+ }
+ }
+ break;
+ case IrOpcode::kWord32And:
+ case IrOpcode::kWord64And:
+ return VisitWordCompare(this, value, kLoong64Tst, cont, true);
+ case IrOpcode::kStackPointerGreaterThan:
+ cont->OverwriteAndNegateIfEqual(kStackPointerGreaterThanCondition);
+ return VisitStackPointerGreaterThan(value, cont);
+ default:
+ break;
+ }
+ }
+
+  // The continuation could not be combined with a compare; emit a compare
+  // against zero.
+ EmitWordCompareZero(this, value, cont);
+}
+
+void InstructionSelector::VisitSwitch(Node* node, const SwitchInfo& sw) {
+ Loong64OperandGenerator g(this);
+ InstructionOperand value_operand = g.UseRegister(node->InputAt(0));
+
+ // Emit either ArchTableSwitch or ArchBinarySearchSwitch.
+ if (enable_switch_jump_table_ == kEnableSwitchJumpTable) {
+ static const size_t kMaxTableSwitchValueRange = 2 << 16;
+ size_t table_space_cost = 10 + 2 * sw.value_range();
+ size_t table_time_cost = 3;
+ size_t lookup_space_cost = 2 + 2 * sw.case_count();
+ size_t lookup_time_cost = sw.case_count();
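+    // Prefer a jump table when its estimated cost (space plus 3x-weighted
+    // time) does not exceed that of a binary-search switch and the value
+    // range is small enough.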
+ if (sw.case_count() > 0 &&
+ table_space_cost + 3 * table_time_cost <=
+ lookup_space_cost + 3 * lookup_time_cost &&
+ sw.min_value() > std::numeric_limits<int32_t>::min() &&
+ sw.value_range() <= kMaxTableSwitchValueRange) {
+ InstructionOperand index_operand = value_operand;
+ if (sw.min_value()) {
+ index_operand = g.TempRegister();
+ Emit(kLoong64Sub_w, index_operand, value_operand,
+ g.TempImmediate(sw.min_value()));
+ }
+ // Generate a table lookup.
+ return EmitTableSwitch(sw, index_operand);
+ }
+ }
+
+ // Generate a tree of conditional jumps.
+ return EmitBinarySearchSwitch(sw, value_operand);
+}
+
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ Int32BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWordCompareZero(m.node(), m.left().node(), &cont);
+ }
+
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitWord32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kLoong64Add_d, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kLoong64Add_d, &cont);
+}
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kLoong64Sub_d, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kLoong64Sub_d, &cont);
+}
+
+void InstructionSelector::VisitInt32MulWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kLoong64MulOvf_w, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kLoong64MulOvf_w, &cont);
+}
+
+void InstructionSelector::VisitInt64AddWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kLoong64AddOvf_d, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kLoong64AddOvf_d, &cont);
+}
+
+void InstructionSelector::VisitInt64SubWithOverflow(Node* node) {
+ if (Node* ovf = NodeProperties::FindProjection(node, 1)) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kOverflow, ovf);
+ return VisitBinop(this, node, kLoong64SubOvf_d, &cont);
+ }
+ FlagsContinuation cont;
+ VisitBinop(this, node, kLoong64SubOvf_d, &cont);
+}
+
+void InstructionSelector::VisitWord64Equal(Node* const node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ Int64BinopMatcher m(node);
+ if (m.right().Is(0)) {
+ return VisitWordCompareZero(m.node(), m.left().node(), &cont);
+ }
+
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kSignedLessThan, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kSignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint64LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitUint64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitWord64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat32Equal(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat32LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat32LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitFloat32Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+ FlagsContinuation cont = FlagsContinuation::ForSet(kUnsignedLessThan, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+ FlagsContinuation cont =
+ FlagsContinuation::ForSet(kUnsignedLessThanOrEqual, node);
+ VisitFloat64Compare(this, node, &cont);
+}
+
+void InstructionSelector::VisitFloat64ExtractLowWord32(Node* node) {
+ VisitRR(this, kLoong64Float64ExtractLowWord32, node);
+}
+
+void InstructionSelector::VisitFloat64ExtractHighWord32(Node* node) {
+ VisitRR(this, kLoong64Float64ExtractHighWord32, node);
+}
+
+void InstructionSelector::VisitFloat64SilenceNaN(Node* node) {
+ VisitRR(this, kLoong64Float64SilenceNaN, node);
+}
+
+void InstructionSelector::VisitFloat64InsertLowWord32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kLoong64Float64InsertLowWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+void InstructionSelector::VisitFloat64InsertHighWord32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Node* left = node->InputAt(0);
+ Node* right = node->InputAt(1);
+ Emit(kLoong64Float64InsertHighWord32, g.DefineSameAsFirst(node),
+ g.UseRegister(left), g.UseRegister(right));
+}
+
+void InstructionSelector::VisitMemoryBarrier(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Dbar, g.NoOutput());
+}
+
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
+ VisitAtomicLoad(this, node, AtomicWidth::kWord32);
+}
+
+void InstructionSelector::VisitWord32AtomicStore(Node* node) {
+ VisitAtomicStore(this, node, AtomicWidth::kWord32);
+}
+
+void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
+ VisitAtomicLoad(this, node, AtomicWidth::kWord64);
+}
+
+void InstructionSelector::VisitWord64AtomicStore(Node* node) {
+ VisitAtomicStore(this, node, AtomicWidth::kWord64);
+}
+
+void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = kAtomicExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kAtomicExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kAtomicExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kAtomicExchangeWord32;
+ } else {
+ UNREACHABLE();
+ }
+
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
+}
+
+void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = kAtomicExchangeUint8;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kAtomicExchangeWord32;
+ } else if (type == MachineType::Uint64()) {
+ opcode = kLoong64Word64AtomicExchangeUint64;
+ } else {
+ UNREACHABLE();
+ }
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
+}
+
+void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = kAtomicCompareExchangeInt8;
+ } else if (type == MachineType::Uint8()) {
+ opcode = kAtomicCompareExchangeUint8;
+ } else if (type == MachineType::Int16()) {
+ opcode = kAtomicCompareExchangeInt16;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicCompareExchangeUint16;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = kAtomicCompareExchangeWord32;
+ } else {
+ UNREACHABLE();
+ }
+
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
+}
+
+void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = kAtomicCompareExchangeUint8;
+ } else if (type == MachineType::Uint16()) {
+ opcode = kAtomicCompareExchangeUint16;
+ } else if (type == MachineType::Uint32()) {
+ opcode = kAtomicCompareExchangeWord32;
+ } else if (type == MachineType::Uint64()) {
+ opcode = kLoong64Word64AtomicCompareExchangeUint64;
+ } else {
+ UNREACHABLE();
+ }
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
+}
+
+void InstructionSelector::VisitWord32AtomicBinaryOperation(
+ Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
+ ArchOpcode uint16_op, ArchOpcode word32_op) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Int8()) {
+ opcode = int8_op;
+ } else if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else if (type == MachineType::Int16()) {
+ opcode = int16_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
+ opcode = word32_op;
+ } else {
+ UNREACHABLE();
+ }
+
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
+}
+
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
+ }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
+
+void InstructionSelector::VisitWord64AtomicBinaryOperation(
+ Node* node, ArchOpcode uint8_op, ArchOpcode uint16_op, ArchOpcode uint32_op,
+ ArchOpcode uint64_op) {
+ ArchOpcode opcode;
+ MachineType type = AtomicOpType(node->op());
+ if (type == MachineType::Uint8()) {
+ opcode = uint8_op;
+ } else if (type == MachineType::Uint16()) {
+ opcode = uint16_op;
+ } else if (type == MachineType::Uint32()) {
+ opcode = uint32_op;
+ } else if (type == MachineType::Uint64()) {
+ opcode = uint64_op;
+ } else {
+ UNREACHABLE();
+ }
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
+}
+
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
+ kAtomic##op##Uint16, kAtomic##op##Word32, \
+ kLoong64Word64Atomic##op##Uint64); \
+ }
+VISIT_ATOMIC_BINOP(Add)
+VISIT_ATOMIC_BINOP(Sub)
+VISIT_ATOMIC_BINOP(And)
+VISIT_ATOMIC_BINOP(Or)
+VISIT_ATOMIC_BINOP(Xor)
+#undef VISIT_ATOMIC_BINOP
+
+void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
+void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
+ UNREACHABLE();
+}
+
+#define SIMD_TYPE_LIST(V) \
+ V(F64x2) \
+ V(F32x4) \
+ V(I64x2) \
+ V(I32x4) \
+ V(I16x8) \
+ V(I8x16)
+
+#define SIMD_UNOP_LIST(V) \
+ V(F64x2Abs, kLoong64F64x2Abs) \
+ V(F64x2Neg, kLoong64F64x2Neg) \
+ V(F64x2Sqrt, kLoong64F64x2Sqrt) \
+ V(F64x2Ceil, kLoong64F64x2Ceil) \
+ V(F64x2Floor, kLoong64F64x2Floor) \
+ V(F64x2Trunc, kLoong64F64x2Trunc) \
+ V(F64x2NearestInt, kLoong64F64x2NearestInt) \
+ V(I64x2Neg, kLoong64I64x2Neg) \
+ V(I64x2BitMask, kLoong64I64x2BitMask) \
+ V(F64x2ConvertLowI32x4S, kLoong64F64x2ConvertLowI32x4S) \
+ V(F64x2ConvertLowI32x4U, kLoong64F64x2ConvertLowI32x4U) \
+ V(F64x2PromoteLowF32x4, kLoong64F64x2PromoteLowF32x4) \
+ V(F32x4SConvertI32x4, kLoong64F32x4SConvertI32x4) \
+ V(F32x4UConvertI32x4, kLoong64F32x4UConvertI32x4) \
+ V(F32x4Abs, kLoong64F32x4Abs) \
+ V(F32x4Neg, kLoong64F32x4Neg) \
+ V(F32x4Sqrt, kLoong64F32x4Sqrt) \
+ V(F32x4RecipApprox, kLoong64F32x4RecipApprox) \
+ V(F32x4RecipSqrtApprox, kLoong64F32x4RecipSqrtApprox) \
+ V(F32x4Ceil, kLoong64F32x4Ceil) \
+ V(F32x4Floor, kLoong64F32x4Floor) \
+ V(F32x4Trunc, kLoong64F32x4Trunc) \
+ V(F32x4NearestInt, kLoong64F32x4NearestInt) \
+ V(F32x4DemoteF64x2Zero, kLoong64F32x4DemoteF64x2Zero) \
+ V(I64x2Abs, kLoong64I64x2Abs) \
+ V(I64x2SConvertI32x4Low, kLoong64I64x2SConvertI32x4Low) \
+ V(I64x2SConvertI32x4High, kLoong64I64x2SConvertI32x4High) \
+ V(I64x2UConvertI32x4Low, kLoong64I64x2UConvertI32x4Low) \
+ V(I64x2UConvertI32x4High, kLoong64I64x2UConvertI32x4High) \
+ V(I32x4SConvertF32x4, kLoong64I32x4SConvertF32x4) \
+ V(I32x4UConvertF32x4, kLoong64I32x4UConvertF32x4) \
+ V(I32x4Neg, kLoong64I32x4Neg) \
+ V(I32x4SConvertI16x8Low, kLoong64I32x4SConvertI16x8Low) \
+ V(I32x4SConvertI16x8High, kLoong64I32x4SConvertI16x8High) \
+ V(I32x4UConvertI16x8Low, kLoong64I32x4UConvertI16x8Low) \
+ V(I32x4UConvertI16x8High, kLoong64I32x4UConvertI16x8High) \
+ V(I32x4Abs, kLoong64I32x4Abs) \
+ V(I32x4BitMask, kLoong64I32x4BitMask) \
+ V(I32x4TruncSatF64x2SZero, kLoong64I32x4TruncSatF64x2SZero) \
+ V(I32x4TruncSatF64x2UZero, kLoong64I32x4TruncSatF64x2UZero) \
+ V(I16x8Neg, kLoong64I16x8Neg) \
+ V(I16x8SConvertI8x16Low, kLoong64I16x8SConvertI8x16Low) \
+ V(I16x8SConvertI8x16High, kLoong64I16x8SConvertI8x16High) \
+ V(I16x8UConvertI8x16Low, kLoong64I16x8UConvertI8x16Low) \
+ V(I16x8UConvertI8x16High, kLoong64I16x8UConvertI8x16High) \
+ V(I16x8Abs, kLoong64I16x8Abs) \
+ V(I16x8BitMask, kLoong64I16x8BitMask) \
+ V(I8x16Neg, kLoong64I8x16Neg) \
+ V(I8x16Abs, kLoong64I8x16Abs) \
+ V(I8x16Popcnt, kLoong64I8x16Popcnt) \
+ V(I8x16BitMask, kLoong64I8x16BitMask) \
+ V(S128Not, kLoong64S128Not) \
+ V(I64x2AllTrue, kLoong64I64x2AllTrue) \
+ V(I32x4AllTrue, kLoong64I32x4AllTrue) \
+ V(I16x8AllTrue, kLoong64I16x8AllTrue) \
+ V(I8x16AllTrue, kLoong64I8x16AllTrue) \
+ V(V128AnyTrue, kLoong64V128AnyTrue)
+
+#define SIMD_SHIFT_OP_LIST(V) \
+ V(I64x2Shl) \
+ V(I64x2ShrS) \
+ V(I64x2ShrU) \
+ V(I32x4Shl) \
+ V(I32x4ShrS) \
+ V(I32x4ShrU) \
+ V(I16x8Shl) \
+ V(I16x8ShrS) \
+ V(I16x8ShrU) \
+ V(I8x16Shl) \
+ V(I8x16ShrS) \
+ V(I8x16ShrU)
+
+#define SIMD_BINOP_LIST(V) \
+ V(F64x2Add, kLoong64F64x2Add) \
+ V(F64x2Sub, kLoong64F64x2Sub) \
+ V(F64x2Mul, kLoong64F64x2Mul) \
+ V(F64x2Div, kLoong64F64x2Div) \
+ V(F64x2Min, kLoong64F64x2Min) \
+ V(F64x2Max, kLoong64F64x2Max) \
+ V(F64x2Eq, kLoong64F64x2Eq) \
+ V(F64x2Ne, kLoong64F64x2Ne) \
+ V(F64x2Lt, kLoong64F64x2Lt) \
+ V(F64x2Le, kLoong64F64x2Le) \
+ V(I64x2Eq, kLoong64I64x2Eq) \
+ V(I64x2Ne, kLoong64I64x2Ne) \
+ V(I64x2Add, kLoong64I64x2Add) \
+ V(I64x2Sub, kLoong64I64x2Sub) \
+ V(I64x2Mul, kLoong64I64x2Mul) \
+ V(I64x2GtS, kLoong64I64x2GtS) \
+ V(I64x2GeS, kLoong64I64x2GeS) \
+ V(F32x4Add, kLoong64F32x4Add) \
+ V(F32x4Sub, kLoong64F32x4Sub) \
+ V(F32x4Mul, kLoong64F32x4Mul) \
+ V(F32x4Div, kLoong64F32x4Div) \
+ V(F32x4Max, kLoong64F32x4Max) \
+ V(F32x4Min, kLoong64F32x4Min) \
+ V(F32x4Eq, kLoong64F32x4Eq) \
+ V(F32x4Ne, kLoong64F32x4Ne) \
+ V(F32x4Lt, kLoong64F32x4Lt) \
+ V(F32x4Le, kLoong64F32x4Le) \
+ V(I32x4Add, kLoong64I32x4Add) \
+ V(I32x4Sub, kLoong64I32x4Sub) \
+ V(I32x4Mul, kLoong64I32x4Mul) \
+ V(I32x4MaxS, kLoong64I32x4MaxS) \
+ V(I32x4MinS, kLoong64I32x4MinS) \
+ V(I32x4MaxU, kLoong64I32x4MaxU) \
+ V(I32x4MinU, kLoong64I32x4MinU) \
+ V(I32x4Eq, kLoong64I32x4Eq) \
+ V(I32x4Ne, kLoong64I32x4Ne) \
+ V(I32x4GtS, kLoong64I32x4GtS) \
+ V(I32x4GeS, kLoong64I32x4GeS) \
+ V(I32x4GtU, kLoong64I32x4GtU) \
+ V(I32x4GeU, kLoong64I32x4GeU) \
+ V(I32x4DotI16x8S, kLoong64I32x4DotI16x8S) \
+ V(I16x8Add, kLoong64I16x8Add) \
+ V(I16x8AddSatS, kLoong64I16x8AddSatS) \
+ V(I16x8AddSatU, kLoong64I16x8AddSatU) \
+ V(I16x8Sub, kLoong64I16x8Sub) \
+ V(I16x8SubSatS, kLoong64I16x8SubSatS) \
+ V(I16x8SubSatU, kLoong64I16x8SubSatU) \
+ V(I16x8Mul, kLoong64I16x8Mul) \
+ V(I16x8MaxS, kLoong64I16x8MaxS) \
+ V(I16x8MinS, kLoong64I16x8MinS) \
+ V(I16x8MaxU, kLoong64I16x8MaxU) \
+ V(I16x8MinU, kLoong64I16x8MinU) \
+ V(I16x8Eq, kLoong64I16x8Eq) \
+ V(I16x8Ne, kLoong64I16x8Ne) \
+ V(I16x8GtS, kLoong64I16x8GtS) \
+ V(I16x8GeS, kLoong64I16x8GeS) \
+ V(I16x8GtU, kLoong64I16x8GtU) \
+ V(I16x8GeU, kLoong64I16x8GeU) \
+ V(I16x8RoundingAverageU, kLoong64I16x8RoundingAverageU) \
+ V(I16x8SConvertI32x4, kLoong64I16x8SConvertI32x4) \
+ V(I16x8UConvertI32x4, kLoong64I16x8UConvertI32x4) \
+ V(I16x8Q15MulRSatS, kLoong64I16x8Q15MulRSatS) \
+ V(I8x16Add, kLoong64I8x16Add) \
+ V(I8x16AddSatS, kLoong64I8x16AddSatS) \
+ V(I8x16AddSatU, kLoong64I8x16AddSatU) \
+ V(I8x16Sub, kLoong64I8x16Sub) \
+ V(I8x16SubSatS, kLoong64I8x16SubSatS) \
+ V(I8x16SubSatU, kLoong64I8x16SubSatU) \
+ V(I8x16MaxS, kLoong64I8x16MaxS) \
+ V(I8x16MinS, kLoong64I8x16MinS) \
+ V(I8x16MaxU, kLoong64I8x16MaxU) \
+ V(I8x16MinU, kLoong64I8x16MinU) \
+ V(I8x16Eq, kLoong64I8x16Eq) \
+ V(I8x16Ne, kLoong64I8x16Ne) \
+ V(I8x16GtS, kLoong64I8x16GtS) \
+ V(I8x16GeS, kLoong64I8x16GeS) \
+ V(I8x16GtU, kLoong64I8x16GtU) \
+ V(I8x16GeU, kLoong64I8x16GeU) \
+ V(I8x16RoundingAverageU, kLoong64I8x16RoundingAverageU) \
+ V(I8x16SConvertI16x8, kLoong64I8x16SConvertI16x8) \
+ V(I8x16UConvertI16x8, kLoong64I8x16UConvertI16x8) \
+ V(S128And, kLoong64S128And) \
+ V(S128Or, kLoong64S128Or) \
+ V(S128Xor, kLoong64S128Xor) \
+ V(S128AndNot, kLoong64S128AndNot)
+
+void InstructionSelector::VisitS128Const(Node* node) {
+ Loong64OperandGenerator g(this);
+ static const int kUint32Immediates = kSimd128Size / sizeof(uint32_t);
+ uint32_t val[kUint32Immediates];
+ memcpy(val, S128ImmediateParameterOf(node->op()).data(), kSimd128Size);
+  // If the constant is all zeros or all ones, avoid emitting code for a
+  // generic constant.
+ bool all_zeros = !(val[0] || val[1] || val[2] || val[3]);
+ bool all_ones = val[0] == UINT32_MAX && val[1] == UINT32_MAX &&
+ val[2] == UINT32_MAX && val[3] == UINT32_MAX;
+ InstructionOperand dst = g.DefineAsRegister(node);
+ if (all_zeros) {
+ Emit(kLoong64S128Zero, dst);
+ } else if (all_ones) {
+ Emit(kLoong64S128AllOnes, dst);
+ } else {
+ Emit(kLoong64S128Const, dst, g.UseImmediate(val[0]), g.UseImmediate(val[1]),
+ g.UseImmediate(val[2]), g.UseImmediate(val[3]));
+ }
+}
+
+void InstructionSelector::VisitS128Zero(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64S128Zero, g.DefineAsRegister(node));
+}
+
+#define SIMD_VISIT_SPLAT(Type) \
+ void InstructionSelector::Visit##Type##Splat(Node* node) { \
+ VisitRR(this, kLoong64##Type##Splat, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_SPLAT)
+#undef SIMD_VISIT_SPLAT
+
+#define SIMD_VISIT_EXTRACT_LANE(Type, Sign) \
+ void InstructionSelector::Visit##Type##ExtractLane##Sign(Node* node) { \
+ VisitRRI(this, kLoong64##Type##ExtractLane##Sign, node); \
+ }
+SIMD_VISIT_EXTRACT_LANE(F64x2, )
+SIMD_VISIT_EXTRACT_LANE(F32x4, )
+SIMD_VISIT_EXTRACT_LANE(I64x2, )
+SIMD_VISIT_EXTRACT_LANE(I32x4, )
+SIMD_VISIT_EXTRACT_LANE(I16x8, U)
+SIMD_VISIT_EXTRACT_LANE(I16x8, S)
+SIMD_VISIT_EXTRACT_LANE(I8x16, U)
+SIMD_VISIT_EXTRACT_LANE(I8x16, S)
+#undef SIMD_VISIT_EXTRACT_LANE
+
+#define SIMD_VISIT_REPLACE_LANE(Type) \
+ void InstructionSelector::Visit##Type##ReplaceLane(Node* node) { \
+ VisitRRIR(this, kLoong64##Type##ReplaceLane, node); \
+ }
+SIMD_TYPE_LIST(SIMD_VISIT_REPLACE_LANE)
+#undef SIMD_VISIT_REPLACE_LANE
+
+#define SIMD_VISIT_UNOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRR(this, instruction, node); \
+ }
+SIMD_UNOP_LIST(SIMD_VISIT_UNOP)
+#undef SIMD_VISIT_UNOP
+
+#define SIMD_VISIT_SHIFT_OP(Name) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitSimdShift(this, kLoong64##Name, node); \
+ }
+SIMD_SHIFT_OP_LIST(SIMD_VISIT_SHIFT_OP)
+#undef SIMD_VISIT_SHIFT_OP
+
+#define SIMD_VISIT_BINOP(Name, instruction) \
+ void InstructionSelector::Visit##Name(Node* node) { \
+ VisitRRR(this, instruction, node); \
+ }
+SIMD_BINOP_LIST(SIMD_VISIT_BINOP)
+#undef SIMD_VISIT_BINOP
+
+void InstructionSelector::VisitS128Select(Node* node) {
+ VisitRRRR(this, kLoong64S128Select, node);
+}
+
+#if V8_ENABLE_WEBASSEMBLY
+namespace {
+
+struct ShuffleEntry {
+ uint8_t shuffle[kSimd128Size];
+ ArchOpcode opcode;
+};
+
+static const ShuffleEntry arch_shuffles[] = {
+ {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
+ kLoong64S32x4InterleaveRight},
+ {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
+ kLoong64S32x4InterleaveLeft},
+ {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
+ kLoong64S32x4PackEven},
+ {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
+ kLoong64S32x4PackOdd},
+ {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
+ kLoong64S32x4InterleaveEven},
+ {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
+ kLoong64S32x4InterleaveOdd},
+
+ {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
+ kLoong64S16x8InterleaveRight},
+ {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
+ kLoong64S16x8InterleaveLeft},
+ {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
+ kLoong64S16x8PackEven},
+ {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
+ kLoong64S16x8PackOdd},
+ {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
+ kLoong64S16x8InterleaveEven},
+ {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
+ kLoong64S16x8InterleaveOdd},
+ {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
+ kLoong64S16x4Reverse},
+ {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
+ kLoong64S16x2Reverse},
+
+ {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
+ kLoong64S8x16InterleaveRight},
+ {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
+ kLoong64S8x16InterleaveLeft},
+ {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
+ kLoong64S8x16PackEven},
+ {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
+ kLoong64S8x16PackOdd},
+ {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
+ kLoong64S8x16InterleaveEven},
+ {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
+ kLoong64S8x16InterleaveOdd},
+ {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8},
+ kLoong64S8x8Reverse},
+ {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12},
+ kLoong64S8x4Reverse},
+ {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
+ kLoong64S8x2Reverse}};
+
+bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
+ size_t num_entries, bool is_swizzle,
+ ArchOpcode* opcode) {
+ uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
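+  // For swizzles (single input) lane indices are masked to one vector's
+  // range; for two-input shuffles they may address both vectors.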
+ for (size_t i = 0; i < num_entries; ++i) {
+ const ShuffleEntry& entry = table[i];
+ int j = 0;
+ for (; j < kSimd128Size; ++j) {
+ if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
+ break;
+ }
+ }
+ if (j == kSimd128Size) {
+ *opcode = entry.opcode;
+ return true;
+ }
+ }
+ return false;
+}
+
+} // namespace
+
+void InstructionSelector::VisitI8x16Shuffle(Node* node) {
+ uint8_t shuffle[kSimd128Size];
+ bool is_swizzle;
+ CanonicalizeShuffle(node, shuffle, &is_swizzle);
+ uint8_t shuffle32x4[4];
+ ArchOpcode opcode;
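+  // Try the table of dedicated shuffle instructions first, then the
+  // cheaper concat and 32x4 patterns, before falling back to the generic
+  // 8x16 shuffle.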
+ if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
+ is_swizzle, &opcode)) {
+ VisitRRR(this, opcode, node);
+ return;
+ }
+ Node* input0 = node->InputAt(0);
+ Node* input1 = node->InputAt(1);
+ uint8_t offset;
+ Loong64OperandGenerator g(this);
+ if (wasm::SimdShuffle::TryMatchConcat(shuffle, &offset)) {
+ Emit(kLoong64S8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
+ g.UseRegister(input0), g.UseImmediate(offset));
+ return;
+ }
+ if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
+ Emit(kLoong64S32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseRegister(input1),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4)));
+ return;
+ }
+ Emit(kLoong64I8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ g.UseRegister(input1),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle)),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 4)),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 8)),
+ g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 12)));
+}
+#else
+void InstructionSelector::VisitI8x16Shuffle(Node* node) { UNREACHABLE(); }
+#endif // V8_ENABLE_WEBASSEMBLY
+
+void InstructionSelector::VisitI8x16Swizzle(Node* node) {
+ Loong64OperandGenerator g(this);
+ InstructionOperand temps[] = {g.TempSimd128Register()};
+  // We don't want input 0 or input 1 to be the same as the output, since
+  // we will modify the output before doing the calculation.
+ Emit(kLoong64I8x16Swizzle, g.DefineAsRegister(node),
+ g.UseUniqueRegister(node->InputAt(0)),
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
+}
+
+void InstructionSelector::VisitSignExtendWord8ToInt32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Ext_w_b, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt32(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Ext_w_h, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord8ToInt64(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Ext_w_b, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord16ToInt64(Node* node) {
+ Loong64OperandGenerator g(this);
+ Emit(kLoong64Ext_w_h, g.DefineAsRegister(node),
+ g.UseRegister(node->InputAt(0)));
+}
+
+void InstructionSelector::VisitSignExtendWord32ToInt64(Node* node) {
+ Loong64OperandGenerator g(this);
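+  // sll.w with a shift amount of 0 sign-extends the low 32 bits into the
+  // 64-bit register.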
+ Emit(kLoong64Sll_w, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+ g.TempImmediate(0));
+}
+
+void InstructionSelector::VisitF32x4Pmin(Node* node) {
+ VisitUniqueRRR(this, kLoong64F32x4Pmin, node);
+}
+
+void InstructionSelector::VisitF32x4Pmax(Node* node) {
+ VisitUniqueRRR(this, kLoong64F32x4Pmax, node);
+}
+
+void InstructionSelector::VisitF64x2Pmin(Node* node) {
+ VisitUniqueRRR(this, kLoong64F64x2Pmin, node);
+}
+
+void InstructionSelector::VisitF64x2Pmax(Node* node) {
+ VisitUniqueRRR(this, kLoong64F64x2Pmax, node);
+}
+
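+// Note: the extended-multiply visitors below are currently empty
+// placeholders; this port does not select any instructions for them.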
+#define VISIT_EXT_MUL(OPCODE1, OPCODE2) \
+ void InstructionSelector::Visit##OPCODE1##ExtMulLow##OPCODE2(Node* node) {} \
+ void InstructionSelector::Visit##OPCODE1##ExtMulHigh##OPCODE2(Node* node) {}
+
+VISIT_EXT_MUL(I64x2, I32x4S)
+VISIT_EXT_MUL(I64x2, I32x4U)
+VISIT_EXT_MUL(I32x4, I16x8S)
+VISIT_EXT_MUL(I32x4, I16x8U)
+VISIT_EXT_MUL(I16x8, I8x16S)
+VISIT_EXT_MUL(I16x8, I8x16U)
+#undef VISIT_EXT_MUL
+
+#define VISIT_EXTADD_PAIRWISE(OPCODE) \
+ void InstructionSelector::Visit##OPCODE(Node* node) { \
+ Loong64OperandGenerator g(this); \
+ Emit(kLoong64ExtAddPairwise, g.DefineAsRegister(node), \
+ g.UseRegister(node->InputAt(0))); \
+ }
+VISIT_EXTADD_PAIRWISE(I16x8ExtAddPairwiseI8x16S)
+VISIT_EXTADD_PAIRWISE(I16x8ExtAddPairwiseI8x16U)
+VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8S)
+VISIT_EXTADD_PAIRWISE(I32x4ExtAddPairwiseI16x8U)
+#undef VISIT_EXTADD_PAIRWISE
+
+void InstructionSelector::AddOutputToSelectContinuation(OperandGenerator* g,
+ int first_input_index,
+ Node* node) {
+ UNREACHABLE();
+}
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+ MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags;
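+  // Shift amounts are masked by the hardware and integer division does
+  // not trap, so the corresponding IsSafe flags can be advertised, along
+  // with native support for all four FP rounding modes.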
+ return flags | MachineOperatorBuilder::kWord32ShiftIsSafe |
+ MachineOperatorBuilder::kInt32DivIsSafe |
+ MachineOperatorBuilder::kUint32DivIsSafe |
+ MachineOperatorBuilder::kFloat64RoundDown |
+ MachineOperatorBuilder::kFloat32RoundDown |
+ MachineOperatorBuilder::kFloat64RoundUp |
+ MachineOperatorBuilder::kFloat32RoundUp |
+ MachineOperatorBuilder::kFloat64RoundTruncate |
+ MachineOperatorBuilder::kFloat32RoundTruncate |
+ MachineOperatorBuilder::kFloat64RoundTiesEven |
+ MachineOperatorBuilder::kFloat32RoundTiesEven;
+}
+
+// static
+MachineOperatorBuilder::AlignmentRequirements
+InstructionSelector::AlignmentRequirements() {
+ return MachineOperatorBuilder::AlignmentRequirements::
+ FullUnalignedAccessSupport();
+}
+
+#undef SIMD_BINOP_LIST
+#undef SIMD_SHIFT_OP_LIST
+#undef SIMD_UNOP_LIST
+#undef SIMD_TYPE_LIST
+#undef TRACE_UNIMPL
+#undef TRACE
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
index 2b8197e7e6..736248c824 100644
--- a/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/code-generator-mips.cc
@@ -93,7 +93,6 @@ class MipsOperandConverter final : public InstructionOperandConverter {
constant.ToDelayedStringConstant());
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): RPO immediates on mips?
- break;
}
UNREACHABLE();
}
@@ -313,16 +312,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
<< "\""; \
UNIMPLEMENTED();
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- MipsOperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
- }
-}
-
} // namespace
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
@@ -614,31 +603,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- // Calculate a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- // difference = (current - expected) | (expected - current)
- // poison = ~(difference >> (kBitsPerSystemPointer - 1))
- __ ComputeCodeStartAddress(kScratchReg);
- __ Move(kSpeculationPoisonRegister, kScratchReg);
- __ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
- kScratchReg);
- __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kBitsPerSystemPointer - 1);
- __ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kSpeculationPoisonRegister);
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ And(sp, sp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
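The hunk above deletes the Spectre speculation-poison machinery. For
reference, a C++ sketch of the mask the removed
GenerateSpeculationPoisonFromCodeStartRegister computed, following the
comments in the deleted assembly (kBitsPerSystemPointer and the function name
are stand-ins here, not V8 API):

#include <cstdint>

constexpr int kBitsPerSystemPointer = sizeof(void*) * 8;

// All ones when execution really is at the expected code start, all zeros
// when we are speculatively executing at the wrong PC.
uintptr_t SpeculationPoison(uintptr_t current, uintptr_t expected) {
  // difference = (current - expected) | (expected - current)
  uintptr_t difference = (current - expected) | (expected - current);
  // poison = ~(difference >> (kBitsPerSystemPointer - 1)), using an
  // arithmetic shift like the sra in the deleted code.
  return ~static_cast<uintptr_t>(static_cast<intptr_t>(difference) >>
                                 (kBitsPerSystemPointer - 1));
}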
@@ -902,7 +866,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
i.InputDoubleRegister(0), DetermineStubCallMode());
break;
- case kArchStoreWithWriteBarrier: {
+ case kArchStoreWithWriteBarrier:
+ case kArchAtomicStoreWithWriteBarrier: {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
@@ -914,7 +879,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
scratch0, scratch1, mode,
DetermineStubCallMode());
__ Addu(kScratchReg, object, index);
- __ sw(value, MemOperand(kScratchReg));
+ if (arch_opcode == kArchStoreWithWriteBarrier) {
+ __ sw(value, MemOperand(kScratchReg));
+ } else {
+ DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
+ __ sync();
+ __ sw(value, MemOperand(kScratchReg));
+ __ sync();
+ }
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
}
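The sync/sw/sync sequence added above is the usual MIPS lowering of a
sequentially consistent store: the leading full barrier keeps earlier memory
accesses from sinking past the store, and the trailing one keeps later
accesses from hoisting above it. A portable C++ sketch of the equivalent
operation (illustrative, not V8 code):

#include <atomic>
#include <cstdint>

std::atomic<int32_t> cell;

void SeqCstStore(int32_t value) {
  // On MIPS this typically compiles to: sync; sw value, [cell]; sync.
  cell.store(value, std::memory_order_seq_cst);
}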
@@ -938,10 +910,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kArchWordPoisonOnSpeculation:
- __ And(i.OutputRegister(), i.InputRegister(0),
- kSpeculationPoisonRegister);
- break;
case kIeee754Float64Acos:
ASSEMBLE_IEEE754_UNOP(acos);
break;
@@ -1541,30 +1509,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMipsLbu:
__ lbu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsLb:
__ lb(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsSb:
__ sb(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMipsLhu:
__ lhu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsUlhu:
__ Ulhu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsLh:
__ lh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsUlh:
__ Ulh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsSh:
__ sh(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1574,11 +1536,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMipsLw:
__ lw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsUlw:
__ Ulw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMipsSw:
__ sw(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1658,7 +1618,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
default: {
UNREACHABLE();
- break;
}
}
} else {
@@ -1823,74 +1782,74 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ ilvr_w(dst, kSimd128RegZero, dst);
break;
}
- case kWord32AtomicLoadInt8:
+ case kAtomicLoadInt8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lb);
break;
- case kWord32AtomicLoadUint8:
+ case kAtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lbu);
break;
- case kWord32AtomicLoadInt16:
+ case kAtomicLoadInt16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lh);
break;
- case kWord32AtomicLoadUint16:
+ case kAtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lhu);
break;
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(lw);
break;
- case kWord32AtomicStoreWord8:
+ case kAtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(sb);
break;
- case kWord32AtomicStoreWord16:
+ case kAtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(sh);
break;
- case kWord32AtomicStoreWord32:
+ case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(sw);
break;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 8);
break;
- case kWord32AtomicExchangeUint8:
+ case kAtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 8);
break;
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(true, 16);
break;
- case kWord32AtomicExchangeUint16:
+ case kAtomicExchangeUint16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(false, 16);
break;
- case kWord32AtomicExchangeWord32:
+ case kAtomicExchangeWord32:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER();
break;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 8);
break;
- case kWord32AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 8);
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(true, 16);
break;
- case kWord32AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(false, 16);
break;
- case kWord32AtomicCompareExchangeWord32:
+ case kAtomicCompareExchangeWord32:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER();
break;
#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
+ case kAtomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP_EXT(true, 8, inst); \
break; \
- case kWord32Atomic##op##Uint8: \
+ case kAtomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP_EXT(false, 8, inst); \
break; \
- case kWord32Atomic##op##Int16: \
+ case kAtomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP_EXT(true, 16, inst); \
break; \
- case kWord32Atomic##op##Uint16: \
+ case kAtomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP_EXT(false, 16, inst); \
break; \
- case kWord32Atomic##op##Word32: \
+ case kAtomic##op##Word32: \
ASSEMBLE_ATOMIC_BINOP(inst); \
break;
ATOMIC_BINOP_CASE(Add, Addu)
@@ -3675,7 +3634,6 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
break;
default:
UNSUPPORTED_COND(instr->arch_opcode(), condition);
- break;
}
} else if (instr->arch_opcode() == kMipsMulOvf) {
// Overflow occurs if overflow register is not zero
@@ -3688,7 +3646,6 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
break;
default:
UNSUPPORTED_COND(kMipsMulOvf, condition);
- break;
}
} else if (instr->arch_opcode() == kMipsCmp) {
cc = FlagsConditionToConditionCmp(condition);
@@ -3727,85 +3684,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
branch->fallthru);
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
- return;
- }
-
- MipsOperandConverter i(this, instr);
- condition = NegateFlagsCondition(condition);
-
- switch (instr->arch_opcode()) {
- case kMipsCmp: {
- __ LoadZeroOnCondition(kSpeculationPoisonRegister, i.InputRegister(0),
- i.InputOperand(1),
- FlagsConditionToConditionCmp(condition));
- }
- return;
- case kMipsTst: {
- switch (condition) {
- case kEqual:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- break;
- case kNotEqual:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg);
- break;
- default:
- UNREACHABLE();
- }
- }
- return;
- case kMipsAddOvf:
- case kMipsSubOvf: {
- // Overflow occurs if overflow register is negative
- __ Slt(kScratchReg2, kScratchReg, zero_reg);
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg2);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kMipsMulOvf: {
- // Overflow occurs if overflow register is not zero
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kMipsCmpS:
- case kMipsCmpD: {
- bool predicate;
- FlagsConditionToConditionCmpFPU(&predicate, condition);
- if (predicate) {
- __ LoadZeroIfFPUCondition(kSpeculationPoisonRegister);
- } else {
- __ LoadZeroIfNotFPUCondition(kSpeculationPoisonRegister);
- }
- }
- return;
- default:
- UNREACHABLE();
- }
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -4130,7 +4008,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
- ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -4333,7 +4210,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
break;
case Constant::kInt64:
UNREACHABLE();
- break;
case Constant::kFloat64:
__ li(dst, Operand::EmbeddedNumber(src.ToFloat64().value()));
break;
@@ -4357,7 +4233,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
UNREACHABLE();
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips.
- break;
}
if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
diff --git a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
index 48635c9c15..aeb1756227 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-scheduler-mips.cc
@@ -1444,8 +1444,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
AdduLatency(false) + AndLatency(false) + BranchShortLatency() + 1 +
SubuLatency() + AdduLatency();
}
- case kArchWordPoisonOnSpeculation:
- return AndLatency();
case kIeee754Float64Acos:
case kIeee754Float64Acosh:
case kIeee754Float64Asin:
@@ -1657,19 +1655,15 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
switch (op->representation()) {
case MachineRepresentation::kFloat32:
return Latency::SWC1 + SubuLatency(false);
- break;
case MachineRepresentation::kFloat64:
return Sdc1Latency() + SubuLatency(false);
- break;
default: {
UNREACHABLE();
- break;
}
}
} else {
return PushRegisterLatency();
}
- break;
}
case kMipsPeek: {
if (instr->OutputAt(0)->IsFPRegister()) {
@@ -1682,7 +1676,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
} else {
return 1;
}
- break;
}
case kMipsStackClaim:
return SubuLatency(false);
@@ -1699,41 +1692,40 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
} else {
return 1;
}
- break;
}
case kMipsByteSwap32:
return ByteSwapSignedLatency();
- case kWord32AtomicLoadInt8:
- case kWord32AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kWord32AtomicLoadUint16:
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
return 2;
- case kWord32AtomicStoreWord8:
- case kWord32AtomicStoreWord16:
- case kWord32AtomicStoreWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
return 3;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
return Word32AtomicExchangeLatency(true, 8);
- case kWord32AtomicExchangeUint8:
+ case kAtomicExchangeUint8:
return Word32AtomicExchangeLatency(false, 8);
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
return Word32AtomicExchangeLatency(true, 16);
- case kWord32AtomicExchangeUint16:
+ case kAtomicExchangeUint16:
return Word32AtomicExchangeLatency(false, 16);
- case kWord32AtomicExchangeWord32: {
+ case kAtomicExchangeWord32: {
return 1 + AdduLatency() + Ldc1Latency() + 1 + ScLatency(0) +
BranchShortLatency() + 1;
}
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
return Word32AtomicCompareExchangeLatency(true, 8);
- case kWord32AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
return Word32AtomicCompareExchangeLatency(false, 8);
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
return Word32AtomicCompareExchangeLatency(true, 16);
- case kWord32AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
return Word32AtomicCompareExchangeLatency(false, 16);
- case kWord32AtomicCompareExchangeWord32:
+ case kAtomicCompareExchangeWord32:
return AdduLatency() + 1 + LlLatency(0) + BranchShortLatency() + 1;
case kMipsTst:
return AndLatency(instr->InputAt(1)->IsRegister());
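The scheduler changes above are opcode renames only; the cost model itself is
untouched: GetInstructionLatency returns an estimated cycle count per
instruction, and pseudo-instructions that expand to a load-linked/
store-conditional loop report the sum of their parts. A toy version of that
pattern (names and numbers are illustrative, not taken from the file):

constexpr int kLlLatency = 1;      // load-linked
constexpr int kScLatency = 1;      // store-conditional
constexpr int kBranchLatency = 2;  // retry branch

// An atomic exchange is modeled as ll + move + sc + branch-on-failure.
int AtomicExchangeLatencyEstimate() {
  return kLlLatency + 1 + kScLatency + kBranchLatency;
}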
diff --git a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
index c823612246..477c791ca0 100644
--- a/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
+++ b/deps/v8/src/compiler/backend/mips/instruction-selector-mips.cc
@@ -375,10 +375,6 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kNone:
UNREACHABLE();
}
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
- }
if (g.CanBeImmediate(index, opcode)) {
Emit(opcode | AddressingModeField::encode(kMode_MRI),
@@ -393,8 +389,6 @@ void InstructionSelector::VisitLoad(Node* node) {
}
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -1906,22 +1900,26 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ // TODO(mips-dev): Confirm whether there is any mips32 chip in use and
+ // support atomic loads of tagged values with barriers.
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode =
- load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
- : kWord32AtomicLoadUint16;
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
case MachineRepresentation::kWord32:
- opcode = kWord32AtomicLoadWord32;
+ opcode = kAtomicLoadWord32;
break;
default:
UNREACHABLE();
@@ -1941,7 +1939,10 @@ void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ // TODO(mips-dev): Confirm whether there is any mips32 chip in use and
+ // support atomic stores of tagged values with barriers.
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ MachineRepresentation rep = store_params.representation();
MipsOperandGenerator g(this);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -1949,13 +1950,16 @@ void InstructionSelector::VisitWord32AtomicStore(Node* node) {
ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kWord32AtomicStoreWord8;
+ opcode = kAtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
- opcode = kWord32AtomicStoreWord16;
+ opcode = kAtomicStoreWord16;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
case MachineRepresentation::kWord32:
- opcode = kWord32AtomicStoreWord32;
+ opcode = kAtomicStoreWord32;
break;
default:
UNREACHABLE();
@@ -1983,15 +1987,15 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
@@ -2021,15 +2025,15 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
@@ -2091,12 +2095,11 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
Emit(code, 1, outputs, input_count, inputs, 4, temps);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
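The reformatted VISIT_ATOMIC_BINOP macro above stamps out one visitor per
binary operation; for example, VISIT_ATOMIC_BINOP(Add) expands to:

void InstructionSelector::VisitWord32AtomicAdd(Node* node) {
  VisitWord32AtomicBinaryOperation(node, kAtomicAddInt8, kAtomicAddUint8,
                                   kAtomicAddInt16, kAtomicAddUint16,
                                   kAtomicAddWord32);
}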
diff --git a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
index 6fce103d24..f6fccd43d2 100644
--- a/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/code-generator-mips64.cc
@@ -95,7 +95,6 @@ class MipsOperandConverter final : public InstructionOperandConverter {
constant.ToDelayedStringConstant());
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): RPO immediates on mips?
- break;
}
UNREACHABLE();
}
@@ -321,16 +320,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- MipsOperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
- }
-}
-
} // namespace
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
@@ -577,31 +566,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- // Calculate a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- // difference = (current - expected) | (expected - current)
- // poison = ~(difference >> (kBitsPerSystemPointer - 1))
- __ ComputeCodeStartAddress(kScratchReg);
- __ Move(kSpeculationPoisonRegister, kScratchReg);
- __ subu(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ subu(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
- kScratchReg);
- __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ sra(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kBitsPerSystemPointer - 1);
- __ nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kSpeculationPoisonRegister);
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ And(sp, sp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -803,7 +767,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kArchBinarySearchSwitch:
AssembleArchBinarySearchSwitch(instr);
break;
- break;
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
@@ -864,7 +827,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ TruncateDoubleToI(isolate(), zone(), i.OutputRegister(),
i.InputDoubleRegister(0), DetermineStubCallMode());
break;
- case kArchStoreWithWriteBarrier: {
+ case kArchStoreWithWriteBarrier: // Fall through.
+ case kArchAtomicStoreWithWriteBarrier: {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
@@ -876,7 +840,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
scratch0, scratch1, mode,
DetermineStubCallMode());
__ Daddu(kScratchReg, object, index);
- __ Sd(value, MemOperand(kScratchReg));
+ if (arch_opcode == kArchStoreWithWriteBarrier) {
+ __ Sd(value, MemOperand(kScratchReg));
+ } else {
+ DCHECK_EQ(kArchAtomicStoreWithWriteBarrier, arch_opcode);
+ __ sync();
+ __ Sd(value, MemOperand(kScratchReg));
+ __ sync();
+ }
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
}
@@ -900,10 +871,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
break;
}
- case kArchWordPoisonOnSpeculation:
- __ And(i.OutputRegister(), i.InputRegister(0),
- kSpeculationPoisonRegister);
- break;
case kIeee754Float64Acos:
ASSEMBLE_IEEE754_UNOP(acos);
break;
@@ -1646,30 +1613,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64Lbu:
__ Lbu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Lb:
__ Lb(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Sb:
__ Sb(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kMips64Lhu:
__ Lhu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulhu:
__ Ulhu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Lh:
__ Lh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulh:
__ Ulh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Sh:
__ Sh(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1679,27 +1640,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kMips64Lw:
__ Lw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulw:
__ Ulw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Lwu:
__ Lwu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ulwu:
__ Ulwu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Ld:
__ Ld(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Uld:
__ Uld(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kMips64Sw:
__ Sw(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1919,149 +1874,172 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ StoreLane(sz, src, i.InputUint8(1), i.MemoryOperand(2));
break;
}
- case kWord32AtomicLoadInt8:
+ case kAtomicLoadInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lb);
break;
- case kWord32AtomicLoadUint8:
+ case kAtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
break;
- case kWord32AtomicLoadInt16:
+ case kAtomicLoadInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lh);
break;
- case kWord32AtomicLoadUint16:
+ case kAtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
break;
- case kWord32AtomicLoadWord32:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lw);
- break;
- case kMips64Word64AtomicLoadUint8:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
- break;
- case kMips64Word64AtomicLoadUint16:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
- break;
- case kMips64Word64AtomicLoadUint32:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lwu);
+ case kAtomicLoadWord32:
+ if (AtomicWidthField::decode(opcode) == AtomicWidth::kWord32)
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lw);
+ else
+ ASSEMBLE_ATOMIC_LOAD_INTEGER(Lwu);
break;
case kMips64Word64AtomicLoadUint64:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld);
break;
- case kWord32AtomicStoreWord8:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
- break;
- case kWord32AtomicStoreWord16:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
- break;
- case kWord32AtomicStoreWord32:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
- break;
- case kMips64Word64AtomicStoreWord8:
+ case kAtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
break;
- case kMips64Word64AtomicStoreWord16:
+ case kAtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
break;
- case kMips64Word64AtomicStoreWord32:
+ case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
break;
+ case kMips64StoreCompressTagged:
case kMips64Word64AtomicStoreWord64:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sd);
break;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
break;
- case kWord32AtomicExchangeUint8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ case kAtomicExchangeUint8:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
+ break;
+ }
break;
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
break;
- case kWord32AtomicExchangeUint16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
- break;
- case kWord32AtomicExchangeWord32:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
- break;
- case kMips64Word64AtomicExchangeUint8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
- break;
- case kMips64Word64AtomicExchangeUint16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ case kAtomicExchangeUint16:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ break;
+ }
break;
- case kMips64Word64AtomicExchangeUint32:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ case kAtomicExchangeWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ break;
+ }
break;
case kMips64Word64AtomicExchangeUint64:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Lld, Scd);
break;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
break;
- case kWord32AtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ case kAtomicCompareExchangeUint8:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
+ break;
+ }
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
break;
- case kWord32AtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
- break;
- case kWord32AtomicCompareExchangeWord32:
- __ sll(i.InputRegister(2), i.InputRegister(2), 0);
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
- break;
- case kMips64Word64AtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
- break;
- case kMips64Word64AtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ case kAtomicCompareExchangeUint16:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ break;
+ }
break;
- case kMips64Word64AtomicCompareExchangeUint32:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ case kAtomicCompareExchangeWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ sll(i.InputRegister(2), i.InputRegister(2), 0);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ break;
+ }
break;
case kMips64Word64AtomicCompareExchangeUint64:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Lld, Scd);
break;
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst, 32); \
- break; \
- case kWord32Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst, 32); \
- break; \
- case kWord32Atomic##op##Int16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst, 32); \
- break; \
- case kWord32Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst, 32); \
- break; \
- case kWord32Atomic##op##Word32: \
- ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst); \
- break;
- ATOMIC_BINOP_CASE(Add, Addu)
- ATOMIC_BINOP_CASE(Sub, Subu)
- ATOMIC_BINOP_CASE(And, And)
- ATOMIC_BINOP_CASE(Or, Or)
- ATOMIC_BINOP_CASE(Xor, Xor)
-#undef ATOMIC_BINOP_CASE
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kMips64Word64Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst, 64); \
- break; \
- case kMips64Word64Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst, 64); \
- break; \
- case kMips64Word64Atomic##op##Uint32: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst, 64); \
- break; \
- case kMips64Word64Atomic##op##Uint64: \
- ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst); \
+#define ATOMIC_BINOP_CASE(op, inst32, inst64) \
+ case kAtomic##op##Int8: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst32, 32); \
+ break; \
+ case kAtomic##op##Uint8: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst32, 32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Int16: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst32, 32); \
+ break; \
+ case kAtomic##op##Uint16: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst32, 32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Word32: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kMips64Word64Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst64); \
break;
- ATOMIC_BINOP_CASE(Add, Daddu)
- ATOMIC_BINOP_CASE(Sub, Dsubu)
- ATOMIC_BINOP_CASE(And, And)
- ATOMIC_BINOP_CASE(Or, Or)
- ATOMIC_BINOP_CASE(Xor, Xor)
+ ATOMIC_BINOP_CASE(Add, Addu, Daddu)
+ ATOMIC_BINOP_CASE(Sub, Subu, Dsubu)
+ ATOMIC_BINOP_CASE(And, And, And)
+ ATOMIC_BINOP_CASE(Or, Or, Or)
+ ATOMIC_BINOP_CASE(Xor, Xor, Xor)
#undef ATOMIC_BINOP_CASE
case kMips64AssertEqual:
__ Assert(eq, static_cast<AbortReason>(i.InputOperand(2).immediate()),
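The kWord32/kWord64 dispatch in the hunk above assumes the atomic width is
encoded in spare bits of the InstructionCode. A self-contained sketch of the
bit-field helper this relies on (the real field layout lives in V8's
instruction-codes.h; the shift and width below are placeholders):

#include <cstdint>

enum class AtomicWidth { kWord32, kWord64 };

template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((uint32_t{1} << kSize) - 1) << kShift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
};

// Placeholder position: two bits in the opcode's misc field.
using AtomicWidthField = BitField<AtomicWidth, 20, 2>;

// The selector ORs AtomicWidthField::encode(...) into the opcode, and the
// code generator recovers it with AtomicWidthField::decode(opcode).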
@@ -3851,7 +3829,6 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
break;
default:
UNSUPPORTED_COND(instr->arch_opcode(), condition);
- break;
}
} else if (instr->arch_opcode() == kMips64MulOvf) {
// Overflow occurs if overflow register is not zero
@@ -3864,7 +3841,6 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
break;
default:
UNSUPPORTED_COND(kMipsMulOvf, condition);
- break;
}
} else if (instr->arch_opcode() == kMips64Cmp) {
cc = FlagsConditionToConditionCmp(condition);
@@ -3904,104 +3880,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
branch->fallthru);
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
- return;
- }
-
- MipsOperandConverter i(this, instr);
- condition = NegateFlagsCondition(condition);
-
- switch (instr->arch_opcode()) {
- case kMips64Cmp: {
- __ LoadZeroOnCondition(kSpeculationPoisonRegister, i.InputRegister(0),
- i.InputOperand(1),
- FlagsConditionToConditionCmp(condition));
- }
- return;
- case kMips64Tst: {
- switch (condition) {
- case kEqual:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- break;
- case kNotEqual:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg);
- break;
- default:
- UNREACHABLE();
- }
- }
- return;
- case kMips64Dadd:
- case kMips64Dsub: {
- // Check for overflow creates 1 or 0 for result.
- __ dsrl32(kScratchReg, i.OutputRegister(), 31);
- __ srl(kScratchReg2, i.OutputRegister(), 31);
- __ xor_(kScratchReg2, kScratchReg, kScratchReg2);
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg2);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kMips64DaddOvf:
- case kMips64DsubOvf: {
- // Overflow occurs if overflow register is negative
- __ Slt(kScratchReg2, kScratchReg, zero_reg);
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg2);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kMips64MulOvf: {
- // Overflow occurs if overflow register is not zero
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kMips64CmpS:
- case kMips64CmpD: {
- bool predicate;
- FlagsConditionToConditionCmpFPU(&predicate, condition);
- if (predicate) {
- __ LoadZeroIfFPUCondition(kSpeculationPoisonRegister);
- } else {
- __ LoadZeroIfNotFPUCondition(kSpeculationPoisonRegister);
- }
- }
- return;
- default:
- UNREACHABLE();
- }
-}
-
#undef UNSUPPORTED_COND
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
@@ -4340,7 +4218,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
- ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -4568,7 +4445,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
UNREACHABLE();
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): loading RPO numbers on mips64.
- break;
}
if (destination->IsStackSlot()) __ Sd(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
index e1b40a4be5..30d7f5af75 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
+++ b/deps/v8/src/compiler/backend/mips64/instruction-codes-mips64.h
@@ -11,419 +11,393 @@ namespace compiler {
// MIPS64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(Mips64Add) \
- V(Mips64Dadd) \
- V(Mips64DaddOvf) \
- V(Mips64Sub) \
- V(Mips64Dsub) \
- V(Mips64DsubOvf) \
- V(Mips64Mul) \
- V(Mips64MulOvf) \
- V(Mips64MulHigh) \
- V(Mips64DMulHigh) \
- V(Mips64MulHighU) \
- V(Mips64Dmul) \
- V(Mips64Div) \
- V(Mips64Ddiv) \
- V(Mips64DivU) \
- V(Mips64DdivU) \
- V(Mips64Mod) \
- V(Mips64Dmod) \
- V(Mips64ModU) \
- V(Mips64DmodU) \
- V(Mips64And) \
- V(Mips64And32) \
- V(Mips64Or) \
- V(Mips64Or32) \
- V(Mips64Nor) \
- V(Mips64Nor32) \
- V(Mips64Xor) \
- V(Mips64Xor32) \
- V(Mips64Clz) \
- V(Mips64Lsa) \
- V(Mips64Dlsa) \
- V(Mips64Shl) \
- V(Mips64Shr) \
- V(Mips64Sar) \
- V(Mips64Ext) \
- V(Mips64Ins) \
- V(Mips64Dext) \
- V(Mips64Dins) \
- V(Mips64Dclz) \
- V(Mips64Ctz) \
- V(Mips64Dctz) \
- V(Mips64Popcnt) \
- V(Mips64Dpopcnt) \
- V(Mips64Dshl) \
- V(Mips64Dshr) \
- V(Mips64Dsar) \
- V(Mips64Ror) \
- V(Mips64Dror) \
- V(Mips64Mov) \
- V(Mips64Tst) \
- V(Mips64Cmp) \
- V(Mips64CmpS) \
- V(Mips64AddS) \
- V(Mips64SubS) \
- V(Mips64MulS) \
- V(Mips64DivS) \
- V(Mips64AbsS) \
- V(Mips64NegS) \
- V(Mips64SqrtS) \
- V(Mips64MaxS) \
- V(Mips64MinS) \
- V(Mips64CmpD) \
- V(Mips64AddD) \
- V(Mips64SubD) \
- V(Mips64MulD) \
- V(Mips64DivD) \
- V(Mips64ModD) \
- V(Mips64AbsD) \
- V(Mips64NegD) \
- V(Mips64SqrtD) \
- V(Mips64MaxD) \
- V(Mips64MinD) \
- V(Mips64Float64RoundDown) \
- V(Mips64Float64RoundTruncate) \
- V(Mips64Float64RoundUp) \
- V(Mips64Float64RoundTiesEven) \
- V(Mips64Float32RoundDown) \
- V(Mips64Float32RoundTruncate) \
- V(Mips64Float32RoundUp) \
- V(Mips64Float32RoundTiesEven) \
- V(Mips64CvtSD) \
- V(Mips64CvtDS) \
- V(Mips64TruncWD) \
- V(Mips64RoundWD) \
- V(Mips64FloorWD) \
- V(Mips64CeilWD) \
- V(Mips64TruncWS) \
- V(Mips64RoundWS) \
- V(Mips64FloorWS) \
- V(Mips64CeilWS) \
- V(Mips64TruncLS) \
- V(Mips64TruncLD) \
- V(Mips64TruncUwD) \
- V(Mips64TruncUwS) \
- V(Mips64TruncUlS) \
- V(Mips64TruncUlD) \
- V(Mips64CvtDW) \
- V(Mips64CvtSL) \
- V(Mips64CvtSW) \
- V(Mips64CvtSUw) \
- V(Mips64CvtSUl) \
- V(Mips64CvtDL) \
- V(Mips64CvtDUw) \
- V(Mips64CvtDUl) \
- V(Mips64Lb) \
- V(Mips64Lbu) \
- V(Mips64Sb) \
- V(Mips64Lh) \
- V(Mips64Ulh) \
- V(Mips64Lhu) \
- V(Mips64Ulhu) \
- V(Mips64Sh) \
- V(Mips64Ush) \
- V(Mips64Ld) \
- V(Mips64Uld) \
- V(Mips64Lw) \
- V(Mips64Ulw) \
- V(Mips64Lwu) \
- V(Mips64Ulwu) \
- V(Mips64Sw) \
- V(Mips64Usw) \
- V(Mips64Sd) \
- V(Mips64Usd) \
- V(Mips64Lwc1) \
- V(Mips64Ulwc1) \
- V(Mips64Swc1) \
- V(Mips64Uswc1) \
- V(Mips64Ldc1) \
- V(Mips64Uldc1) \
- V(Mips64Sdc1) \
- V(Mips64Usdc1) \
- V(Mips64BitcastDL) \
- V(Mips64BitcastLD) \
- V(Mips64Float64ExtractLowWord32) \
- V(Mips64Float64ExtractHighWord32) \
- V(Mips64Float64InsertLowWord32) \
- V(Mips64Float64InsertHighWord32) \
- V(Mips64Float32Max) \
- V(Mips64Float64Max) \
- V(Mips64Float32Min) \
- V(Mips64Float64Min) \
- V(Mips64Float64SilenceNaN) \
- V(Mips64Push) \
- V(Mips64Peek) \
- V(Mips64StoreToStackSlot) \
- V(Mips64ByteSwap64) \
- V(Mips64ByteSwap32) \
- V(Mips64StackClaim) \
- V(Mips64Seb) \
- V(Mips64Seh) \
- V(Mips64Sync) \
- V(Mips64AssertEqual) \
- V(Mips64S128Const) \
- V(Mips64S128Zero) \
- V(Mips64S128AllOnes) \
- V(Mips64I32x4Splat) \
- V(Mips64I32x4ExtractLane) \
- V(Mips64I32x4ReplaceLane) \
- V(Mips64I32x4Add) \
- V(Mips64I32x4Sub) \
- V(Mips64F64x2Abs) \
- V(Mips64F64x2Neg) \
- V(Mips64F32x4Splat) \
- V(Mips64F32x4ExtractLane) \
- V(Mips64F32x4ReplaceLane) \
- V(Mips64F32x4SConvertI32x4) \
- V(Mips64F32x4UConvertI32x4) \
- V(Mips64I32x4Mul) \
- V(Mips64I32x4MaxS) \
- V(Mips64I32x4MinS) \
- V(Mips64I32x4Eq) \
- V(Mips64I32x4Ne) \
- V(Mips64I32x4Shl) \
- V(Mips64I32x4ShrS) \
- V(Mips64I32x4ShrU) \
- V(Mips64I32x4MaxU) \
- V(Mips64I32x4MinU) \
- V(Mips64F64x2Sqrt) \
- V(Mips64F64x2Add) \
- V(Mips64F64x2Sub) \
- V(Mips64F64x2Mul) \
- V(Mips64F64x2Div) \
- V(Mips64F64x2Min) \
- V(Mips64F64x2Max) \
- V(Mips64F64x2Eq) \
- V(Mips64F64x2Ne) \
- V(Mips64F64x2Lt) \
- V(Mips64F64x2Le) \
- V(Mips64F64x2Splat) \
- V(Mips64F64x2ExtractLane) \
- V(Mips64F64x2ReplaceLane) \
- V(Mips64F64x2Pmin) \
- V(Mips64F64x2Pmax) \
- V(Mips64F64x2Ceil) \
- V(Mips64F64x2Floor) \
- V(Mips64F64x2Trunc) \
- V(Mips64F64x2NearestInt) \
- V(Mips64F64x2ConvertLowI32x4S) \
- V(Mips64F64x2ConvertLowI32x4U) \
- V(Mips64F64x2PromoteLowF32x4) \
- V(Mips64I64x2Splat) \
- V(Mips64I64x2ExtractLane) \
- V(Mips64I64x2ReplaceLane) \
- V(Mips64I64x2Add) \
- V(Mips64I64x2Sub) \
- V(Mips64I64x2Mul) \
- V(Mips64I64x2Neg) \
- V(Mips64I64x2Shl) \
- V(Mips64I64x2ShrS) \
- V(Mips64I64x2ShrU) \
- V(Mips64I64x2BitMask) \
- V(Mips64I64x2Eq) \
- V(Mips64I64x2Ne) \
- V(Mips64I64x2GtS) \
- V(Mips64I64x2GeS) \
- V(Mips64I64x2Abs) \
- V(Mips64I64x2SConvertI32x4Low) \
- V(Mips64I64x2SConvertI32x4High) \
- V(Mips64I64x2UConvertI32x4Low) \
- V(Mips64I64x2UConvertI32x4High) \
- V(Mips64ExtMulLow) \
- V(Mips64ExtMulHigh) \
- V(Mips64ExtAddPairwise) \
- V(Mips64F32x4Abs) \
- V(Mips64F32x4Neg) \
- V(Mips64F32x4Sqrt) \
- V(Mips64F32x4RecipApprox) \
- V(Mips64F32x4RecipSqrtApprox) \
- V(Mips64F32x4Add) \
- V(Mips64F32x4Sub) \
- V(Mips64F32x4Mul) \
- V(Mips64F32x4Div) \
- V(Mips64F32x4Max) \
- V(Mips64F32x4Min) \
- V(Mips64F32x4Eq) \
- V(Mips64F32x4Ne) \
- V(Mips64F32x4Lt) \
- V(Mips64F32x4Le) \
- V(Mips64F32x4Pmin) \
- V(Mips64F32x4Pmax) \
- V(Mips64F32x4Ceil) \
- V(Mips64F32x4Floor) \
- V(Mips64F32x4Trunc) \
- V(Mips64F32x4NearestInt) \
- V(Mips64F32x4DemoteF64x2Zero) \
- V(Mips64I32x4SConvertF32x4) \
- V(Mips64I32x4UConvertF32x4) \
- V(Mips64I32x4Neg) \
- V(Mips64I32x4GtS) \
- V(Mips64I32x4GeS) \
- V(Mips64I32x4GtU) \
- V(Mips64I32x4GeU) \
- V(Mips64I32x4Abs) \
- V(Mips64I32x4BitMask) \
- V(Mips64I32x4DotI16x8S) \
- V(Mips64I32x4TruncSatF64x2SZero) \
- V(Mips64I32x4TruncSatF64x2UZero) \
- V(Mips64I16x8Splat) \
- V(Mips64I16x8ExtractLaneU) \
- V(Mips64I16x8ExtractLaneS) \
- V(Mips64I16x8ReplaceLane) \
- V(Mips64I16x8Neg) \
- V(Mips64I16x8Shl) \
- V(Mips64I16x8ShrS) \
- V(Mips64I16x8ShrU) \
- V(Mips64I16x8Add) \
- V(Mips64I16x8AddSatS) \
- V(Mips64I16x8Sub) \
- V(Mips64I16x8SubSatS) \
- V(Mips64I16x8Mul) \
- V(Mips64I16x8MaxS) \
- V(Mips64I16x8MinS) \
- V(Mips64I16x8Eq) \
- V(Mips64I16x8Ne) \
- V(Mips64I16x8GtS) \
- V(Mips64I16x8GeS) \
- V(Mips64I16x8AddSatU) \
- V(Mips64I16x8SubSatU) \
- V(Mips64I16x8MaxU) \
- V(Mips64I16x8MinU) \
- V(Mips64I16x8GtU) \
- V(Mips64I16x8GeU) \
- V(Mips64I16x8RoundingAverageU) \
- V(Mips64I16x8Abs) \
- V(Mips64I16x8BitMask) \
- V(Mips64I16x8Q15MulRSatS) \
- V(Mips64I8x16Splat) \
- V(Mips64I8x16ExtractLaneU) \
- V(Mips64I8x16ExtractLaneS) \
- V(Mips64I8x16ReplaceLane) \
- V(Mips64I8x16Neg) \
- V(Mips64I8x16Shl) \
- V(Mips64I8x16ShrS) \
- V(Mips64I8x16Add) \
- V(Mips64I8x16AddSatS) \
- V(Mips64I8x16Sub) \
- V(Mips64I8x16SubSatS) \
- V(Mips64I8x16MaxS) \
- V(Mips64I8x16MinS) \
- V(Mips64I8x16Eq) \
- V(Mips64I8x16Ne) \
- V(Mips64I8x16GtS) \
- V(Mips64I8x16GeS) \
- V(Mips64I8x16ShrU) \
- V(Mips64I8x16AddSatU) \
- V(Mips64I8x16SubSatU) \
- V(Mips64I8x16MaxU) \
- V(Mips64I8x16MinU) \
- V(Mips64I8x16GtU) \
- V(Mips64I8x16GeU) \
- V(Mips64I8x16RoundingAverageU) \
- V(Mips64I8x16Abs) \
- V(Mips64I8x16Popcnt) \
- V(Mips64I8x16BitMask) \
- V(Mips64S128And) \
- V(Mips64S128Or) \
- V(Mips64S128Xor) \
- V(Mips64S128Not) \
- V(Mips64S128Select) \
- V(Mips64S128AndNot) \
- V(Mips64I64x2AllTrue) \
- V(Mips64I32x4AllTrue) \
- V(Mips64I16x8AllTrue) \
- V(Mips64I8x16AllTrue) \
- V(Mips64V128AnyTrue) \
- V(Mips64S32x4InterleaveRight) \
- V(Mips64S32x4InterleaveLeft) \
- V(Mips64S32x4PackEven) \
- V(Mips64S32x4PackOdd) \
- V(Mips64S32x4InterleaveEven) \
- V(Mips64S32x4InterleaveOdd) \
- V(Mips64S32x4Shuffle) \
- V(Mips64S16x8InterleaveRight) \
- V(Mips64S16x8InterleaveLeft) \
- V(Mips64S16x8PackEven) \
- V(Mips64S16x8PackOdd) \
- V(Mips64S16x8InterleaveEven) \
- V(Mips64S16x8InterleaveOdd) \
- V(Mips64S16x4Reverse) \
- V(Mips64S16x2Reverse) \
- V(Mips64S8x16InterleaveRight) \
- V(Mips64S8x16InterleaveLeft) \
- V(Mips64S8x16PackEven) \
- V(Mips64S8x16PackOdd) \
- V(Mips64S8x16InterleaveEven) \
- V(Mips64S8x16InterleaveOdd) \
- V(Mips64I8x16Shuffle) \
- V(Mips64I8x16Swizzle) \
- V(Mips64S8x16Concat) \
- V(Mips64S8x8Reverse) \
- V(Mips64S8x4Reverse) \
- V(Mips64S8x2Reverse) \
- V(Mips64S128LoadSplat) \
- V(Mips64S128Load8x8S) \
- V(Mips64S128Load8x8U) \
- V(Mips64S128Load16x4S) \
- V(Mips64S128Load16x4U) \
- V(Mips64S128Load32x2S) \
- V(Mips64S128Load32x2U) \
- V(Mips64S128Load32Zero) \
- V(Mips64S128Load64Zero) \
- V(Mips64S128LoadLane) \
- V(Mips64S128StoreLane) \
- V(Mips64MsaLd) \
- V(Mips64MsaSt) \
- V(Mips64I32x4SConvertI16x8Low) \
- V(Mips64I32x4SConvertI16x8High) \
- V(Mips64I32x4UConvertI16x8Low) \
- V(Mips64I32x4UConvertI16x8High) \
- V(Mips64I16x8SConvertI8x16Low) \
- V(Mips64I16x8SConvertI8x16High) \
- V(Mips64I16x8SConvertI32x4) \
- V(Mips64I16x8UConvertI32x4) \
- V(Mips64I16x8UConvertI8x16Low) \
- V(Mips64I16x8UConvertI8x16High) \
- V(Mips64I8x16SConvertI16x8) \
- V(Mips64I8x16UConvertI16x8) \
- V(Mips64Word64AtomicLoadUint8) \
- V(Mips64Word64AtomicLoadUint16) \
- V(Mips64Word64AtomicLoadUint32) \
- V(Mips64Word64AtomicLoadUint64) \
- V(Mips64Word64AtomicStoreWord8) \
- V(Mips64Word64AtomicStoreWord16) \
- V(Mips64Word64AtomicStoreWord32) \
- V(Mips64Word64AtomicStoreWord64) \
- V(Mips64Word64AtomicAddUint8) \
- V(Mips64Word64AtomicAddUint16) \
- V(Mips64Word64AtomicAddUint32) \
- V(Mips64Word64AtomicAddUint64) \
- V(Mips64Word64AtomicSubUint8) \
- V(Mips64Word64AtomicSubUint16) \
- V(Mips64Word64AtomicSubUint32) \
- V(Mips64Word64AtomicSubUint64) \
- V(Mips64Word64AtomicAndUint8) \
- V(Mips64Word64AtomicAndUint16) \
- V(Mips64Word64AtomicAndUint32) \
- V(Mips64Word64AtomicAndUint64) \
- V(Mips64Word64AtomicOrUint8) \
- V(Mips64Word64AtomicOrUint16) \
- V(Mips64Word64AtomicOrUint32) \
- V(Mips64Word64AtomicOrUint64) \
- V(Mips64Word64AtomicXorUint8) \
- V(Mips64Word64AtomicXorUint16) \
- V(Mips64Word64AtomicXorUint32) \
- V(Mips64Word64AtomicXorUint64) \
- V(Mips64Word64AtomicExchangeUint8) \
- V(Mips64Word64AtomicExchangeUint16) \
- V(Mips64Word64AtomicExchangeUint32) \
- V(Mips64Word64AtomicExchangeUint64) \
- V(Mips64Word64AtomicCompareExchangeUint8) \
- V(Mips64Word64AtomicCompareExchangeUint16) \
- V(Mips64Word64AtomicCompareExchangeUint32) \
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(Mips64Add) \
+ V(Mips64Dadd) \
+ V(Mips64DaddOvf) \
+ V(Mips64Sub) \
+ V(Mips64Dsub) \
+ V(Mips64DsubOvf) \
+ V(Mips64Mul) \
+ V(Mips64MulOvf) \
+ V(Mips64MulHigh) \
+ V(Mips64DMulHigh) \
+ V(Mips64MulHighU) \
+ V(Mips64Dmul) \
+ V(Mips64Div) \
+ V(Mips64Ddiv) \
+ V(Mips64DivU) \
+ V(Mips64DdivU) \
+ V(Mips64Mod) \
+ V(Mips64Dmod) \
+ V(Mips64ModU) \
+ V(Mips64DmodU) \
+ V(Mips64And) \
+ V(Mips64And32) \
+ V(Mips64Or) \
+ V(Mips64Or32) \
+ V(Mips64Nor) \
+ V(Mips64Nor32) \
+ V(Mips64Xor) \
+ V(Mips64Xor32) \
+ V(Mips64Clz) \
+ V(Mips64Lsa) \
+ V(Mips64Dlsa) \
+ V(Mips64Shl) \
+ V(Mips64Shr) \
+ V(Mips64Sar) \
+ V(Mips64Ext) \
+ V(Mips64Ins) \
+ V(Mips64Dext) \
+ V(Mips64Dins) \
+ V(Mips64Dclz) \
+ V(Mips64Ctz) \
+ V(Mips64Dctz) \
+ V(Mips64Popcnt) \
+ V(Mips64Dpopcnt) \
+ V(Mips64Dshl) \
+ V(Mips64Dshr) \
+ V(Mips64Dsar) \
+ V(Mips64Ror) \
+ V(Mips64Dror) \
+ V(Mips64Mov) \
+ V(Mips64Tst) \
+ V(Mips64Cmp) \
+ V(Mips64CmpS) \
+ V(Mips64AddS) \
+ V(Mips64SubS) \
+ V(Mips64MulS) \
+ V(Mips64DivS) \
+ V(Mips64AbsS) \
+ V(Mips64NegS) \
+ V(Mips64SqrtS) \
+ V(Mips64MaxS) \
+ V(Mips64MinS) \
+ V(Mips64CmpD) \
+ V(Mips64AddD) \
+ V(Mips64SubD) \
+ V(Mips64MulD) \
+ V(Mips64DivD) \
+ V(Mips64ModD) \
+ V(Mips64AbsD) \
+ V(Mips64NegD) \
+ V(Mips64SqrtD) \
+ V(Mips64MaxD) \
+ V(Mips64MinD) \
+ V(Mips64Float64RoundDown) \
+ V(Mips64Float64RoundTruncate) \
+ V(Mips64Float64RoundUp) \
+ V(Mips64Float64RoundTiesEven) \
+ V(Mips64Float32RoundDown) \
+ V(Mips64Float32RoundTruncate) \
+ V(Mips64Float32RoundUp) \
+ V(Mips64Float32RoundTiesEven) \
+ V(Mips64CvtSD) \
+ V(Mips64CvtDS) \
+ V(Mips64TruncWD) \
+ V(Mips64RoundWD) \
+ V(Mips64FloorWD) \
+ V(Mips64CeilWD) \
+ V(Mips64TruncWS) \
+ V(Mips64RoundWS) \
+ V(Mips64FloorWS) \
+ V(Mips64CeilWS) \
+ V(Mips64TruncLS) \
+ V(Mips64TruncLD) \
+ V(Mips64TruncUwD) \
+ V(Mips64TruncUwS) \
+ V(Mips64TruncUlS) \
+ V(Mips64TruncUlD) \
+ V(Mips64CvtDW) \
+ V(Mips64CvtSL) \
+ V(Mips64CvtSW) \
+ V(Mips64CvtSUw) \
+ V(Mips64CvtSUl) \
+ V(Mips64CvtDL) \
+ V(Mips64CvtDUw) \
+ V(Mips64CvtDUl) \
+ V(Mips64Lb) \
+ V(Mips64Lbu) \
+ V(Mips64Sb) \
+ V(Mips64Lh) \
+ V(Mips64Ulh) \
+ V(Mips64Lhu) \
+ V(Mips64Ulhu) \
+ V(Mips64Sh) \
+ V(Mips64Ush) \
+ V(Mips64Ld) \
+ V(Mips64Uld) \
+ V(Mips64Lw) \
+ V(Mips64Ulw) \
+ V(Mips64Lwu) \
+ V(Mips64Ulwu) \
+ V(Mips64Sw) \
+ V(Mips64Usw) \
+ V(Mips64Sd) \
+ V(Mips64Usd) \
+ V(Mips64Lwc1) \
+ V(Mips64Ulwc1) \
+ V(Mips64Swc1) \
+ V(Mips64Uswc1) \
+ V(Mips64Ldc1) \
+ V(Mips64Uldc1) \
+ V(Mips64Sdc1) \
+ V(Mips64Usdc1) \
+ V(Mips64BitcastDL) \
+ V(Mips64BitcastLD) \
+ V(Mips64Float64ExtractLowWord32) \
+ V(Mips64Float64ExtractHighWord32) \
+ V(Mips64Float64InsertLowWord32) \
+ V(Mips64Float64InsertHighWord32) \
+ V(Mips64Float32Max) \
+ V(Mips64Float64Max) \
+ V(Mips64Float32Min) \
+ V(Mips64Float64Min) \
+ V(Mips64Float64SilenceNaN) \
+ V(Mips64Push) \
+ V(Mips64Peek) \
+ V(Mips64StoreToStackSlot) \
+ V(Mips64ByteSwap64) \
+ V(Mips64ByteSwap32) \
+ V(Mips64StackClaim) \
+ V(Mips64Seb) \
+ V(Mips64Seh) \
+ V(Mips64Sync) \
+ V(Mips64AssertEqual) \
+ V(Mips64S128Const) \
+ V(Mips64S128Zero) \
+ V(Mips64S128AllOnes) \
+ V(Mips64I32x4Splat) \
+ V(Mips64I32x4ExtractLane) \
+ V(Mips64I32x4ReplaceLane) \
+ V(Mips64I32x4Add) \
+ V(Mips64I32x4Sub) \
+ V(Mips64F64x2Abs) \
+ V(Mips64F64x2Neg) \
+ V(Mips64F32x4Splat) \
+ V(Mips64F32x4ExtractLane) \
+ V(Mips64F32x4ReplaceLane) \
+ V(Mips64F32x4SConvertI32x4) \
+ V(Mips64F32x4UConvertI32x4) \
+ V(Mips64I32x4Mul) \
+ V(Mips64I32x4MaxS) \
+ V(Mips64I32x4MinS) \
+ V(Mips64I32x4Eq) \
+ V(Mips64I32x4Ne) \
+ V(Mips64I32x4Shl) \
+ V(Mips64I32x4ShrS) \
+ V(Mips64I32x4ShrU) \
+ V(Mips64I32x4MaxU) \
+ V(Mips64I32x4MinU) \
+ V(Mips64F64x2Sqrt) \
+ V(Mips64F64x2Add) \
+ V(Mips64F64x2Sub) \
+ V(Mips64F64x2Mul) \
+ V(Mips64F64x2Div) \
+ V(Mips64F64x2Min) \
+ V(Mips64F64x2Max) \
+ V(Mips64F64x2Eq) \
+ V(Mips64F64x2Ne) \
+ V(Mips64F64x2Lt) \
+ V(Mips64F64x2Le) \
+ V(Mips64F64x2Splat) \
+ V(Mips64F64x2ExtractLane) \
+ V(Mips64F64x2ReplaceLane) \
+ V(Mips64F64x2Pmin) \
+ V(Mips64F64x2Pmax) \
+ V(Mips64F64x2Ceil) \
+ V(Mips64F64x2Floor) \
+ V(Mips64F64x2Trunc) \
+ V(Mips64F64x2NearestInt) \
+ V(Mips64F64x2ConvertLowI32x4S) \
+ V(Mips64F64x2ConvertLowI32x4U) \
+ V(Mips64F64x2PromoteLowF32x4) \
+ V(Mips64I64x2Splat) \
+ V(Mips64I64x2ExtractLane) \
+ V(Mips64I64x2ReplaceLane) \
+ V(Mips64I64x2Add) \
+ V(Mips64I64x2Sub) \
+ V(Mips64I64x2Mul) \
+ V(Mips64I64x2Neg) \
+ V(Mips64I64x2Shl) \
+ V(Mips64I64x2ShrS) \
+ V(Mips64I64x2ShrU) \
+ V(Mips64I64x2BitMask) \
+ V(Mips64I64x2Eq) \
+ V(Mips64I64x2Ne) \
+ V(Mips64I64x2GtS) \
+ V(Mips64I64x2GeS) \
+ V(Mips64I64x2Abs) \
+ V(Mips64I64x2SConvertI32x4Low) \
+ V(Mips64I64x2SConvertI32x4High) \
+ V(Mips64I64x2UConvertI32x4Low) \
+ V(Mips64I64x2UConvertI32x4High) \
+ V(Mips64ExtMulLow) \
+ V(Mips64ExtMulHigh) \
+ V(Mips64ExtAddPairwise) \
+ V(Mips64F32x4Abs) \
+ V(Mips64F32x4Neg) \
+ V(Mips64F32x4Sqrt) \
+ V(Mips64F32x4RecipApprox) \
+ V(Mips64F32x4RecipSqrtApprox) \
+ V(Mips64F32x4Add) \
+ V(Mips64F32x4Sub) \
+ V(Mips64F32x4Mul) \
+ V(Mips64F32x4Div) \
+ V(Mips64F32x4Max) \
+ V(Mips64F32x4Min) \
+ V(Mips64F32x4Eq) \
+ V(Mips64F32x4Ne) \
+ V(Mips64F32x4Lt) \
+ V(Mips64F32x4Le) \
+ V(Mips64F32x4Pmin) \
+ V(Mips64F32x4Pmax) \
+ V(Mips64F32x4Ceil) \
+ V(Mips64F32x4Floor) \
+ V(Mips64F32x4Trunc) \
+ V(Mips64F32x4NearestInt) \
+ V(Mips64F32x4DemoteF64x2Zero) \
+ V(Mips64I32x4SConvertF32x4) \
+ V(Mips64I32x4UConvertF32x4) \
+ V(Mips64I32x4Neg) \
+ V(Mips64I32x4GtS) \
+ V(Mips64I32x4GeS) \
+ V(Mips64I32x4GtU) \
+ V(Mips64I32x4GeU) \
+ V(Mips64I32x4Abs) \
+ V(Mips64I32x4BitMask) \
+ V(Mips64I32x4DotI16x8S) \
+ V(Mips64I32x4TruncSatF64x2SZero) \
+ V(Mips64I32x4TruncSatF64x2UZero) \
+ V(Mips64I16x8Splat) \
+ V(Mips64I16x8ExtractLaneU) \
+ V(Mips64I16x8ExtractLaneS) \
+ V(Mips64I16x8ReplaceLane) \
+ V(Mips64I16x8Neg) \
+ V(Mips64I16x8Shl) \
+ V(Mips64I16x8ShrS) \
+ V(Mips64I16x8ShrU) \
+ V(Mips64I16x8Add) \
+ V(Mips64I16x8AddSatS) \
+ V(Mips64I16x8Sub) \
+ V(Mips64I16x8SubSatS) \
+ V(Mips64I16x8Mul) \
+ V(Mips64I16x8MaxS) \
+ V(Mips64I16x8MinS) \
+ V(Mips64I16x8Eq) \
+ V(Mips64I16x8Ne) \
+ V(Mips64I16x8GtS) \
+ V(Mips64I16x8GeS) \
+ V(Mips64I16x8AddSatU) \
+ V(Mips64I16x8SubSatU) \
+ V(Mips64I16x8MaxU) \
+ V(Mips64I16x8MinU) \
+ V(Mips64I16x8GtU) \
+ V(Mips64I16x8GeU) \
+ V(Mips64I16x8RoundingAverageU) \
+ V(Mips64I16x8Abs) \
+ V(Mips64I16x8BitMask) \
+ V(Mips64I16x8Q15MulRSatS) \
+ V(Mips64I8x16Splat) \
+ V(Mips64I8x16ExtractLaneU) \
+ V(Mips64I8x16ExtractLaneS) \
+ V(Mips64I8x16ReplaceLane) \
+ V(Mips64I8x16Neg) \
+ V(Mips64I8x16Shl) \
+ V(Mips64I8x16ShrS) \
+ V(Mips64I8x16Add) \
+ V(Mips64I8x16AddSatS) \
+ V(Mips64I8x16Sub) \
+ V(Mips64I8x16SubSatS) \
+ V(Mips64I8x16MaxS) \
+ V(Mips64I8x16MinS) \
+ V(Mips64I8x16Eq) \
+ V(Mips64I8x16Ne) \
+ V(Mips64I8x16GtS) \
+ V(Mips64I8x16GeS) \
+ V(Mips64I8x16ShrU) \
+ V(Mips64I8x16AddSatU) \
+ V(Mips64I8x16SubSatU) \
+ V(Mips64I8x16MaxU) \
+ V(Mips64I8x16MinU) \
+ V(Mips64I8x16GtU) \
+ V(Mips64I8x16GeU) \
+ V(Mips64I8x16RoundingAverageU) \
+ V(Mips64I8x16Abs) \
+ V(Mips64I8x16Popcnt) \
+ V(Mips64I8x16BitMask) \
+ V(Mips64S128And) \
+ V(Mips64S128Or) \
+ V(Mips64S128Xor) \
+ V(Mips64S128Not) \
+ V(Mips64S128Select) \
+ V(Mips64S128AndNot) \
+ V(Mips64I64x2AllTrue) \
+ V(Mips64I32x4AllTrue) \
+ V(Mips64I16x8AllTrue) \
+ V(Mips64I8x16AllTrue) \
+ V(Mips64V128AnyTrue) \
+ V(Mips64S32x4InterleaveRight) \
+ V(Mips64S32x4InterleaveLeft) \
+ V(Mips64S32x4PackEven) \
+ V(Mips64S32x4PackOdd) \
+ V(Mips64S32x4InterleaveEven) \
+ V(Mips64S32x4InterleaveOdd) \
+ V(Mips64S32x4Shuffle) \
+ V(Mips64S16x8InterleaveRight) \
+ V(Mips64S16x8InterleaveLeft) \
+ V(Mips64S16x8PackEven) \
+ V(Mips64S16x8PackOdd) \
+ V(Mips64S16x8InterleaveEven) \
+ V(Mips64S16x8InterleaveOdd) \
+ V(Mips64S16x4Reverse) \
+ V(Mips64S16x2Reverse) \
+ V(Mips64S8x16InterleaveRight) \
+ V(Mips64S8x16InterleaveLeft) \
+ V(Mips64S8x16PackEven) \
+ V(Mips64S8x16PackOdd) \
+ V(Mips64S8x16InterleaveEven) \
+ V(Mips64S8x16InterleaveOdd) \
+ V(Mips64I8x16Shuffle) \
+ V(Mips64I8x16Swizzle) \
+ V(Mips64S8x16Concat) \
+ V(Mips64S8x8Reverse) \
+ V(Mips64S8x4Reverse) \
+ V(Mips64S8x2Reverse) \
+ V(Mips64S128LoadSplat) \
+ V(Mips64S128Load8x8S) \
+ V(Mips64S128Load8x8U) \
+ V(Mips64S128Load16x4S) \
+ V(Mips64S128Load16x4U) \
+ V(Mips64S128Load32x2S) \
+ V(Mips64S128Load32x2U) \
+ V(Mips64S128Load32Zero) \
+ V(Mips64S128Load64Zero) \
+ V(Mips64S128LoadLane) \
+ V(Mips64S128StoreLane) \
+ V(Mips64MsaLd) \
+ V(Mips64MsaSt) \
+ V(Mips64I32x4SConvertI16x8Low) \
+ V(Mips64I32x4SConvertI16x8High) \
+ V(Mips64I32x4UConvertI16x8Low) \
+ V(Mips64I32x4UConvertI16x8High) \
+ V(Mips64I16x8SConvertI8x16Low) \
+ V(Mips64I16x8SConvertI8x16High) \
+ V(Mips64I16x8SConvertI32x4) \
+ V(Mips64I16x8UConvertI32x4) \
+ V(Mips64I16x8UConvertI8x16Low) \
+ V(Mips64I16x8UConvertI8x16High) \
+ V(Mips64I8x16SConvertI16x8) \
+ V(Mips64I8x16UConvertI16x8) \
+ V(Mips64StoreCompressTagged) \
+ V(Mips64Word64AtomicLoadUint64) \
+ V(Mips64Word64AtomicStoreWord64) \
+ V(Mips64Word64AtomicAddUint64) \
+ V(Mips64Word64AtomicSubUint64) \
+ V(Mips64Word64AtomicAndUint64) \
+ V(Mips64Word64AtomicOrUint64) \
+ V(Mips64Word64AtomicXorUint64) \
+ V(Mips64Word64AtomicExchangeUint64) \
V(Mips64Word64AtomicCompareExchangeUint64)
// Addressing modes represent the "shape" of inputs to an instruction.
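The opcode list above drops the MIPS64-specific per-width atomic opcodes (kMips64Word64Atomic*Uint8/16/32): this commit moves those onto width-agnostic kAtomic* opcodes shared across backends, with the access width encoded into the InstructionCode via AtomicWidthField (both names appear in the hunks that follow). A minimal sketch of that kind of encoding — the field position here is illustrative, not V8's actual layout:

#include <cstdint>

// Toy stand-ins for V8's InstructionCode and AtomicWidthField: the opcode
// occupies the low bits and one high bit records the atomic access width.
enum class AtomicWidth : uint32_t { kWord32 = 0, kWord64 = 1 };
using InstructionCode = uint32_t;
constexpr uint32_t kAtomicWidthShift = 24;  // assumed position, not V8's

constexpr InstructionCode EncodeAtomicWidth(InstructionCode code,
                                            AtomicWidth width) {
  return code | (static_cast<uint32_t>(width) << kAtomicWidthShift);
}

constexpr AtomicWidth DecodeAtomicWidth(InstructionCode code) {
  return static_cast<AtomicWidth>((code >> kAtomicWidthShift) & 1u);
}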
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
index c63e0aa3d3..f79e334ed6 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-scheduler-mips64.cc
@@ -375,9 +375,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64S128Load32Zero:
case kMips64S128Load64Zero:
case kMips64S128LoadLane:
- case kMips64Word64AtomicLoadUint8:
- case kMips64Word64AtomicLoadUint16:
- case kMips64Word64AtomicLoadUint32:
case kMips64Word64AtomicLoadUint64:
return kIsLoadOperation;
@@ -400,37 +397,14 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kMips64Uswc1:
case kMips64Sync:
case kMips64S128StoreLane:
- case kMips64Word64AtomicStoreWord8:
- case kMips64Word64AtomicStoreWord16:
- case kMips64Word64AtomicStoreWord32:
+ case kMips64StoreCompressTagged:
case kMips64Word64AtomicStoreWord64:
- case kMips64Word64AtomicAddUint8:
- case kMips64Word64AtomicAddUint16:
- case kMips64Word64AtomicAddUint32:
case kMips64Word64AtomicAddUint64:
- case kMips64Word64AtomicSubUint8:
- case kMips64Word64AtomicSubUint16:
- case kMips64Word64AtomicSubUint32:
case kMips64Word64AtomicSubUint64:
- case kMips64Word64AtomicAndUint8:
- case kMips64Word64AtomicAndUint16:
- case kMips64Word64AtomicAndUint32:
case kMips64Word64AtomicAndUint64:
- case kMips64Word64AtomicOrUint8:
- case kMips64Word64AtomicOrUint16:
- case kMips64Word64AtomicOrUint32:
case kMips64Word64AtomicOrUint64:
- case kMips64Word64AtomicXorUint8:
- case kMips64Word64AtomicXorUint16:
- case kMips64Word64AtomicXorUint32:
case kMips64Word64AtomicXorUint64:
- case kMips64Word64AtomicExchangeUint8:
- case kMips64Word64AtomicExchangeUint16:
- case kMips64Word64AtomicExchangeUint32:
case kMips64Word64AtomicExchangeUint64:
- case kMips64Word64AtomicCompareExchangeUint8:
- case kMips64Word64AtomicCompareExchangeUint16:
- case kMips64Word64AtomicCompareExchangeUint32:
case kMips64Word64AtomicCompareExchangeUint64:
return kHasSideEffect;
@@ -1352,8 +1326,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return DadduLatency(false) + AndLatency(false) + AssertLatency() +
DadduLatency(false) + AndLatency(false) + BranchShortLatency() +
1 + DsubuLatency() + DadduLatency();
- case kArchWordPoisonOnSpeculation:
- return AndLatency();
case kIeee754Float64Acos:
case kIeee754Float64Acosh:
case kIeee754Float64Asin:
@@ -1740,35 +1712,35 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return ByteSwapSignedLatency();
case kMips64ByteSwap32:
return ByteSwapSignedLatency();
- case kWord32AtomicLoadInt8:
- case kWord32AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kWord32AtomicLoadUint16:
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
return 2;
- case kWord32AtomicStoreWord8:
- case kWord32AtomicStoreWord16:
- case kWord32AtomicStoreWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
return 3;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
return Word32AtomicExchangeLatency(true, 8);
- case kWord32AtomicExchangeUint8:
+ case kAtomicExchangeUint8:
return Word32AtomicExchangeLatency(false, 8);
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
return Word32AtomicExchangeLatency(true, 16);
- case kWord32AtomicExchangeUint16:
+ case kAtomicExchangeUint16:
return Word32AtomicExchangeLatency(false, 16);
- case kWord32AtomicExchangeWord32:
+ case kAtomicExchangeWord32:
return 2 + LlLatency(0) + 1 + ScLatency(0) + BranchShortLatency() + 1;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
return Word32AtomicCompareExchangeLatency(true, 8);
- case kWord32AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
return Word32AtomicCompareExchangeLatency(false, 8);
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
return Word32AtomicCompareExchangeLatency(true, 16);
- case kWord32AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
return Word32AtomicCompareExchangeLatency(false, 16);
- case kWord32AtomicCompareExchangeWord32:
+ case kAtomicCompareExchangeWord32:
return 3 + LlLatency(0) + BranchShortLatency() + 1 + ScLatency(0) +
BranchShortLatency() + 1;
case kMips64AssertEqual:
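The scheduler hunks above remove the kMips64Word64Atomic*Uint8/16/32 cases without losing coverage: the shared kAtomic* opcodes land in the same buckets, loads under kIsLoadOperation and stores/read-modify-write ops under kHasSideEffect. A compressed sketch of that classification, with flag values assumed rather than taken from V8:

// Pure loads may be reordered only with respect to other loads; stores and
// atomic RMW ops pin program order for the scheduler. Values illustrative.
enum SchedulerFlags {
  kNoOpcodeFlags = 0,
  kIsLoadOperation = 1 << 0,
  kHasSideEffect = 1 << 1,
};

int ClassifyAtomicOp(bool is_pure_load) {
  return is_pure_load ? kIsLoadOperation : kHasSideEffect;
}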
diff --git a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
index bec7bbefdc..192f82c9db 100644
--- a/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
+++ b/deps/v8/src/compiler/backend/mips64/instruction-selector-mips64.cc
@@ -515,16 +515,10 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kNone:
UNREACHABLE();
}
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
- }
EmitLoad(this, node, opcode);
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -2041,10 +2035,13 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
bool IsNodeUnsigned(Node* n) {
NodeMatcher m(n);
- if (m.IsLoad() || m.IsUnalignedLoad() || m.IsPoisonedLoad() ||
- m.IsProtectedLoad() || m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
+ if (m.IsLoad() || m.IsUnalignedLoad() || m.IsProtectedLoad()) {
LoadRepresentation load_rep = LoadRepresentationOf(n->op());
return load_rep.IsUnsigned();
+ } else if (m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(n->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ return load_rep.IsUnsigned();
} else {
return m.IsUint32Div() || m.IsUint32LessThan() ||
m.IsUint32LessThanOrEqual() || m.IsUint32Mod() ||
@@ -2144,12 +2141,43 @@ void EmitWordCompareZero(InstructionSelector* selector, Node* value,
}
void VisitAtomicLoad(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ AtomicWidth width) {
Mips64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
- if (g.CanBeImmediate(index, opcode)) {
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+
+ // The memory order is ignored.
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ InstructionCode code;
+ switch (load_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+ code = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
+ break;
+ case MachineRepresentation::kWord16:
+ DCHECK_IMPLIES(load_rep.IsSigned(), width == AtomicWidth::kWord32);
+ code = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
+ break;
+ case MachineRepresentation::kWord32:
+ code = kAtomicLoadWord32;
+ break;
+ case MachineRepresentation::kWord64:
+ code = kMips64Word64AtomicLoadUint64;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ DCHECK_EQ(kTaggedSize, 8);
+ code = kMips64Word64AtomicLoadUint64;
+ break;
+ default:
+ UNREACHABLE();
+ }
+
+ if (g.CanBeImmediate(index, code)) {
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
g.DefineAsRegister(node), g.UseRegister(base),
g.UseImmediate(index));
} else {
@@ -2157,35 +2185,93 @@ void VisitAtomicLoad(InstructionSelector* selector, Node* node,
selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
addr_reg, g.UseRegister(index), g.UseRegister(base));
// Emit desired load opcode, using temp addr_reg.
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
}
}
void VisitAtomicStore(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ AtomicWidth width) {
Mips64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- if (g.CanBeImmediate(index, opcode)) {
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
- g.UseRegisterOrImmediateZero(value));
+ // The memory order is ignored.
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ WriteBarrierKind write_barrier_kind = store_params.write_barrier_kind();
+ MachineRepresentation rep = store_params.representation();
+
+ if (FLAG_enable_unconditional_write_barriers &&
+ CanBeTaggedOrCompressedPointer(rep)) {
+ write_barrier_kind = kFullWriteBarrier;
+ }
+
+ InstructionCode code;
+
+ if (write_barrier_kind != kNoWriteBarrier && !FLAG_disable_write_barriers) {
+ DCHECK(CanBeTaggedPointer(rep));
+ DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
+
+ InstructionOperand inputs[3];
+ size_t input_count = 0;
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] = g.UseUniqueRegister(index);
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ RecordWriteMode record_write_mode =
+ WriteBarrierKindToRecordWriteMode(write_barrier_kind);
+ InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
+ size_t const temp_count = arraysize(temps);
+ code = kArchAtomicStoreWithWriteBarrier;
+ code |= MiscField::encode(static_cast<int>(record_write_mode));
+ selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
- InstructionOperand addr_reg = g.TempRegister();
- selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
- addr_reg, g.UseRegister(index), g.UseRegister(base));
- // Emit desired store opcode, using temp addr_reg.
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.NoOutput(), addr_reg, g.TempImmediate(0),
- g.UseRegisterOrImmediateZero(value));
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ code = kAtomicStoreWord8;
+ break;
+ case MachineRepresentation::kWord16:
+ code = kAtomicStoreWord16;
+ break;
+ case MachineRepresentation::kWord32:
+ code = kAtomicStoreWord32;
+ break;
+ case MachineRepresentation::kWord64:
+ DCHECK_EQ(width, AtomicWidth::kWord64);
+ code = kMips64Word64AtomicStoreWord64;
+ break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ DCHECK_EQ(AtomicWidthSize(width), kTaggedSize);
+ code = kMips64StoreCompressTagged;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ code |= AtomicWidthField::encode(width);
+
+ if (g.CanBeImmediate(index, code)) {
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
+ g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
+ g.UseRegisterOrImmediateZero(value));
+ } else {
+ InstructionOperand addr_reg = g.TempRegister();
+ selector->Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
+ addr_reg, g.UseRegister(index), g.UseRegister(base));
+ // Emit desired store opcode, using temp addr_reg.
+ selector->Emit(code | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
+ g.NoOutput(), addr_reg, g.TempImmediate(0),
+ g.UseRegisterOrImmediateZero(value));
+ }
}
}
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
Mips64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2203,12 +2289,13 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
temp[0] = g.TempRegister();
temp[1] = g.TempRegister();
temp[2] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
}
void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
Mips64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2228,12 +2315,13 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
temp[0] = g.TempRegister();
temp[1] = g.TempRegister();
temp[2] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
}
void VisitAtomicBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
Mips64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2252,7 +2340,8 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
temps[1] = g.TempRegister();
temps[2] = g.TempRegister();
temps[3] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
}
@@ -2615,163 +2704,93 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode =
- load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
- : kWord32AtomicLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicLoadWord32;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicLoad(this, node, opcode);
+ VisitAtomicLoad(this, node, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kWord32AtomicStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kWord32AtomicStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicStoreWord32;
- break;
- default:
- UNREACHABLE();
- }
-
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicStore(this, node, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (load_rep.representation()) {
- case MachineRepresentation::kWord8:
- opcode = kMips64Word64AtomicLoadUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kMips64Word64AtomicLoadUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kMips64Word64AtomicLoadUint32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kMips64Word64AtomicLoadUint64;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicLoad(this, node, opcode);
+ VisitAtomicLoad(this, node, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kMips64Word64AtomicStoreWord8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kMips64Word64AtomicStoreWord16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kMips64Word64AtomicStoreWord32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kMips64Word64AtomicStoreWord64;
- break;
- default:
- UNREACHABLE();
- }
-
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicStore(this, node, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kMips64Word64AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kMips64Word64AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kMips64Word64AtomicExchangeUint32;
+ opcode = kAtomicExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kMips64Word64AtomicExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kMips64Word64AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kMips64Word64AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kMips64Word64AtomicCompareExchangeUint32;
+ opcode = kAtomicCompareExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kMips64Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
@@ -2792,15 +2811,14 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2825,14 +2843,14 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}
#define VISIT_ATOMIC_BINOP(op) \
void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
- VisitWord64AtomicBinaryOperation( \
- node, kMips64Word64Atomic##op##Uint8, kMips64Word64Atomic##op##Uint16, \
- kMips64Word64Atomic##op##Uint32, kMips64Word64Atomic##op##Uint64); \
+ VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
+ kAtomic##op##Uint16, kAtomic##op##Word32, \
+ kMips64Word64Atomic##op##Uint64); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
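The Word64 VISIT_ATOMIC_BINOP macro above now mixes shared and MIPS64-specific opcode names. Manually expanding VISIT_ATOMIC_BINOP(Add) (preprocessor output shown for illustration, not standalone code) makes the result concrete: only the 64-bit case keeps an architecture-specific opcode.

void InstructionSelector::VisitWord64AtomicAdd(Node* node) {
  VisitWord64AtomicBinaryOperation(node, kAtomicAddUint8, kAtomicAddUint16,
                                   kAtomicAddWord32,
                                   kMips64Word64AtomicAddUint64);
}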
diff --git a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
index cf324353f2..0bf29ba686 100644
--- a/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/code-generator-ppc.cc
@@ -38,9 +38,7 @@ class PPCOperandConverter final : public InstructionOperandConverter {
RCBit OutputRCBit() const {
switch (instr_->flags_mode()) {
case kFlags_branch:
- case kFlags_branch_and_poison:
case kFlags_deoptimize:
- case kFlags_deoptimize_and_poison:
case kFlags_set:
case kFlags_trap:
case kFlags_select:
@@ -289,15 +287,6 @@ Condition FlagsConditionToCondition(FlagsCondition condition, ArchOpcode op) {
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
- PPCOperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(instr->opcode());
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->and_(value, value, kSpeculationPoisonRegister);
- }
-}
-
} // namespace
#define ASSEMBLE_FLOAT_UNOP_RC(asm_instr, round) \
@@ -777,25 +766,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne, cr0);
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- Register scratch = kScratchReg;
-
- __ ComputeCodeStartAddress(scratch);
-
- // Calculate a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- __ CmpS64(kJavaScriptCallCodeStartRegister, scratch);
- __ li(scratch, Operand::Zero());
- __ notx(kSpeculationPoisonRegister, scratch);
- __ isel(eq, kSpeculationPoisonRegister, kSpeculationPoisonRegister, scratch);
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ and_(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ and_(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ and_(sp, sp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -1164,10 +1134,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand(offset.offset()), r0);
break;
}
- case kArchWordPoisonOnSpeculation:
- __ and_(i.OutputRegister(), i.InputRegister(0),
- kSpeculationPoisonRegister);
- break;
case kPPC_Peek: {
int reverse_slot = i.InputInt32(0);
int offset =
@@ -1953,10 +1919,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
case kPPC_BitcastFloat32ToInt32:
- __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0));
+ __ MovFloatToInt(i.OutputRegister(), i.InputDoubleRegister(0),
+ kScratchDoubleReg);
break;
case kPPC_BitcastInt32ToFloat32:
- __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0));
+ __ MovIntToFloat(i.OutputDoubleRegister(), i.InputRegister(0), ip);
break;
#if V8_TARGET_ARCH_PPC64
case kPPC_BitcastDoubleToInt64:
@@ -1968,33 +1935,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif
case kPPC_LoadWordU8:
ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordS8:
ASSEMBLE_LOAD_INTEGER(lbz, lbzx);
__ extsb(i.OutputRegister(), i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordU16:
ASSEMBLE_LOAD_INTEGER(lhz, lhzx);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordS16:
ASSEMBLE_LOAD_INTEGER(lha, lhax);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordU32:
ASSEMBLE_LOAD_INTEGER(lwz, lwzx);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kPPC_LoadWordS32:
ASSEMBLE_LOAD_INTEGER(lwa, lwax);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
#if V8_TARGET_ARCH_PPC64
case kPPC_LoadWord64:
ASSEMBLE_LOAD_INTEGER(ld, ldx);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
#endif
case kPPC_LoadFloat32:
@@ -2051,25 +2011,17 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveRC, i.OutputRCBit());
break;
}
- case kWord32AtomicLoadInt8:
- case kPPC_AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kPPC_AtomicLoadUint16:
- case kPPC_AtomicLoadWord32:
- case kPPC_AtomicLoadWord64:
- case kPPC_AtomicStoreUint8:
- case kPPC_AtomicStoreUint16:
- case kPPC_AtomicStoreWord32:
- case kPPC_AtomicStoreWord64:
+ case kAtomicLoadInt8:
+ case kAtomicLoadInt16:
UNREACHABLE();
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
__ extsb(i.OutputRegister(0), i.OutputRegister(0));
break;
case kPPC_AtomicExchangeUint8:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lbarx, stbcx);
break;
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(lharx, sthcx);
__ extsh(i.OutputRegister(0), i.OutputRegister(0));
break;
@@ -2082,13 +2034,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kPPC_AtomicExchangeWord64:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(ldarx, stdcx);
break;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(CmpS64, lbarx, stbcx, extsb);
break;
case kPPC_AtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE(CmpS64, lbarx, stbcx, ZeroExtByte);
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_SIGN_EXT(CmpS64, lharx, sthcx, extsh);
break;
case kPPC_AtomicCompareExchangeUint16:
@@ -2135,6 +2087,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register input = i.InputRegister(0);
Register output = i.OutputRegister();
Register temp1 = r0;
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
+ __ brw(output, input);
+ break;
+ }
__ rotlwi(temp1, input, 8);
__ rlwimi(temp1, input, 24, 0, 7);
__ rlwimi(temp1, input, 24, 16, 23);
@@ -2143,7 +2099,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kPPC_LoadByteRev32: {
ASSEMBLE_LOAD_INTEGER_RR(lwbrx);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
}
case kPPC_StoreByteRev32: {
@@ -2156,6 +2111,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Register temp1 = r0;
Register temp2 = kScratchReg;
Register temp3 = i.TempRegister(0);
+ if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
+ __ brd(output, input);
+ break;
+ }
__ rldicl(temp1, input, 32, 32);
__ rotlwi(temp2, input, 8);
__ rlwimi(temp2, input, 24, 0, 7);
@@ -2169,7 +2128,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kPPC_LoadByteRev64: {
ASSEMBLE_LOAD_INTEGER_RR(ldbrx);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
}
case kPPC_StoreByteRev64: {
@@ -2186,7 +2144,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kPPC_F32x4Splat: {
Simd128Register dst = i.OutputSimd128Register();
- __ MovFloatToInt(kScratchReg, i.InputDoubleRegister(0));
+ __ MovFloatToInt(kScratchReg, i.InputDoubleRegister(0),
+ kScratchDoubleReg);
__ mtvsrd(dst, kScratchReg);
__ vspltw(dst, dst, Operand(1));
break;
@@ -2229,7 +2188,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vextractuw(kScratchSimd128Reg, i.InputSimd128Register(0),
Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
__ mfvsrd(kScratchReg, kScratchSimd128Reg);
- __ MovIntToFloat(i.OutputDoubleRegister(), kScratchReg);
+ __ MovIntToFloat(i.OutputDoubleRegister(), kScratchReg, ip);
break;
}
case kPPC_I64x2ExtractLane: {
@@ -2292,7 +2251,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
constexpr int lane_width_in_bytes = 4;
Simd128Register dst = i.OutputSimd128Register();
- __ MovFloatToInt(r0, i.InputDoubleRegister(2));
+ __ MovFloatToInt(r0, i.InputDoubleRegister(2), kScratchDoubleReg);
if (CpuFeatures::IsSupported(PPC_10_PLUS)) {
__ vinsw(dst, r0, Operand((3 - i.InputInt8(1)) * lane_width_in_bytes));
} else {
@@ -3522,7 +3481,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
MemOperand operand = i.MemoryOperand(&mode, &index);
DCHECK_EQ(mode, kMode_MRR);
__ vextractub(kScratchSimd128Reg, i.InputSimd128Register(0),
- Operand(15 - i.InputInt8(3)));
+ Operand(15 - i.InputUint8(3)));
__ stxsibx(kScratchSimd128Reg, operand);
break;
}
@@ -3799,21 +3758,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(John) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual ||
- condition == kOverflow || condition == kNotOverflow) {
- return;
- }
-
- ArchOpcode op = instr->arch_opcode();
- condition = NegateFlagsCondition(condition);
- __ li(kScratchReg, Operand::Zero());
- __ isel(FlagsConditionToCondition(condition, op), kSpeculationPoisonRegister,
- kScratchReg, kSpeculationPoisonRegister, cr0);
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -3940,7 +3884,6 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
break;
default:
UNREACHABLE();
- break;
}
} else {
if (reg_value != 0) __ li(reg, Operand::Zero());
@@ -4079,7 +4022,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
- ResetSpeculationPoison();
}
const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
@@ -4353,7 +4295,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): loading RPO constants on PPC.
- break;
}
if (destination->IsStackSlot()) {
__ StoreU64(dst, g.ToMemOperand(destination), r0);
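The kPPC_ByteRev32/kPPC_ByteRev64 hunks above add a Power10 fast path (brw/brd) ahead of the rotate-and-insert fallback. Both paths implement a plain byte reversal; for reference, a portable equivalent of the 32-bit case (a hypothetical helper, not part of the patch):

#include <cstdint>

constexpr uint32_t ByteSwap32(uint32_t x) {
  return ((x & 0x000000FFu) << 24) | ((x & 0x0000FF00u) << 8) |
         ((x & 0x00FF0000u) >> 8) | ((x & 0xFF000000u) >> 24);
}
static_assert(ByteSwap32(0x11223344u) == 0x44332211u, "byte order reversed");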
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
index 64f532a52b..4182e8b71b 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
+++ b/deps/v8/src/compiler/backend/ppc/instruction-codes-ppc.h
@@ -138,17 +138,6 @@ namespace compiler {
V(PPC_StoreSimd128) \
V(PPC_ByteRev32) \
V(PPC_ByteRev64) \
- V(PPC_CompressSigned) \
- V(PPC_CompressPointer) \
- V(PPC_CompressAny) \
- V(PPC_AtomicStoreUint8) \
- V(PPC_AtomicStoreUint16) \
- V(PPC_AtomicStoreWord32) \
- V(PPC_AtomicStoreWord64) \
- V(PPC_AtomicLoadUint8) \
- V(PPC_AtomicLoadUint16) \
- V(PPC_AtomicLoadWord32) \
- V(PPC_AtomicLoadWord64) \
V(PPC_AtomicExchangeUint8) \
V(PPC_AtomicExchangeUint16) \
V(PPC_AtomicExchangeWord32) \
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
index aeb1377879..0270dc401e 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-scheduler-ppc.cc
@@ -112,9 +112,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_BitcastDoubleToInt64:
case kPPC_ByteRev32:
case kPPC_ByteRev64:
- case kPPC_CompressSigned:
- case kPPC_CompressPointer:
- case kPPC_CompressAny:
case kPPC_F64x2Splat:
case kPPC_F64x2ExtractLane:
case kPPC_F64x2ReplaceLane:
@@ -332,10 +329,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_LoadFloat32:
case kPPC_LoadDouble:
case kPPC_LoadSimd128:
- case kPPC_AtomicLoadUint8:
- case kPPC_AtomicLoadUint16:
- case kPPC_AtomicLoadWord32:
- case kPPC_AtomicLoadWord64:
case kPPC_Peek:
case kPPC_LoadDecompressTaggedSigned:
case kPPC_LoadDecompressTaggedPointer:
@@ -378,10 +371,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kPPC_S128Store64Lane:
return kHasSideEffect;
- case kPPC_AtomicStoreUint8:
- case kPPC_AtomicStoreUint16:
- case kPPC_AtomicStoreWord32:
- case kPPC_AtomicStoreWord64:
case kPPC_AtomicExchangeUint8:
case kPPC_AtomicExchangeUint16:
case kPPC_AtomicExchangeWord32:
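Unlike MIPS64 and RISC-V, PPC keeps no dedicated atomic load/store opcodes after this change: the selector in the next file reuses the plain kPPC_Load*/kPPC_Store* opcodes and passes atomicity as a trailing immediate operand, which is why the kPPC_AtomicLoad*/kPPC_AtomicStore* scheduler cases above disappear. A simplified sketch of that convention, with the operand type assumed:

#include <cstdint>
#include <vector>

struct Operand {
  bool is_immediate;
  int64_t value;  // virtual register number or immediate payload
};

// Build the operand list for a load; the final immediate carries is_atomic,
// mirroring the g.UseImmediate(is_atomic) calls in VisitLoadCommon below.
std::vector<Operand> LoadOperands(int base_vreg, int offset_vreg,
                                  bool is_atomic) {
  return {{false, base_vreg}, {false, offset_vreg}, {true, is_atomic}};
}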
diff --git a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
index c74211aa38..bfa7c0a6e0 100644
--- a/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
+++ b/deps/v8/src/compiler/backend/ppc/instruction-selector-ppc.cc
@@ -167,9 +167,9 @@ void InstructionSelector::VisitAbortCSAAssert(Node* node) {
Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r4));
}
-void InstructionSelector::VisitLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- PPCOperandGenerator g(this);
+static void VisitLoadCommon(InstructionSelector* selector, Node* node,
+ LoadRepresentation load_rep) {
+ PPCOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* offset = node->InputAt(1);
InstructionCode opcode = kArchNop;
@@ -229,54 +229,51 @@ void InstructionSelector::VisitLoad(Node* node) {
UNREACHABLE();
}
- if (node->opcode() == IrOpcode::kPoisonedLoad &&
- poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
- opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
- }
-
bool is_atomic = (node->opcode() == IrOpcode::kWord32AtomicLoad ||
node->opcode() == IrOpcode::kWord64AtomicLoad);
if (g.CanBeImmediate(offset, mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(offset),
- g.UseImmediate(is_atomic));
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(base),
+ g.UseImmediate(offset), g.UseImmediate(is_atomic));
} else if (g.CanBeImmediate(base, mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI),
- g.DefineAsRegister(node), g.UseRegister(offset), g.UseImmediate(base),
- g.UseImmediate(is_atomic));
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.DefineAsRegister(node), g.UseRegister(offset),
+ g.UseImmediate(base), g.UseImmediate(is_atomic));
} else {
- Emit(opcode | AddressingModeField::encode(kMode_MRR),
- g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(offset),
- g.UseImmediate(is_atomic));
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.DefineAsRegister(node), g.UseRegister(base),
+ g.UseRegister(offset), g.UseImmediate(is_atomic));
}
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+void InstructionSelector::VisitLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ VisitLoadCommon(this, node, load_rep);
+}
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
}
-void InstructionSelector::VisitStore(Node* node) {
- PPCOperandGenerator g(this);
+void VisitStoreCommon(InstructionSelector* selector, Node* node,
+ StoreRepresentation store_rep,
+ base::Optional<AtomicMemoryOrder> atomic_order) {
+ PPCOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* offset = node->InputAt(1);
Node* value = node->InputAt(2);
+ // TODO(miladfarca): maybe use atomic_order?
bool is_atomic = (node->opcode() == IrOpcode::kWord32AtomicStore ||
node->opcode() == IrOpcode::kWord64AtomicStore);
- MachineRepresentation rep;
+ MachineRepresentation rep = store_rep.representation();
WriteBarrierKind write_barrier_kind = kNoWriteBarrier;
- if (is_atomic) {
- rep = AtomicStoreRepresentationOf(node->op());
- } else {
- StoreRepresentation store_rep = StoreRepresentationOf(node->op());
+ if (!is_atomic) {
write_barrier_kind = store_rep.write_barrier_kind();
- rep = store_rep.representation();
}
if (FLAG_enable_unconditional_write_barriers &&
@@ -312,7 +309,7 @@ void InstructionSelector::VisitStore(Node* node) {
code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
CHECK_EQ(is_atomic, false);
- Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
+ selector->Emit(code, 0, nullptr, input_count, inputs, temp_count, temps);
} else {
ArchOpcode opcode;
ImmediateMode mode = kInt16Imm;
@@ -346,7 +343,6 @@ void InstructionSelector::VisitStore(Node* node) {
break;
#else
UNREACHABLE();
- break;
#endif
case MachineRepresentation::kTaggedSigned: // Fall through.
case MachineRepresentation::kTaggedPointer: // Fall through.
@@ -374,21 +370,26 @@ void InstructionSelector::VisitStore(Node* node) {
}
if (g.CanBeImmediate(offset, mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(base), g.UseImmediate(offset), g.UseRegister(value),
- g.UseImmediate(is_atomic));
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.NoOutput(), g.UseRegister(base), g.UseImmediate(offset),
+ g.UseRegister(value), g.UseImmediate(is_atomic));
} else if (g.CanBeImmediate(base, mode)) {
- Emit(opcode | AddressingModeField::encode(kMode_MRI), g.NoOutput(),
- g.UseRegister(offset), g.UseImmediate(base), g.UseRegister(value),
- g.UseImmediate(is_atomic));
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ g.NoOutput(), g.UseRegister(offset), g.UseImmediate(base),
+ g.UseRegister(value), g.UseImmediate(is_atomic));
} else {
- Emit(opcode | AddressingModeField::encode(kMode_MRR), g.NoOutput(),
- g.UseRegister(base), g.UseRegister(offset), g.UseRegister(value),
- g.UseImmediate(is_atomic));
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRR),
+ g.NoOutput(), g.UseRegister(base), g.UseRegister(offset),
+ g.UseRegister(value), g.UseImmediate(is_atomic));
}
}
}
+void InstructionSelector::VisitStore(Node* node) {
+ VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
+ base::nullopt);
+}
+
void InstructionSelector::VisitProtectedStore(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -1956,16 +1957,28 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
Emit(kPPC_Sync, g.NoOutput());
}
-void InstructionSelector::VisitWord32AtomicLoad(Node* node) { VisitLoad(node); }
+void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ VisitLoadCommon(this, node, load_rep);
+}
-void InstructionSelector::VisitWord64AtomicLoad(Node* node) { VisitLoad(node); }
+void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ VisitLoadCommon(this, node, load_rep);
+}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- VisitStore(node);
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ VisitStoreCommon(this, node, store_params.store_representation(),
+ store_params.order());
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
- VisitStore(node);
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ VisitStoreCommon(this, node, store_params.store_representation(),
+ store_params.order());
}
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
@@ -1991,11 +2004,11 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
opcode = kPPC_AtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
opcode = kPPC_AtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
@@ -2052,11 +2065,11 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
opcode = kPPC_AtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
opcode = kPPC_AtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
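The RISC-V hunks that follow assemble atomic exchange and compare-exchange as load-linked/store-conditional loops (the Ll/Sc and Lld/Scd macro arguments). For reference, the contract those loops implement, written as a portable CAS retry loop — a stand-in, not code from the patch:

#include <atomic>
#include <cstdint>

uint32_t AtomicExchange32(std::atomic<uint32_t>& mem, uint32_t desired) {
  uint32_t observed = mem.load(std::memory_order_relaxed);
  // compare_exchange_weak refreshes `observed` on failure and may fail
  // spuriously, just as a store-conditional can; loop until it sticks.
  while (!mem.compare_exchange_weak(observed, desired,
                                    std::memory_order_seq_cst)) {
  }
  return observed;
}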
diff --git a/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
index 2d92ae1567..559378b19b 100644
--- a/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/code-generator-riscv64.cc
@@ -106,7 +106,6 @@ class RiscvOperandConverter final : public InstructionOperandConverter {
constant.ToDelayedStringConstant());
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): RPO immediates
- break;
}
UNREACHABLE();
}
@@ -307,17 +306,6 @@ FPUCondition FlagsConditionToConditionCmpFPU(bool* predicate,
UNREACHABLE();
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- RiscvOperandConverter const& i) {
- const MemoryAccessMode access_mode =
- static_cast<MemoryAccessMode>(MiscField::decode(opcode));
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->And(value, value, kSpeculationPoisonRegister);
- }
-}
-
} // namespace
#define ASSEMBLE_ATOMIC_LOAD_INTEGER(asm_instr) \
@@ -336,7 +324,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
#define ASSEMBLE_ATOMIC_BINOP(load_linked, store_conditional, bin_instr) \
do { \
Label binop; \
- __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
__ sync(); \
__ bind(&binop); \
__ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
@@ -351,7 +339,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
size, bin_instr, representation) \
do { \
Label binop; \
- __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
if (representation == 32) { \
__ And(i.TempRegister(3), i.TempRegister(0), 0x3); \
} else { \
@@ -380,7 +368,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
Label exchange; \
__ sync(); \
__ bind(&exchange); \
- __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
__ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
__ Move(i.TempRegister(1), i.InputRegister(2)); \
__ store_conditional(i.TempRegister(1), MemOperand(i.TempRegister(0), 0)); \
@@ -392,7 +380,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
load_linked, store_conditional, sign_extend, size, representation) \
do { \
Label exchange; \
- __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
if (representation == 32) { \
__ And(i.TempRegister(1), i.TempRegister(0), 0x3); \
} else { \
@@ -419,7 +407,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
do { \
Label compareExchange; \
Label exit; \
- __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
__ sync(); \
__ bind(&compareExchange); \
__ load_linked(i.OutputRegister(0), MemOperand(i.TempRegister(0), 0)); \
@@ -438,7 +426,7 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
do { \
Label compareExchange; \
Label exit; \
- __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
+ __ Add64(i.TempRegister(0), i.InputRegister(0), i.InputRegister(1)); \
if (representation == 32) { \
__ And(i.TempRegister(1), i.TempRegister(0), 0x3); \
} else { \
@@ -570,31 +558,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne, kScratchReg, Operand(zero_reg));
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- // Calculate a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- // difference = (current - expected) | (expected - current)
- // poison = ~(difference >> (kBitsPerSystemPointer - 1))
- __ ComputeCodeStartAddress(kScratchReg);
- __ Move(kSpeculationPoisonRegister, kScratchReg);
- __ Sub32(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ Sub32(kJavaScriptCallCodeStartRegister, kJavaScriptCallCodeStartRegister,
- kScratchReg);
- __ or_(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kJavaScriptCallCodeStartRegister);
- __ Sra64(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kBitsPerSystemPointer - 1);
- __ Nor(kSpeculationPoisonRegister, kSpeculationPoisonRegister,
- kSpeculationPoisonRegister);
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ And(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ And(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ And(sp, sp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -887,10 +850,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
- case kArchWordPoisonOnSpeculation:
- __ And(i.OutputRegister(), i.InputRegister(0),
- kSpeculationPoisonRegister);
- break;
case kIeee754Float64Acos:
ASSEMBLE_IEEE754_UNOP(acos);
break;
@@ -1094,17 +1053,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kRiscvPopcnt32: {
Register src = i.InputRegister(0);
Register dst = i.OutputRegister();
- __ Popcnt32(dst, src);
+ __ Popcnt32(dst, src, kScratchReg);
} break;
case kRiscvPopcnt64: {
Register src = i.InputRegister(0);
Register dst = i.OutputRegister();
- __ Popcnt64(dst, src);
+ __ Popcnt64(dst, src, kScratchReg);
} break;
case kRiscvShl32:
if (instr->InputAt(1)->IsRegister()) {
- __ Sll32(i.OutputRegister(), i.InputRegister(0),
- i.InputRegister(1));
+ __ Sll32(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int64_t imm = i.InputOperand(1).immediate();
__ Sll32(i.OutputRegister(), i.InputRegister(0),
@@ -1113,8 +1071,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kRiscvShr32:
if (instr->InputAt(1)->IsRegister()) {
- __ Srl32(i.OutputRegister(), i.InputRegister(0),
- i.InputRegister(1));
+ __ Srl32(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int64_t imm = i.InputOperand(1).immediate();
__ Srl32(i.OutputRegister(), i.InputRegister(0),
@@ -1123,8 +1080,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kRiscvSar32:
if (instr->InputAt(1)->IsRegister()) {
- __ Sra32(i.OutputRegister(), i.InputRegister(0),
- i.InputRegister(1));
+ __ Sra32(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
int64_t imm = i.InputOperand(1).immediate();
__ Sra32(i.OutputRegister(), i.InputRegister(0),
@@ -1553,30 +1509,24 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kRiscvLbu:
__ Lbu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvLb:
__ Lb(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvSb:
__ Sb(i.InputOrZeroRegister(2), i.MemoryOperand());
break;
case kRiscvLhu:
__ Lhu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvUlhu:
__ Ulhu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvLh:
__ Lh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvUlh:
__ Ulh(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvSh:
__ Sh(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1586,27 +1536,21 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kRiscvLw:
__ Lw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvUlw:
__ Ulw(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvLwu:
__ Lwu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvUlwu:
__ Ulwu(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvLd:
__ Ld(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvUld:
__ Uld(i.OutputRegister(), i.MemoryOperand());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kRiscvSw:
__ Sw(i.InputOrZeroRegister(2), i.MemoryOperand());
@@ -1625,7 +1569,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kRiscvULoadFloat: {
- __ ULoadFloat(i.OutputSingleRegister(), i.MemoryOperand());
+ __ ULoadFloat(i.OutputSingleRegister(), i.MemoryOperand(), kScratchReg);
break;
}
case kRiscvStoreFloat: {
@@ -1645,14 +1589,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (ft == kDoubleRegZero && !__ IsSingleZeroRegSet()) {
__ LoadFPRImmediate(kDoubleRegZero, 0.0f);
}
- __ UStoreFloat(ft, operand);
+ __ UStoreFloat(ft, operand, kScratchReg);
break;
}
case kRiscvLoadDouble:
__ LoadDouble(i.OutputDoubleRegister(), i.MemoryOperand());
break;
case kRiscvULoadDouble:
- __ ULoadDouble(i.OutputDoubleRegister(), i.MemoryOperand());
+ __ ULoadDouble(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
break;
case kRiscvStoreDouble: {
FPURegister ft = i.InputOrZeroDoubleRegister(2);
@@ -1667,7 +1611,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (ft == kDoubleRegZero && !__ IsDoubleZeroRegSet()) {
__ LoadFPRImmediate(kDoubleRegZero, 0.0);
}
- __ UStoreDouble(ft, i.MemoryOperand());
+ __ UStoreDouble(ft, i.MemoryOperand(), kScratchReg);
break;
}
case kRiscvSync: {
@@ -1723,156 +1667,175 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kRiscvByteSwap64: {
- __ ByteSwap(i.OutputRegister(0), i.InputRegister(0), 8);
+ __ ByteSwap(i.OutputRegister(0), i.InputRegister(0), 8, kScratchReg);
break;
}
case kRiscvByteSwap32: {
- __ ByteSwap(i.OutputRegister(0), i.InputRegister(0), 4);
+ __ ByteSwap(i.OutputRegister(0), i.InputRegister(0), 4, kScratchReg);
break;
}
- case kWord32AtomicLoadInt8:
+ case kAtomicLoadInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lb);
break;
- case kWord32AtomicLoadUint8:
+ case kAtomicLoadUint8:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
break;
- case kWord32AtomicLoadInt16:
+ case kAtomicLoadInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lh);
break;
- case kWord32AtomicLoadUint16:
+ case kAtomicLoadUint16:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
break;
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadWord32:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Lw);
break;
- case kRiscvWord64AtomicLoadUint8:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lbu);
- break;
- case kRiscvWord64AtomicLoadUint16:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lhu);
- break;
- case kRiscvWord64AtomicLoadUint32:
- ASSEMBLE_ATOMIC_LOAD_INTEGER(Lwu);
- break;
case kRiscvWord64AtomicLoadUint64:
ASSEMBLE_ATOMIC_LOAD_INTEGER(Ld);
break;
- case kWord32AtomicStoreWord8:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
- break;
- case kWord32AtomicStoreWord16:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
- break;
- case kWord32AtomicStoreWord32:
- ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
- break;
- case kRiscvWord64AtomicStoreWord8:
+ case kAtomicStoreWord8:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sb);
break;
- case kRiscvWord64AtomicStoreWord16:
+ case kAtomicStoreWord16:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sh);
break;
- case kRiscvWord64AtomicStoreWord32:
+ case kAtomicStoreWord32:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sw);
break;
case kRiscvWord64AtomicStoreWord64:
ASSEMBLE_ATOMIC_STORE_INTEGER(Sd);
break;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
break;
- case kWord32AtomicExchangeUint8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ case kAtomicExchangeUint8:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
+ break;
+ }
break;
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
break;
- case kWord32AtomicExchangeUint16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
- break;
- case kWord32AtomicExchangeWord32:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
- break;
- case kRiscvWord64AtomicExchangeUint8:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
- break;
- case kRiscvWord64AtomicExchangeUint16:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ case kAtomicExchangeUint16:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ break;
+ }
break;
- case kRiscvWord64AtomicExchangeUint32:
- ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ case kAtomicExchangeWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Ll, Sc);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ break;
+ }
break;
case kRiscvWord64AtomicExchangeUint64:
ASSEMBLE_ATOMIC_EXCHANGE_INTEGER(Lld, Scd);
break;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 8, 32);
break;
- case kWord32AtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ case kAtomicCompareExchangeUint8:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 8, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
+ break;
+ }
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, true, 16, 32);
break;
- case kWord32AtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
- break;
- case kWord32AtomicCompareExchangeWord32:
- __ Sll32(i.InputRegister(2), i.InputRegister(2), 0);
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
- break;
- case kRiscvWord64AtomicCompareExchangeUint8:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 8, 64);
- break;
- case kRiscvWord64AtomicCompareExchangeUint16:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ case kAtomicCompareExchangeUint16:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Ll, Sc, false, 16, 32);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 16, 64);
+ break;
+ }
break;
- case kRiscvWord64AtomicCompareExchangeUint32:
- ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ case kAtomicCompareExchangeWord32:
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ Sll32(i.InputRegister(2), i.InputRegister(2), 0);
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Ll, Sc);
+ break;
+ case AtomicWidth::kWord64:
+ ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER_EXT(Lld, Scd, false, 32, 64);
+ break;
+ }
break;
case kRiscvWord64AtomicCompareExchangeUint64:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_INTEGER(Lld, Scd);
break;
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst, 32); \
- break; \
- case kWord32Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst, 32); \
- break; \
- case kWord32Atomic##op##Int16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst, 32); \
- break; \
- case kWord32Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst, 32); \
- break; \
- case kWord32Atomic##op##Word32: \
- ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst); \
- break;
- ATOMIC_BINOP_CASE(Add, Add32)
- ATOMIC_BINOP_CASE(Sub, Sub32)
- ATOMIC_BINOP_CASE(And, And)
- ATOMIC_BINOP_CASE(Or, Or)
- ATOMIC_BINOP_CASE(Xor, Xor)
-#undef ATOMIC_BINOP_CASE
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kRiscvWord64Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst, 64); \
- break; \
- case kRiscvWord64Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst, 64); \
- break; \
- case kRiscvWord64Atomic##op##Uint32: \
- ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst, 64); \
- break; \
- case kRiscvWord64Atomic##op##Uint64: \
- ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst); \
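+// Each invocation expands into the five per-type cases below: Int8/Int16
+// exist only at 32-bit width, while the Uint8/Uint16/Word32 cases dispatch
+// on the AtomicWidth decoded from the opcode.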
+#define ATOMIC_BINOP_CASE(op, inst32, inst64) \
+ case kAtomic##op##Int8: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 8, inst32, 32); \
+ break; \
+ case kAtomic##op##Uint8: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 8, inst32, 32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 8, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Int16: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, true, 16, inst32, 32); \
+ break; \
+ case kAtomic##op##Uint16: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Ll, Sc, false, 16, inst32, 32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 16, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Word32: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP(Ll, Sc, inst32); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC_BINOP_EXT(Lld, Scd, false, 32, inst64, 64); \
+ break; \
+ } \
+ break; \
+ case kRiscvWord64Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC_BINOP(Lld, Scd, inst64); \
break;
- ATOMIC_BINOP_CASE(Add, Add64)
- ATOMIC_BINOP_CASE(Sub, Sub64)
- ATOMIC_BINOP_CASE(And, And)
- ATOMIC_BINOP_CASE(Or, Or)
- ATOMIC_BINOP_CASE(Xor, Xor)
+ ATOMIC_BINOP_CASE(Add, Add32, Add64)
+ ATOMIC_BINOP_CASE(Sub, Sub32, Sub64)
+ ATOMIC_BINOP_CASE(And, And, And)
+ ATOMIC_BINOP_CASE(Or, Or, Or)
+ ATOMIC_BINOP_CASE(Xor, Xor, Xor)
#undef ATOMIC_BINOP_CASE
case kRiscvAssertEqual:
__ Assert(eq, static_cast<AbortReason>(i.InputOperand(2).immediate()),
@@ -1905,7 +1868,543 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ DecompressAnyTagged(result, operand);
break;
}
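+    // RVV vector loads/stores take a bare base register, so a nonzero
+    // offset is first folded into kScratchReg with Add64.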
+ case kRiscvRvvSt: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ Register dst = i.MemoryOperand().offset() == 0 ? i.MemoryOperand().rm()
+ : kScratchReg;
+ if (i.MemoryOperand().offset() != 0) {
+ __ Add64(dst, i.MemoryOperand().rm(), i.MemoryOperand().offset());
+ }
+ __ vs(i.InputSimd128Register(2), dst, 0, VSew::E8);
+ break;
+ }
+ case kRiscvRvvLd: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ Register src = i.MemoryOperand().offset() == 0 ? i.MemoryOperand().rm()
+ : kScratchReg;
+ if (i.MemoryOperand().offset() != 0) {
+ __ Add64(src, i.MemoryOperand().rm(), i.MemoryOperand().offset());
+ }
+ __ vl(i.OutputSimd128Register(), src, 0, VSew::E8);
+ break;
+ }
+ case kRiscvS128Const: {
+ Simd128Register dst = i.OutputSimd128Register();
+ uint8_t imm[16];
+ *reinterpret_cast<uint64_t*>(imm) =
+ make_uint64(i.InputUint32(1), i.InputUint32(0));
+ *(reinterpret_cast<uint64_t*>(imm) + 1) =
+ make_uint64(i.InputUint32(3), i.InputUint32(2));
+ __ WasmRvvS128const(dst, imm);
+ break;
+ }
+ case kRiscvI64x2Add: {
+ (__ VU).set(kScratchReg, VSew::E64, Vlmul::m1);
+ __ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI32x4Add: {
+ (__ VU).set(kScratchReg, VSew::E32, Vlmul::m1);
+ __ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI16x8Add: {
+ (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
+ __ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI16x8AddSatS: {
+ (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
+ __ vsadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI16x8AddSatU: {
+ (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
+ __ vsaddu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI8x16Add: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI8x16AddSatS: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vsadd_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI8x16AddSatU: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vsaddu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI64x2Sub: {
+ (__ VU).set(kScratchReg, VSew::E64, Vlmul::m1);
+ __ vsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI32x4Sub: {
+ (__ VU).set(kScratchReg, VSew::E32, Vlmul::m1);
+ __ vsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI16x8Sub: {
+ (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
+ __ vsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI16x8SubSatS: {
+ (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
+ __ vssub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI16x8SubSatU: {
+ (__ VU).set(kScratchReg, VSew::E16, Vlmul::m1);
+ __ vssubu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI8x16Sub: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vsub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI8x16SubSatS: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vssub_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvI8x16SubSatU: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vssubu_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvS128And: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vand_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvS128Or: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vor_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvS128Xor: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vxor_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1));
+ break;
+ }
+ case kRiscvS128Not: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vnot_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ break;
+ }
+ case kRiscvS128AndNot: {
+ (__ VU).set(kScratchReg, VSew::E8, Vlmul::m1);
+ __ vnot_vv(i.OutputSimd128Register(), i.InputSimd128Register(1));
+ __ vand_vv(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.OutputSimd128Register());
+ break;
+ }
+ case kRiscvI32x4ExtractLane: {
+ __ WasmRvvExtractLane(i.OutputRegister(), i.InputSimd128Register(0),
+ i.InputInt8(1), E32, m1);
+ break;
+ }
+ case kRiscvI8x16Splat: {
+ (__ VU).set(kScratchReg, E8, m1);
+ __ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvI16x8Splat: {
+ (__ VU).set(kScratchReg, E16, m1);
+ __ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvI32x4Splat: {
+ (__ VU).set(kScratchReg, E32, m1);
+ __ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvI64x2Splat: {
+ (__ VU).set(kScratchReg, E64, m1);
+ __ vmv_vx(i.OutputSimd128Register(), i.InputRegister(0));
+ break;
+ }
+ case kRiscvI32x4Abs: {
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmv_vv(i.OutputSimd128Register(), i.InputSimd128Register(0));
+ __ vmslt_vv(v0, i.InputSimd128Register(0), kSimd128RegZero);
+ __ vsub_vv(i.OutputSimd128Register(), kSimd128RegZero,
+ i.InputSimd128Register(0), Mask);
+ break;
+ }
+ case kRiscvI8x16Eq: {
+ __ WasmRvvEq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E8, m1);
+ break;
+ }
+ case kRiscvI16x8Eq: {
+ __ WasmRvvEq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E16, m1);
+ break;
+ }
+ case kRiscvI32x4Eq: {
+ __ WasmRvvEq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E32, m1);
+ break;
+ }
+ case kRiscvI64x2Eq: {
+ __ WasmRvvEq(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E64, m1);
+ break;
+ }
+ case kRiscvI8x16Ne: {
+ __ WasmRvvNe(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E8, m1);
+ break;
+ }
+ case kRiscvI16x8Ne: {
+ __ WasmRvvNe(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E16, m1);
+ break;
+ }
+ case kRiscvI32x4Ne: {
+ __ WasmRvvNe(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E32, m1);
+ break;
+ }
+ case kRiscvI64x2Ne: {
+ __ WasmRvvNe(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E64, m1);
+ break;
+ }
+ case kRiscvI8x16GeS: {
+ __ WasmRvvGeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E8, m1);
+ break;
+ }
+ case kRiscvI16x8GeS: {
+ __ WasmRvvGeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E16, m1);
+ break;
+ }
+ case kRiscvI32x4GeS: {
+ __ WasmRvvGeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E32, m1);
+ break;
+ }
+ case kRiscvI64x2GeS: {
+ __ WasmRvvGeS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E64, m1);
+ break;
+ }
+ case kRiscvI8x16GeU: {
+ __ WasmRvvGeU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E8, m1);
+ break;
+ }
+ case kRiscvI16x8GeU: {
+ __ WasmRvvGeU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E16, m1);
+ break;
+ }
+ case kRiscvI32x4GeU: {
+ __ WasmRvvGeU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E32, m1);
+ break;
+ }
+ case kRiscvI8x16GtS: {
+ __ WasmRvvGtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E8, m1);
+ break;
+ }
+ case kRiscvI16x8GtS: {
+ __ WasmRvvGtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E16, m1);
+ break;
+ }
+ case kRiscvI32x4GtS: {
+      __ WasmRvvGtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E32, m1);
+ break;
+ }
+ case kRiscvI64x2GtS: {
+      __ WasmRvvGtS(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E64, m1);
+ break;
+ }
+ case kRiscvI8x16GtU: {
+ __ WasmRvvGtU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E8, m1);
+ break;
+ }
+ case kRiscvI16x8GtU: {
+ __ WasmRvvGtU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E16, m1);
+ break;
+ }
+ case kRiscvI32x4GtU: {
+ __ WasmRvvGtU(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), E32, m1);
+ break;
+ }
+ case kRiscvI8x16Shl: {
+ __ VU.set(kScratchReg, E8, m1);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(1));
+ } else {
+ __ vsll_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt3(1));
+ }
+ break;
+ }
+ case kRiscvI16x8Shl: {
+ __ VU.set(kScratchReg, E16, m1);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(1));
+ } else {
+ __ vsll_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt4(1));
+ }
+ break;
+ }
+ case kRiscvI32x4Shl: {
+ __ VU.set(kScratchReg, E32, m1);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(1));
+ } else {
+ __ vsll_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt5(1));
+ }
+ break;
+ }
+ case kRiscvI64x2Shl: {
+ __ VU.set(kScratchReg, E64, m1);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputRegister(1));
+ } else {
+ if (is_int5(i.InputInt6(1))) {
+ __ vsll_vi(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputInt6(1));
+ } else {
+ __ li(kScratchReg, i.InputInt6(1));
+ __ vsll_vx(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ kScratchReg);
+ }
+ }
+ break;
+ }
+ case kRiscvI8x16ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ VU.set(kScratchReg, E32, m1);
+ __ li(kScratchReg, 0x1 << i.InputInt8(1));
+ __ vmv_sx(v0, kScratchReg);
+ __ vmerge_vx(dst, i.InputRegister(2), src);
+ break;
+ }
+ case kRiscvI16x8ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ VU.set(kScratchReg, E16, m1);
+ __ li(kScratchReg, 0x1 << i.InputInt8(1));
+ __ vmv_sx(v0, kScratchReg);
+ __ vmerge_vx(dst, i.InputRegister(2), src);
+ break;
+ }
+ case kRiscvI64x2ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ VU.set(kScratchReg, E64, m1);
+ __ li(kScratchReg, 0x1 << i.InputInt8(1));
+ __ vmv_sx(v0, kScratchReg);
+ __ vmerge_vx(dst, i.InputRegister(2), src);
+ break;
+ }
+ case kRiscvI32x4ReplaceLane: {
+ Simd128Register src = i.InputSimd128Register(0);
+ Simd128Register dst = i.OutputSimd128Register();
+ __ VU.set(kScratchReg, E32, m1);
+ __ li(kScratchReg, 0x1 << i.InputInt8(1));
+ __ vmv_sx(v0, kScratchReg);
+ __ vmerge_vx(dst, i.InputRegister(2), src);
+ break;
+ }
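+    // BitMask: a signed less-than-zero compare writes one mask bit per lane
+    // into a vector mask register, which is then read back as a scalar.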
+ case kRiscvI8x16BitMask: {
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ VU.set(kScratchReg, E8, m1);
+ __ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmslt_vv(kSimd128ScratchReg, src, kSimd128RegZero);
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvI16x8BitMask: {
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ VU.set(kScratchReg, E16, m1);
+ __ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmslt_vv(kSimd128ScratchReg, src, kSimd128RegZero);
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvI32x4BitMask: {
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmslt_vv(kSimd128ScratchReg, src, kSimd128RegZero);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ break;
+ }
+ case kRiscvI64x2BitMask: {
+ Register dst = i.OutputRegister();
+ Simd128Register src = i.InputSimd128Register(0);
+ __ VU.set(kScratchReg, E64, m1);
+ __ vmv_vx(kSimd128RegZero, zero_reg);
+ __ vmslt_vv(kSimd128ScratchReg, src, kSimd128RegZero);
+ __ VU.set(kScratchReg, E32, m1);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ break;
+ }
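+    // AnyTrue/AllTrue use unsigned reductions: the max over all lanes is
+    // nonzero iff some lane is set, and the min is nonzero iff every lane is.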
+ case kRiscvV128AnyTrue: {
+ __ VU.set(kScratchReg, E8, m1);
+ Register dst = i.OutputRegister();
+ Label t;
+ __ vmv_sx(kSimd128ScratchReg, zero_reg);
+ __ vredmaxu_vs(kSimd128ScratchReg, i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ __ beq(dst, zero_reg, &t);
+ __ li(dst, 1);
+ __ bind(&t);
+ break;
+ }
+ case kRiscvI64x2AllTrue: {
+ __ VU.set(kScratchReg, E64, m1);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ li(kScratchReg, -1);
+ __ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ __ vredminu_vs(kSimd128ScratchReg, i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ __ beqz(dst, &all_true);
+ __ li(dst, 1);
+ __ bind(&all_true);
+ break;
+ }
+ case kRiscvI32x4AllTrue: {
+ __ VU.set(kScratchReg, E32, m1);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ li(kScratchReg, -1);
+ __ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ __ vredminu_vs(kSimd128ScratchReg, i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ __ beqz(dst, &all_true);
+ __ li(dst, 1);
+ __ bind(&all_true);
+ break;
+ }
+ case kRiscvI16x8AllTrue: {
+ __ VU.set(kScratchReg, E16, m1);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ li(kScratchReg, -1);
+ __ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ __ vredminu_vs(kSimd128ScratchReg, i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ __ beqz(dst, &all_true);
+ __ li(dst, 1);
+ __ bind(&all_true);
+ break;
+ }
+ case kRiscvI8x16AllTrue: {
+ __ VU.set(kScratchReg, E8, m1);
+ Register dst = i.OutputRegister();
+ Label all_true;
+ __ li(kScratchReg, -1);
+ __ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ __ vredminu_vs(kSimd128ScratchReg, i.InputSimd128Register(0),
+ kSimd128ScratchReg);
+ __ vmv_xs(dst, kSimd128ScratchReg);
+ __ beqz(dst, &all_true);
+ __ li(dst, 1);
+ __ bind(&all_true);
+ break;
+ }
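+    // A two-source byte shuffle is built from two vrgathers: indices below
+    // 16 gather from src0, the same indices minus 16 gather from src1, and
+    // the partial results are ORed together.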
+ case kRiscvI8x16Shuffle: {
+ VRegister dst = i.OutputSimd128Register(),
+ src0 = i.InputSimd128Register(0),
+ src1 = i.InputSimd128Register(1);
+
+ int64_t imm1 = make_uint64(i.InputInt32(3), i.InputInt32(2));
+ int64_t imm2 = make_uint64(i.InputInt32(5), i.InputInt32(4));
+ __ VU.set(kScratchReg, VSew::E64, Vlmul::m1);
+ __ li(kScratchReg, 1);
+ __ vmv_vx(v0, kScratchReg);
+ __ li(kScratchReg, imm1);
+ __ vmerge_vx(kSimd128ScratchReg, kScratchReg, kSimd128ScratchReg);
+ __ li(kScratchReg, imm2);
+ __ vsll_vi(v0, v0, 1);
+ __ vmerge_vx(kSimd128ScratchReg, kScratchReg, kSimd128ScratchReg);
+
+ __ VU.set(kScratchReg, E8, m1);
+ if (dst == src0) {
+ __ vmv_vv(kSimd128ScratchReg2, src0);
+ src0 = kSimd128ScratchReg2;
+ } else if (dst == src1) {
+ __ vmv_vv(kSimd128ScratchReg2, src1);
+ src1 = kSimd128ScratchReg2;
+ }
+ __ vrgather_vv(dst, src0, kSimd128ScratchReg);
+ __ vadd_vi(kSimd128ScratchReg, kSimd128ScratchReg, -16);
+ __ vrgather_vv(kSimd128ScratchReg, src1, kSimd128ScratchReg);
+ __ vor_vv(dst, dst, kSimd128ScratchReg);
+ break;
+ }
default:
+#ifdef DEBUG
+ switch (arch_opcode) {
+#define Print(name) \
+ case k##name: \
+ printf("k%s", #name); \
+ break;
+ TARGET_ARCH_OPCODE_LIST(Print);
+#undef Print
+ default:
+ break;
+ }
+#endif
UNIMPLEMENTED();
}
return kSuccess;
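Each RVV case above first configures the vector unit via VU.set(kScratchReg, E8..E64, m1) and only then issues the instruction, because in RVV the element width (SEW) lives in vtype state rather than in the opcode. A rough self-contained C++ model of that coupling (a toy sketch only; ToyVU, vadd_vv, and every value below are invented for illustration and are not V8 or RVV APIs):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Toy stand-in for the vector unit: SEW is whatever the last set() chose.
struct ToyVU {
  int sew_bits = 8;
  void set(int bits) { sew_bits = bits; }
};
static ToyVU VU;

// A 128-bit "vadd.vv": lane width comes from VU state, not from the call.
static void vadd_vv(uint8_t vd[16], const uint8_t vs2[16],
                    const uint8_t vs1[16]) {
  const int lane = VU.sew_bits / 8;
  for (int off = 0; off < 16; off += lane) {
    uint64_t a = 0, b = 0;
    std::memcpy(&a, vs2 + off, lane);  // little-endian host assumed
    std::memcpy(&b, vs1 + off, lane);
    const uint64_t r = a + b;          // truncated to the lane on copy-out
    std::memcpy(vd + off, &r, lane);
  }
}

int main() {
  uint8_t a[16], b[16] = {1}, d[16];   // b = {1, 0, 0, ...}
  std::memset(a, 0xFF, sizeof(a));
  VU.set(8);                            // like VU.set(kScratchReg, E8, m1)
  vadd_vv(d, a, b);
  std::printf("e8:  d[1]=%u\n", (unsigned)d[1]);  // 255: carry stays in lane 0
  VU.set(32);                           // like VU.set(kScratchReg, E32, m1)
  vadd_vv(d, a, b);
  std::printf("e32: d[1]=%u\n", (unsigned)d[1]);  // 0: carry crosses byte 1
  return 0;
}

The same 16 bytes add to different results, which is why every case re-selects SEW before its vector op.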
@@ -1916,6 +2415,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
<< "\""; \
UNIMPLEMENTED();
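+// Returns true for conditions that are also satisfied by equal operands;
+// used below to turn a compare of the hard-wired zero register against zero
+// into an unconditional branch.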
+bool IsIncludeEqual(Condition cc) {
+ switch (cc) {
+ case equal:
+ case greater_equal:
+ case less_equal:
+ case Uless_equal:
+ case Ugreater_equal:
+ return true;
+ default:
+ return false;
+ }
+}
+
void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
Instruction* instr, FlagsCondition condition,
Label* tlabel, Label* flabel, bool fallthru) {
@@ -1952,7 +2464,6 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
break;
default:
UNSUPPORTED_COND(instr->arch_opcode(), condition);
- break;
}
} else if (instr->arch_opcode() == kRiscvMulOvf32) {
// Overflow occurs if overflow register is not zero
@@ -1965,14 +2476,17 @@ void AssembleBranchToLabels(CodeGenerator* gen, TurboAssembler* tasm,
break;
default:
UNSUPPORTED_COND(kRiscvMulOvf32, condition);
- break;
}
} else if (instr->arch_opcode() == kRiscvCmp) {
cc = FlagsConditionToConditionCmp(condition);
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
} else if (instr->arch_opcode() == kRiscvCmpZero) {
cc = FlagsConditionToConditionCmp(condition);
- __ Branch(tlabel, cc, i.InputRegister(0), Operand(zero_reg));
+    if (i.InputOrZeroRegister(0) == zero_reg && IsIncludeEqual(cc)) {
+ __ Branch(tlabel);
+ } else if (i.InputOrZeroRegister(0) != zero_reg) {
+ __ Branch(tlabel, cc, i.InputRegister(0), Operand(zero_reg));
+ }
} else if (instr->arch_opcode() == kArchStackPointerGreaterThan) {
cc = FlagsConditionToConditionCmp(condition);
Register lhs_register = sp;
@@ -2011,110 +2525,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
branch->fallthru);
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
- return;
- }
-
- RiscvOperandConverter i(this, instr);
- condition = NegateFlagsCondition(condition);
-
- switch (instr->arch_opcode()) {
- case kRiscvCmp: {
- __ CompareI(kScratchReg, i.InputRegister(0), i.InputOperand(1),
- FlagsConditionToConditionCmp(condition));
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, kScratchReg);
- }
- return;
- case kRiscvCmpZero: {
- __ CompareI(kScratchReg, i.InputRegister(0), Operand(zero_reg),
- FlagsConditionToConditionCmp(condition));
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, kScratchReg);
- }
- return;
- case kRiscvTst: {
- switch (condition) {
- case kEqual:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- break;
- case kNotEqual:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg);
- break;
- default:
- UNREACHABLE();
- }
- }
- return;
- case kRiscvAdd64:
- case kRiscvSub64: {
- // Check for overflow creates 1 or 0 for result.
- __ Srl64(kScratchReg, i.OutputRegister(), 63);
- __ Srl32(kScratchReg2, i.OutputRegister(), 31);
- __ Xor(kScratchReg2, kScratchReg, kScratchReg2);
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg2);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kRiscvAddOvf64:
- case kRiscvSubOvf64: {
- // Overflow occurs if overflow register is negative
- __ Slt(kScratchReg2, kScratchReg, zero_reg);
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg2);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg2);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kRiscvMulOvf32: {
- // Overflow occurs if overflow register is not zero
- switch (condition) {
- case kOverflow:
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister,
- kScratchReg);
- break;
- case kNotOverflow:
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- break;
- default:
- UNSUPPORTED_COND(instr->arch_opcode(), condition);
- }
- }
- return;
- case kRiscvCmpS:
- case kRiscvCmpD: {
- bool predicate;
- FlagsConditionToConditionCmpFPU(&predicate, condition);
- if (predicate) {
- __ LoadZeroIfConditionNotZero(kSpeculationPoisonRegister, kScratchReg);
- } else {
- __ LoadZeroIfConditionZero(kSpeculationPoisonRegister, kScratchReg);
- }
- }
- return;
- default:
- UNREACHABLE();
- }
-}
-
#undef UNSUPPORTED_COND
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
@@ -2489,7 +2899,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
- ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -2735,7 +3144,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(titzer): loading RPO numbers
- break;
}
if (destination->IsStackSlot()) __ Sd(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
@@ -2765,7 +3173,21 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
} else if (source->IsFPRegister()) {
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kSimd128) {
- UNIMPLEMENTED();
+ VRegister src = g.ToSimd128Register(source);
+ if (destination->IsSimd128Register()) {
+ VRegister dst = g.ToSimd128Register(destination);
+ __ vmv_vv(dst, src);
+ } else {
+ DCHECK(destination->IsSimd128StackSlot());
+ Register dst = g.ToMemOperand(destination).offset() == 0
+ ? g.ToMemOperand(destination).rm()
+ : kScratchReg;
+ if (g.ToMemOperand(destination).offset() != 0) {
+ __ Add64(dst, g.ToMemOperand(destination).rm(),
+ g.ToMemOperand(destination).offset());
+ }
+ __ vs(src, dst, 0, E8);
+ }
} else {
FPURegister src = g.ToDoubleRegister(source);
if (destination->IsFPRegister()) {
@@ -2786,7 +3208,25 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
MemOperand src = g.ToMemOperand(source);
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kSimd128) {
- UNIMPLEMENTED();
+ Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg;
+ if (src.offset() != 0) {
+ __ Add64(src_reg, src.rm(), src.offset());
+ }
+ if (destination->IsSimd128Register()) {
+ __ vl(g.ToSimd128Register(destination), src_reg, 0, E8);
+ } else {
+ DCHECK(destination->IsSimd128StackSlot());
+ VRegister temp = kSimd128ScratchReg;
+ Register dst = g.ToMemOperand(destination).offset() == 0
+ ? g.ToMemOperand(destination).rm()
+ : kScratchReg;
+ if (g.ToMemOperand(destination).offset() != 0) {
+ __ Add64(dst, g.ToMemOperand(destination).rm(),
+ g.ToMemOperand(destination).offset());
+ }
+ __ vl(temp, src_reg, 0, E8);
+ __ vs(temp, dst, 0, E8);
+ }
} else {
if (destination->IsFPRegister()) {
if (rep == MachineRepresentation::kFloat32) {
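Throughout the hunks above, the instruction selector packs an AtomicWidth into the instruction word and the code generator recovers it with AtomicWidthField::decode(opcode). A self-contained sketch of that bitfield mechanism (the shift and size chosen here are assumptions for illustration; V8's actual v8::base::BitField and field layout differ in detail):

#include <cstdint>
#include <cstdio>

enum class AtomicWidth : uint32_t { kWord32 = 0, kWord64 = 1 };

// Mirrors the encode/decode shape used above: a value packed into a fixed
// bit range of the 32-bit opcode word.
template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
};

using AtomicWidthField = BitField<AtomicWidth, 20, 2>;  // layout assumed

int main() {
  // An arbitrary base opcode ORed with the width field, as in VisitAtomicLoad.
  const uint32_t opcode =
      0x2Au | AtomicWidthField::encode(AtomicWidth::kWord64);
  if (AtomicWidthField::decode(opcode) == AtomicWidth::kWord64) {
    std::printf("dispatching the 64-bit (Lld/Scd) path\n");
  }
  return 0;
}

This is what lets one generic kAtomic* opcode replace the separate kWord32Atomic*/kRiscvWord64Atomic* pairs deleted in this diff.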
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h b/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
index 2f51c2b1c7..0c8d99a8e8 100644
--- a/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-codes-riscv64.h
@@ -355,7 +355,7 @@ namespace compiler {
V(RiscvS8x16PackOdd) \
V(RiscvS8x16InterleaveEven) \
V(RiscvS8x16InterleaveOdd) \
- V(RiscvS8x16Shuffle) \
+ V(RiscvI8x16Shuffle) \
V(RiscvI8x16Swizzle) \
V(RiscvS8x16Concat) \
V(RiscvS8x8Reverse) \
@@ -373,8 +373,8 @@ namespace compiler {
V(RiscvS128Load32x2U) \
V(RiscvS128LoadLane) \
V(RiscvS128StoreLane) \
- V(RiscvMsaLd) \
- V(RiscvMsaSt) \
+ V(RiscvRvvLd) \
+ V(RiscvRvvSt) \
V(RiscvI32x4SConvertI16x8Low) \
V(RiscvI32x4SConvertI16x8High) \
V(RiscvI32x4UConvertI16x8Low) \
@@ -387,41 +387,14 @@ namespace compiler {
V(RiscvI16x8UConvertI8x16High) \
V(RiscvI8x16SConvertI16x8) \
V(RiscvI8x16UConvertI16x8) \
- V(RiscvWord64AtomicLoadUint8) \
- V(RiscvWord64AtomicLoadUint16) \
- V(RiscvWord64AtomicLoadUint32) \
V(RiscvWord64AtomicLoadUint64) \
- V(RiscvWord64AtomicStoreWord8) \
- V(RiscvWord64AtomicStoreWord16) \
- V(RiscvWord64AtomicStoreWord32) \
V(RiscvWord64AtomicStoreWord64) \
- V(RiscvWord64AtomicAddUint8) \
- V(RiscvWord64AtomicAddUint16) \
- V(RiscvWord64AtomicAddUint32) \
V(RiscvWord64AtomicAddUint64) \
- V(RiscvWord64AtomicSubUint8) \
- V(RiscvWord64AtomicSubUint16) \
- V(RiscvWord64AtomicSubUint32) \
V(RiscvWord64AtomicSubUint64) \
- V(RiscvWord64AtomicAndUint8) \
- V(RiscvWord64AtomicAndUint16) \
- V(RiscvWord64AtomicAndUint32) \
V(RiscvWord64AtomicAndUint64) \
- V(RiscvWord64AtomicOrUint8) \
- V(RiscvWord64AtomicOrUint16) \
- V(RiscvWord64AtomicOrUint32) \
V(RiscvWord64AtomicOrUint64) \
- V(RiscvWord64AtomicXorUint8) \
- V(RiscvWord64AtomicXorUint16) \
- V(RiscvWord64AtomicXorUint32) \
V(RiscvWord64AtomicXorUint64) \
- V(RiscvWord64AtomicExchangeUint8) \
- V(RiscvWord64AtomicExchangeUint16) \
- V(RiscvWord64AtomicExchangeUint32) \
V(RiscvWord64AtomicExchangeUint64) \
- V(RiscvWord64AtomicCompareExchangeUint8) \
- V(RiscvWord64AtomicCompareExchangeUint16) \
- V(RiscvWord64AtomicCompareExchangeUint32) \
V(RiscvWord64AtomicCompareExchangeUint64) \
V(RiscvStoreCompressTagged) \
V(RiscvLoadDecompressTaggedSigned) \
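The V(...) rows above are X-macro entries: the arch opcode enum, and its debug stringification in the Print block earlier in this diff, are both generated by expanding this single list, so deleting the per-width kRiscvWord64Atomic* rows removes those opcodes from every expansion site at once. A minimal sketch of the pattern with invented names:

#define DEMO_OPCODE_LIST(V) \
  V(RiscvRvvLd)             \
  V(RiscvRvvSt)             \
  V(RiscvWord64AtomicLoadUint64)

// Expansion site 1: the enum. Each row becomes one enumerator, in order.
enum DemoArchOpcode {
#define DECLARE_OPCODE(Name) kDemo##Name,
  DEMO_OPCODE_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
};

// Expansion site 2: a name table for debug printing.
static const char* const kDemoOpcodeNames[] = {
#define OPCODE_NAME(Name) #Name,
    DEMO_OPCODE_LIST(OPCODE_NAME)
#undef OPCODE_NAME
};

static_assert(kDemoRiscvRvvSt == 1, "rows map to enumerators in list order");

int main() { return kDemoOpcodeNames[kDemoRiscvRvvLd][0] == 'R' ? 0 : 1; }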
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
index 157b11c930..471628b1f8 100644
--- a/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc
@@ -318,7 +318,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvS8x2Reverse:
case kRiscvS8x4Reverse:
case kRiscvS8x8Reverse:
- case kRiscvS8x16Shuffle:
+ case kRiscvI8x16Shuffle:
case kRiscvI8x16Swizzle:
case kRiscvSar32:
case kRiscvSignExtendByte:
@@ -352,7 +352,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvLw:
case kRiscvLoadFloat:
case kRiscvLwu:
- case kRiscvMsaLd:
+ case kRiscvRvvLd:
case kRiscvPeek:
case kRiscvUld:
case kRiscvULoadDouble:
@@ -372,9 +372,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvS128Load32x2S:
case kRiscvS128Load32x2U:
case kRiscvS128LoadLane:
- case kRiscvWord64AtomicLoadUint8:
- case kRiscvWord64AtomicLoadUint16:
- case kRiscvWord64AtomicLoadUint32:
case kRiscvWord64AtomicLoadUint64:
case kRiscvLoadDecompressTaggedSigned:
case kRiscvLoadDecompressTaggedPointer:
@@ -383,7 +380,7 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvModD:
case kRiscvModS:
- case kRiscvMsaSt:
+ case kRiscvRvvSt:
case kRiscvPush:
case kRiscvSb:
case kRiscvSd:
@@ -399,37 +396,13 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kRiscvUsw:
case kRiscvUStoreFloat:
case kRiscvSync:
- case kRiscvWord64AtomicStoreWord8:
- case kRiscvWord64AtomicStoreWord16:
- case kRiscvWord64AtomicStoreWord32:
case kRiscvWord64AtomicStoreWord64:
- case kRiscvWord64AtomicAddUint8:
- case kRiscvWord64AtomicAddUint16:
- case kRiscvWord64AtomicAddUint32:
case kRiscvWord64AtomicAddUint64:
- case kRiscvWord64AtomicSubUint8:
- case kRiscvWord64AtomicSubUint16:
- case kRiscvWord64AtomicSubUint32:
case kRiscvWord64AtomicSubUint64:
- case kRiscvWord64AtomicAndUint8:
- case kRiscvWord64AtomicAndUint16:
- case kRiscvWord64AtomicAndUint32:
case kRiscvWord64AtomicAndUint64:
- case kRiscvWord64AtomicOrUint8:
- case kRiscvWord64AtomicOrUint16:
- case kRiscvWord64AtomicOrUint32:
case kRiscvWord64AtomicOrUint64:
- case kRiscvWord64AtomicXorUint8:
- case kRiscvWord64AtomicXorUint16:
- case kRiscvWord64AtomicXorUint32:
case kRiscvWord64AtomicXorUint64:
- case kRiscvWord64AtomicExchangeUint8:
- case kRiscvWord64AtomicExchangeUint16:
- case kRiscvWord64AtomicExchangeUint32:
case kRiscvWord64AtomicExchangeUint64:
- case kRiscvWord64AtomicCompareExchangeUint8:
- case kRiscvWord64AtomicCompareExchangeUint16:
- case kRiscvWord64AtomicCompareExchangeUint32:
case kRiscvWord64AtomicCompareExchangeUint64:
case kRiscvStoreCompressTagged:
case kRiscvS128StoreLane:
@@ -1169,8 +1142,6 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return Add64Latency(false) + AndLatency(false) + AssertLatency() +
Add64Latency(false) + AndLatency(false) + BranchShortLatency() +
1 + Sub64Latency() + Add64Latency();
- case kArchWordPoisonOnSpeculation:
- return AndLatency();
case kIeee754Float64Acos:
case kIeee754Float64Acosh:
case kIeee754Float64Asin:
@@ -1541,35 +1512,35 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
return ByteSwapSignedLatency();
case kRiscvByteSwap32:
return ByteSwapSignedLatency();
- case kWord32AtomicLoadInt8:
- case kWord32AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kWord32AtomicLoadUint16:
- case kWord32AtomicLoadWord32:
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
return 2;
- case kWord32AtomicStoreWord8:
- case kWord32AtomicStoreWord16:
- case kWord32AtomicStoreWord32:
+ case kAtomicStoreWord8:
+ case kAtomicStoreWord16:
+ case kAtomicStoreWord32:
return 3;
- case kWord32AtomicExchangeInt8:
+ case kAtomicExchangeInt8:
return Word32AtomicExchangeLatency(true, 8);
- case kWord32AtomicExchangeUint8:
+ case kAtomicExchangeUint8:
return Word32AtomicExchangeLatency(false, 8);
- case kWord32AtomicExchangeInt16:
+ case kAtomicExchangeInt16:
return Word32AtomicExchangeLatency(true, 16);
- case kWord32AtomicExchangeUint16:
+ case kAtomicExchangeUint16:
return Word32AtomicExchangeLatency(false, 16);
- case kWord32AtomicExchangeWord32:
+ case kAtomicExchangeWord32:
return 2 + LlLatency(0) + 1 + ScLatency(0) + BranchShortLatency() + 1;
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
return Word32AtomicCompareExchangeLatency(true, 8);
- case kWord32AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
return Word32AtomicCompareExchangeLatency(false, 8);
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
return Word32AtomicCompareExchangeLatency(true, 16);
- case kWord32AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
return Word32AtomicCompareExchangeLatency(false, 16);
- case kWord32AtomicCompareExchangeWord32:
+ case kAtomicCompareExchangeWord32:
return 3 + LlLatency(0) + BranchShortLatency() + 1 + ScLatency(0) +
BranchShortLatency() + 1;
case kRiscvAssertEqual:
diff --git a/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
index 72706201e2..85d61aa02f 100644
--- a/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
+++ b/deps/v8/src/compiler/backend/riscv64/instruction-selector-riscv64.cc
@@ -475,7 +475,7 @@ void InstructionSelector::VisitLoad(Node* node) {
opcode = kRiscvLd;
break;
case MachineRepresentation::kSimd128:
- opcode = kRiscvMsaLd;
+ opcode = kRiscvRvvLd;
break;
case MachineRepresentation::kCompressedPointer:
case MachineRepresentation::kCompressed:
@@ -489,16 +489,10 @@ void InstructionSelector::VisitLoad(Node* node) {
case MachineRepresentation::kNone:
UNREACHABLE();
}
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= MiscField::encode(kMemoryAccessPoisoned);
- }
EmitLoad(this, node, opcode);
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
UNIMPLEMENTED();
@@ -560,7 +554,7 @@ void InstructionSelector::VisitStore(Node* node) {
opcode = kRiscvSd;
break;
case MachineRepresentation::kSimd128:
- opcode = kRiscvMsaSt;
+ opcode = kRiscvRvvSt;
break;
case MachineRepresentation::kCompressedPointer: // Fall through.
case MachineRepresentation::kCompressed:
@@ -569,7 +563,6 @@ void InstructionSelector::VisitStore(Node* node) {
break;
#else
UNREACHABLE();
- break;
#endif
case MachineRepresentation::kMapWord: // Fall through.
case MachineRepresentation::kNone:
@@ -1639,7 +1632,7 @@ void InstructionSelector::VisitUnalignedLoad(Node* node) {
opcode = kRiscvUld;
break;
case MachineRepresentation::kSimd128:
- opcode = kRiscvMsaLd;
+ opcode = kRiscvRvvLd;
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kCompressedPointer: // Fall through.
@@ -1693,7 +1686,7 @@ void InstructionSelector::VisitUnalignedStore(Node* node) {
opcode = kRiscvUsd;
break;
case MachineRepresentation::kSimd128:
- opcode = kRiscvMsaSt;
+ opcode = kRiscvRvvSt;
break;
case MachineRepresentation::kBit: // Fall through.
case MachineRepresentation::kCompressedPointer: // Fall through.
@@ -1789,7 +1782,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
Int32BinopMatcher m(node, true);
NumberBinopMatcher n(node, true);
if (m.right().Is(0) || n.right().IsZero()) {
- VisitWordCompareZero(selector, g.UseRegister(left), cont);
+ VisitWordCompareZero(selector, g.UseRegisterOrImmediateZero(left),
+ cont);
} else {
VisitCompare(selector, opcode, g.UseRegister(left),
g.UseRegister(right), cont);
@@ -1802,7 +1796,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
case kUnsignedGreaterThanOrEqual: {
Int32BinopMatcher m(node, true);
if (m.right().Is(0)) {
- VisitWordCompareZero(selector, g.UseRegister(left), cont);
+ VisitWordCompareZero(selector, g.UseRegisterOrImmediateZero(left),
+ cont);
} else {
VisitCompare(selector, opcode, g.UseRegister(left),
g.UseImmediate(right), cont);
@@ -1811,7 +1806,8 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
default:
Int32BinopMatcher m(node, true);
if (m.right().Is(0)) {
- VisitWordCompareZero(selector, g.UseRegister(left), cont);
+ VisitWordCompareZero(selector, g.UseRegisterOrImmediateZero(left),
+ cont);
} else {
VisitCompare(selector, opcode, g.UseRegister(left),
g.UseRegister(right), cont);
@@ -1827,10 +1823,13 @@ void VisitWordCompare(InstructionSelector* selector, Node* node,
bool IsNodeUnsigned(Node* n) {
NodeMatcher m(n);
- if (m.IsLoad() || m.IsUnalignedLoad() || m.IsPoisonedLoad() ||
- m.IsProtectedLoad() || m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
+ if (m.IsLoad() || m.IsUnalignedLoad() || m.IsProtectedLoad()) {
LoadRepresentation load_rep = LoadRepresentationOf(n->op());
return load_rep.IsUnsigned();
+ } else if (m.IsWord32AtomicLoad() || m.IsWord64AtomicLoad()) {
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(n->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ return load_rep.IsUnsigned();
} else {
return m.IsUint32Div() || m.IsUint32LessThan() ||
m.IsUint32LessThanOrEqual() || m.IsUint32Mod() ||
@@ -1930,16 +1929,18 @@ void VisitWord64Compare(InstructionSelector* selector, Node* node,
void EmitWordCompareZero(InstructionSelector* selector, Node* value,
FlagsContinuation* cont) {
RiscvOperandGenerator g(selector);
- selector->EmitWithContinuation(kRiscvCmpZero, g.UseRegister(value), cont);
+ selector->EmitWithContinuation(kRiscvCmpZero,
+ g.UseRegisterOrImmediateZero(value), cont);
}
void VisitAtomicLoad(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
RiscvOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
if (g.CanBeImmediate(index, opcode)) {
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
g.DefineAsRegister(node), g.UseRegister(base),
g.UseImmediate(index));
} else {
@@ -1947,20 +1948,22 @@ void VisitAtomicLoad(InstructionSelector* selector, Node* node,
selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None),
addr_reg, g.UseRegister(index), g.UseRegister(base));
// Emit desired load opcode, using temp addr_reg.
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
}
}
void VisitAtomicStore(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
RiscvOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
if (g.CanBeImmediate(index, opcode)) {
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
g.NoOutput(), g.UseRegister(base), g.UseImmediate(index),
g.UseRegisterOrImmediateZero(value));
} else {
@@ -1968,14 +1971,15 @@ void VisitAtomicStore(InstructionSelector* selector, Node* node,
selector->Emit(kRiscvAdd64 | AddressingModeField::encode(kMode_None),
addr_reg, g.UseRegister(index), g.UseRegister(base));
// Emit desired store opcode, using temp addr_reg.
- selector->Emit(opcode | AddressingModeField::encode(kMode_MRI),
+ selector->Emit(opcode | AddressingModeField::encode(kMode_MRI) |
+ AtomicWidthField::encode(width),
g.NoOutput(), addr_reg, g.TempImmediate(0),
g.UseRegisterOrImmediateZero(value));
}
}
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
RiscvOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -1993,12 +1997,13 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
temp[0] = g.TempRegister();
temp[1] = g.TempRegister();
temp[2] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
}
void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
RiscvOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2018,12 +2023,13 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
temp[0] = g.TempRegister();
temp[1] = g.TempRegister();
temp[2] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs, 3, temp);
}
void VisitAtomicBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
RiscvOperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2042,7 +2048,8 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
temps[1] = g.TempRegister();
temps[2] = g.TempRegister();
temps[3] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs, 4, temps);
}
@@ -2404,163 +2411,201 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode =
- load_rep.IsSigned() ? kWord32AtomicLoadInt8 : kWord32AtomicLoadUint8;
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt8 : kAtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = load_rep.IsSigned() ? kWord32AtomicLoadInt16
- : kWord32AtomicLoadUint16;
+ opcode = load_rep.IsSigned() ? kAtomicLoadInt16 : kAtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
- opcode = kWord32AtomicLoadWord32;
+ opcode = kAtomicLoadWord32;
break;
default:
UNREACHABLE();
}
- VisitAtomicLoad(this, node, opcode);
+ VisitAtomicLoad(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ MachineRepresentation rep = store_params.representation();
ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kWord32AtomicStoreWord8;
+ opcode = kAtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
- opcode = kWord32AtomicStoreWord16;
+ opcode = kAtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
- opcode = kWord32AtomicStoreWord32;
+ opcode = kAtomicStoreWord32;
break;
default:
UNREACHABLE();
}
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicStore(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kWord8:
- opcode = kRiscvWord64AtomicLoadUint8;
+ opcode = kAtomicLoadUint8;
break;
case MachineRepresentation::kWord16:
- opcode = kRiscvWord64AtomicLoadUint16;
+ opcode = kAtomicLoadUint16;
break;
case MachineRepresentation::kWord32:
- opcode = kRiscvWord64AtomicLoadUint32;
+ opcode = kAtomicLoadWord32;
break;
case MachineRepresentation::kWord64:
opcode = kRiscvWord64AtomicLoadUint64;
break;
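+      // Tagged atomic loads must go through the decompressing opcodes when
+      // pointer compression is enabled; otherwise they are ordinary word
+      // loads of kTaggedSize width.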
+#ifdef V8_COMPRESS_POINTERS
+ case MachineRepresentation::kTaggedSigned:
+      opcode = kRiscvLoadDecompressTaggedSigned;
+ break;
+ case MachineRepresentation::kTaggedPointer:
+      opcode = kRiscvLoadDecompressTaggedPointer;
+ break;
+ case MachineRepresentation::kTagged:
+      opcode = kRiscvLoadDecompressAnyTagged;
+ break;
+#else
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ if (kTaggedSize == 8) {
+ opcode = kRiscvWord64AtomicLoadUint64;
+ } else {
+ opcode = kAtomicLoadWord32;
+ }
+ break;
+#endif
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed:
+ DCHECK(COMPRESS_POINTERS_BOOL);
+ opcode = kAtomicLoadWord32;
+ break;
default:
UNREACHABLE();
}
- VisitAtomicLoad(this, node, opcode);
+ VisitAtomicLoad(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ MachineRepresentation rep = store_params.representation();
ArchOpcode opcode;
switch (rep) {
case MachineRepresentation::kWord8:
- opcode = kRiscvWord64AtomicStoreWord8;
+ opcode = kAtomicStoreWord8;
break;
case MachineRepresentation::kWord16:
- opcode = kRiscvWord64AtomicStoreWord16;
+ opcode = kAtomicStoreWord16;
break;
case MachineRepresentation::kWord32:
- opcode = kRiscvWord64AtomicStoreWord32;
+ opcode = kAtomicStoreWord32;
break;
case MachineRepresentation::kWord64:
opcode = kRiscvWord64AtomicStoreWord64;
break;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ opcode = kRiscvWord64AtomicStoreWord64;
+ break;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed:
+ CHECK(COMPRESS_POINTERS_BOOL);
+ opcode = kAtomicStoreWord32;
+ break;
default:
UNREACHABLE();
}
- VisitAtomicStore(this, node, opcode);
+ VisitAtomicStore(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kRiscvWord64AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kRiscvWord64AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kRiscvWord64AtomicExchangeUint32;
+ opcode = kAtomicExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kRiscvWord64AtomicExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kRiscvWord64AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kRiscvWord64AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kRiscvWord64AtomicCompareExchangeUint32;
+ opcode = kAtomicCompareExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kRiscvWord64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
Node* node, ArchOpcode int8_op, ArchOpcode uint8_op, ArchOpcode int16_op,
@@ -2581,15 +2626,14 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2614,14 +2658,14 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
- VisitWord64AtomicBinaryOperation( \
- node, kRiscvWord64Atomic##op##Uint8, kRiscvWord64Atomic##op##Uint16, \
- kRiscvWord64Atomic##op##Uint32, kRiscvWord64Atomic##op##Uint64); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
+ kAtomic##op##Uint16, kAtomic##op##Word32, \
+ kRiscvWord64Atomic##op##Uint64); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2640,6 +2684,7 @@ void InstructionSelector::VisitInt64AbsWithOverflow(Node* node) {
#define SIMD_TYPE_LIST(V) \
V(F32x4) \
+ V(I64x2) \
V(I32x4) \
V(I16x8) \
V(I8x16)
@@ -2844,6 +2889,7 @@ SIMD_VISIT_SPLAT(F64x2)
SIMD_VISIT_EXTRACT_LANE(F64x2, )
SIMD_VISIT_EXTRACT_LANE(F32x4, )
SIMD_VISIT_EXTRACT_LANE(I32x4, )
+SIMD_VISIT_EXTRACT_LANE(I64x2, )
SIMD_VISIT_EXTRACT_LANE(I16x8, U)
SIMD_VISIT_EXTRACT_LANE(I16x8, S)
SIMD_VISIT_EXTRACT_LANE(I8x16, U)
@@ -2890,73 +2936,75 @@ struct ShuffleEntry {
ArchOpcode opcode;
};
-static const ShuffleEntry arch_shuffles[] = {
- {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
- kRiscvS32x4InterleaveRight},
- {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
- kRiscvS32x4InterleaveLeft},
- {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
- kRiscvS32x4PackEven},
- {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
- kRiscvS32x4PackOdd},
- {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
- kRiscvS32x4InterleaveEven},
- {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
- kRiscvS32x4InterleaveOdd},
-
- {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
- kRiscvS16x8InterleaveRight},
- {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
- kRiscvS16x8InterleaveLeft},
- {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
- kRiscvS16x8PackEven},
- {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
- kRiscvS16x8PackOdd},
- {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
- kRiscvS16x8InterleaveEven},
- {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
- kRiscvS16x8InterleaveOdd},
- {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
- kRiscvS16x4Reverse},
- {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
- kRiscvS16x2Reverse},
-
- {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
- kRiscvS8x16InterleaveRight},
- {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
- kRiscvS8x16InterleaveLeft},
- {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
- kRiscvS8x16PackEven},
- {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
- kRiscvS8x16PackOdd},
- {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
- kRiscvS8x16InterleaveEven},
- {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
- kRiscvS8x16InterleaveOdd},
- {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8}, kRiscvS8x8Reverse},
- {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12}, kRiscvS8x4Reverse},
- {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
- kRiscvS8x2Reverse}};
-
-bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
- size_t num_entries, bool is_swizzle,
- ArchOpcode* opcode) {
- uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
- for (size_t i = 0; i < num_entries; ++i) {
- const ShuffleEntry& entry = table[i];
- int j = 0;
- for (; j < kSimd128Size; ++j) {
- if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
- break;
- }
- }
- if (j == kSimd128Size) {
- *opcode = entry.opcode;
- return true;
- }
- }
- return false;
-}
+// static const ShuffleEntry arch_shuffles[] = {
+// {{0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23},
+// kRiscvS32x4InterleaveRight},
+// {{8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31},
+// kRiscvS32x4InterleaveLeft},
+// {{0, 1, 2, 3, 8, 9, 10, 11, 16, 17, 18, 19, 24, 25, 26, 27},
+// kRiscvS32x4PackEven},
+// {{4, 5, 6, 7, 12, 13, 14, 15, 20, 21, 22, 23, 28, 29, 30, 31},
+// kRiscvS32x4PackOdd},
+// {{0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27},
+// kRiscvS32x4InterleaveEven},
+// {{4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31},
+// kRiscvS32x4InterleaveOdd},
+
+// {{0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23},
+// kRiscvS16x8InterleaveRight},
+// {{8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31},
+// kRiscvS16x8InterleaveLeft},
+// {{0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29},
+// kRiscvS16x8PackEven},
+// {{2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31},
+// kRiscvS16x8PackOdd},
+// {{0, 1, 16, 17, 4, 5, 20, 21, 8, 9, 24, 25, 12, 13, 28, 29},
+// kRiscvS16x8InterleaveEven},
+// {{2, 3, 18, 19, 6, 7, 22, 23, 10, 11, 26, 27, 14, 15, 30, 31},
+// kRiscvS16x8InterleaveOdd},
+// {{6, 7, 4, 5, 2, 3, 0, 1, 14, 15, 12, 13, 10, 11, 8, 9},
+// kRiscvS16x4Reverse},
+// {{2, 3, 0, 1, 6, 7, 4, 5, 10, 11, 8, 9, 14, 15, 12, 13},
+// kRiscvS16x2Reverse},
+
+// {{0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23},
+// kRiscvS8x16InterleaveRight},
+// {{8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31},
+// kRiscvS8x16InterleaveLeft},
+// {{0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30},
+// kRiscvS8x16PackEven},
+// {{1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31},
+// kRiscvS8x16PackOdd},
+// {{0, 16, 2, 18, 4, 20, 6, 22, 8, 24, 10, 26, 12, 28, 14, 30},
+// kRiscvS8x16InterleaveEven},
+// {{1, 17, 3, 19, 5, 21, 7, 23, 9, 25, 11, 27, 13, 29, 15, 31},
+// kRiscvS8x16InterleaveOdd},
+// {{7, 6, 5, 4, 3, 2, 1, 0, 15, 14, 13, 12, 11, 10, 9, 8},
+// kRiscvS8x8Reverse},
+// {{3, 2, 1, 0, 7, 6, 5, 4, 11, 10, 9, 8, 15, 14, 13, 12},
+// kRiscvS8x4Reverse},
+// {{1, 0, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 15, 14},
+// kRiscvS8x2Reverse}};
+
+// bool TryMatchArchShuffle(const uint8_t* shuffle, const ShuffleEntry* table,
+// size_t num_entries, bool is_swizzle,
+// ArchOpcode* opcode) {
+// uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
+// for (size_t i = 0; i < num_entries; ++i) {
+// const ShuffleEntry& entry = table[i];
+// int j = 0;
+// for (; j < kSimd128Size; ++j) {
+// if ((entry.shuffle[j] & mask) != (shuffle[j] & mask)) {
+// break;
+// }
+// }
+// if (j == kSimd128Size) {
+// *opcode = entry.opcode;
+// return true;
+// }
+// }
+// return false;
+// }
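
For reference, the matcher disabled above works by comparing lane indices modulo the input width: for a swizzle both operands are the same vector, so indices are reduced mod 16; for a two-input shuffle they are reduced mod 32. A condensed sketch of just that masking step, simplified from the commented-out TryMatchArchShuffle:

#include <cstdint>

constexpr int kSimd128Size = 16;

// Returns true when `shuffle` matches `pattern` up to the swizzle mask.
bool LanesMatch(const uint8_t* shuffle, const uint8_t* pattern,
                bool is_swizzle) {
  const uint8_t mask = is_swizzle ? kSimd128Size - 1 : 2 * kSimd128Size - 1;
  for (int j = 0; j < kSimd128Size; ++j) {
    if ((shuffle[j] & mask) != (pattern[j] & mask)) return false;
  }
  return true;
}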
} // namespace
@@ -2964,29 +3012,29 @@ void InstructionSelector::VisitI8x16Shuffle(Node* node) {
uint8_t shuffle[kSimd128Size];
bool is_swizzle;
CanonicalizeShuffle(node, shuffle, &is_swizzle);
- uint8_t shuffle32x4[4];
- ArchOpcode opcode;
- if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
- is_swizzle, &opcode)) {
- VisitRRR(this, opcode, node);
- return;
- }
Node* input0 = node->InputAt(0);
Node* input1 = node->InputAt(1);
- uint8_t offset;
RiscvOperandGenerator g(this);
- if (wasm::SimdShuffle::TryMatchConcat(shuffle, &offset)) {
- Emit(kRiscvS8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
- g.UseRegister(input0), g.UseImmediate(offset));
- return;
- }
- if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
- Emit(kRiscvS32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
- g.UseRegister(input1),
- g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4)));
- return;
- }
- Emit(kRiscvS8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ // uint8_t shuffle32x4[4];
+ // ArchOpcode opcode;
+ // if (TryMatchArchShuffle(shuffle, arch_shuffles, arraysize(arch_shuffles),
+ // is_swizzle, &opcode)) {
+ // VisitRRR(this, opcode, node);
+ // return;
+ // }
+ // uint8_t offset;
+ // if (wasm::SimdShuffle::TryMatchConcat(shuffle, &offset)) {
+ // Emit(kRiscvS8x16Concat, g.DefineSameAsFirst(node), g.UseRegister(input1),
+ // g.UseRegister(input0), g.UseImmediate(offset));
+ // return;
+ // }
+ // if (wasm::SimdShuffle::TryMatch32x4Shuffle(shuffle, shuffle32x4)) {
+ // Emit(kRiscvS32x4Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
+ // g.UseRegister(input1),
+ // g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle32x4)));
+ // return;
+ // }
+ Emit(kRiscvI8x16Shuffle, g.DefineAsRegister(node), g.UseRegister(input0),
g.UseRegister(input1),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle)),
g.UseImmediate(wasm::SimdShuffle::Pack4Lanes(shuffle + 4)),
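
The immediates above pack four byte-sized lane indices into one 32-bit operand. A sketch of that packing, matching what wasm::SimdShuffle::Pack4Lanes does as far as I can tell (first index lands in the low byte):

#include <cstdint>

// Packs shuffle[0..3] into a 32-bit immediate, shuffle[0] in bits 0..7.
int32_t Pack4Lanes(const uint8_t* shuffle) {
  int32_t result = 0;
  for (int i = 3; i >= 0; --i) {
    result <<= 8;
    result |= shuffle[i];
  }
  return result;
}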
diff --git a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
index 685293169d..3c2c3d6c06 100644
--- a/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/code-generator-s390.cc
@@ -985,15 +985,6 @@ void AdjustStackPointerForTailCall(
}
}
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen, Instruction* instr,
- S390OperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(instr->opcode());
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->AndP(value, kSpeculationPoisonRegister);
- }
-}
-
} // namespace
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
@@ -1071,25 +1062,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, ne);
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- Register scratch = r1;
-
- __ ComputeCodeStartAddress(scratch);
-
- // Calculate a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- __ mov(kSpeculationPoisonRegister, Operand::Zero());
- __ mov(r0, Operand(-1));
- __ CmpS64(kJavaScriptCallCodeStartRegister, scratch);
- __ LoadOnConditionP(eq, kSpeculationPoisonRegister, r0);
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ AndP(kJSFunctionRegister, kJSFunctionRegister, kSpeculationPoisonRegister);
- __ AndP(kContextRegister, kContextRegister, kSpeculationPoisonRegister);
- __ AndP(sp, sp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -1395,10 +1367,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand(offset.offset()));
break;
}
- case kArchWordPoisonOnSpeculation:
- DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
- __ AndP(i.InputRegister(0), kSpeculationPoisonRegister);
- break;
case kS390_Peek: {
int reverse_slot = i.InputInt32(0);
int offset =
@@ -2155,7 +2123,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kS390_LoadWordS8:
ASSEMBLE_LOAD_INTEGER(LoadS8);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_BitcastFloat32ToInt32:
ASSEMBLE_UNARY_OP(R_DInstr(MovFloatToInt), R_MInstr(LoadU32), nullInstr);
@@ -2173,35 +2140,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#endif
case kS390_LoadWordU8:
ASSEMBLE_LOAD_INTEGER(LoadU8);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordU16:
ASSEMBLE_LOAD_INTEGER(LoadU16);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordS16:
ASSEMBLE_LOAD_INTEGER(LoadS16);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordU32:
ASSEMBLE_LOAD_INTEGER(LoadU32);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadWordS32:
ASSEMBLE_LOAD_INTEGER(LoadS32);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse16:
ASSEMBLE_LOAD_INTEGER(lrvh);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse32:
ASSEMBLE_LOAD_INTEGER(lrv);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse64:
ASSEMBLE_LOAD_INTEGER(lrvg);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadReverse16RR:
__ lrvr(i.OutputRegister(), i.InputRegister(0));
@@ -2238,7 +2197,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kS390_LoadWord64:
ASSEMBLE_LOAD_INTEGER(lg);
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
case kS390_LoadAndTestWord32: {
ASSEMBLE_LOADANDTEST32(ltr, lt_z);
@@ -2258,7 +2216,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
AddressingMode mode = kMode_None;
MemOperand operand = i.MemoryOperand(&mode);
__ vl(i.OutputSimd128Register(), operand, Condition(0));
- EmitWordLoadPoisoningIfNeeded(this, instr, i);
break;
}
case kS390_StoreWord8:
@@ -2327,40 +2284,37 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ lay(i.OutputRegister(), mem);
break;
}
- case kS390_Word64AtomicExchangeUint8:
- case kWord32AtomicExchangeInt8:
- case kWord32AtomicExchangeUint8: {
+ case kAtomicExchangeInt8:
+ case kAtomicExchangeUint8: {
Register base = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
Register output = i.OutputRegister();
__ la(r1, MemOperand(base, index));
__ AtomicExchangeU8(r1, value, output, r0);
- if (opcode == kWord32AtomicExchangeInt8) {
+ if (opcode == kAtomicExchangeInt8) {
__ LoadS8(output, output);
} else {
__ LoadU8(output, output);
}
break;
}
- case kS390_Word64AtomicExchangeUint16:
- case kWord32AtomicExchangeInt16:
- case kWord32AtomicExchangeUint16: {
+ case kAtomicExchangeInt16:
+ case kAtomicExchangeUint16: {
Register base = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
Register output = i.OutputRegister();
__ la(r1, MemOperand(base, index));
__ AtomicExchangeU16(r1, value, output, r0);
- if (opcode == kWord32AtomicExchangeInt16) {
+ if (opcode == kAtomicExchangeInt16) {
__ lghr(output, output);
} else {
__ llghr(output, output);
}
break;
}
- case kS390_Word64AtomicExchangeUint32:
- case kWord32AtomicExchangeWord32: {
+ case kAtomicExchangeWord32: {
Register base = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
@@ -2373,34 +2327,30 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bne(&do_cs, Label::kNear);
break;
}
- case kWord32AtomicCompareExchangeInt8:
+ case kAtomicCompareExchangeInt8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(LoadS8);
break;
- case kS390_Word64AtomicCompareExchangeUint8:
- case kWord32AtomicCompareExchangeUint8:
+ case kAtomicCompareExchangeUint8:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_BYTE(LoadU8);
break;
- case kWord32AtomicCompareExchangeInt16:
+ case kAtomicCompareExchangeInt16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(LoadS16);
break;
- case kS390_Word64AtomicCompareExchangeUint16:
- case kWord32AtomicCompareExchangeUint16:
+ case kAtomicCompareExchangeUint16:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_HALFWORD(LoadU16);
break;
- case kS390_Word64AtomicCompareExchangeUint32:
- case kWord32AtomicCompareExchangeWord32:
+ case kAtomicCompareExchangeWord32:
ASSEMBLE_ATOMIC_COMPARE_EXCHANGE_WORD();
break;
#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
+ case kAtomic##op##Int8: \
ASSEMBLE_ATOMIC_BINOP_BYTE(inst, [&]() { \
intptr_t shift_right = static_cast<intptr_t>(shift_amount); \
__ srlk(result, prev, Operand(shift_right)); \
- __ LoadS8(result, result); \
+ __ LoadS8(result, result); \
}); \
break; \
- case kS390_Word64Atomic##op##Uint8: \
- case kWord32Atomic##op##Uint8: \
+ case kAtomic##op##Uint8: \
ASSEMBLE_ATOMIC_BINOP_BYTE(inst, [&]() { \
int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
__ RotateInsertSelectBits(result, prev, Operand(56), Operand(63), \
@@ -2408,15 +2358,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
true); \
}); \
break; \
- case kWord32Atomic##op##Int16: \
+ case kAtomic##op##Int16: \
ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() { \
intptr_t shift_right = static_cast<intptr_t>(shift_amount); \
__ srlk(result, prev, Operand(shift_right)); \
- __ LoadS16(result, result); \
+ __ LoadS16(result, result); \
}); \
break; \
- case kS390_Word64Atomic##op##Uint16: \
- case kWord32Atomic##op##Uint16: \
+ case kAtomic##op##Uint16: \
ASSEMBLE_ATOMIC_BINOP_HALFWORD(inst, [&]() { \
int rotate_left = shift_amount == 0 ? 0 : 64 - shift_amount; \
__ RotateInsertSelectBits(result, prev, Operand(48), Operand(63), \
@@ -2430,24 +2379,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ATOMIC_BINOP_CASE(Or, Or)
ATOMIC_BINOP_CASE(Xor, Xor)
#undef ATOMIC_BINOP_CASE
- case kS390_Word64AtomicAddUint32:
- case kWord32AtomicAddWord32:
+ case kAtomicAddWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(laa);
break;
- case kS390_Word64AtomicSubUint32:
- case kWord32AtomicSubWord32:
+ case kAtomicSubWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(LoadAndSub32);
break;
- case kS390_Word64AtomicAndUint32:
- case kWord32AtomicAndWord32:
+ case kAtomicAndWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(lan);
break;
- case kS390_Word64AtomicOrUint32:
- case kWord32AtomicOrWord32:
+ case kAtomicOrWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(lao);
break;
- case kS390_Word64AtomicXorUint32:
- case kWord32AtomicXorWord32:
+ case kAtomicXorWord32:
ASSEMBLE_ATOMIC_BINOP_WORD(lax);
break;
case kS390_Word64AtomicAddUint64:
@@ -2482,77 +2426,89 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_ATOMIC64_COMP_EXCHANGE_WORD64();
break;
// Simd Support.
-#define SIMD_BINOP_LIST(V) \
- V(F64x2Add) \
- V(F64x2Sub) \
- V(F64x2Mul) \
- V(F64x2Div) \
- V(F64x2Min) \
- V(F64x2Max) \
- V(F64x2Eq) \
- V(F64x2Ne) \
- V(F64x2Lt) \
- V(F64x2Le) \
- V(F32x4Add) \
- V(F32x4Sub) \
- V(F32x4Mul) \
- V(F32x4Div) \
- V(F32x4Min) \
- V(F32x4Max) \
- V(F32x4Eq) \
- V(F32x4Ne) \
- V(F32x4Lt) \
- V(F32x4Le) \
- V(I64x2Add) \
- V(I64x2Sub) \
- V(I64x2Mul) \
- V(I64x2Eq) \
- V(I64x2Ne) \
- V(I64x2GtS) \
- V(I64x2GeS) \
- V(I32x4Add) \
- V(I32x4Sub) \
- V(I32x4Mul) \
- V(I32x4Eq) \
- V(I32x4Ne) \
- V(I32x4GtS) \
- V(I32x4GeS) \
- V(I32x4GtU) \
- V(I32x4GeU) \
- V(I32x4MinS) \
- V(I32x4MinU) \
- V(I32x4MaxS) \
- V(I32x4MaxU) \
- V(I16x8Add) \
- V(I16x8Sub) \
- V(I16x8Mul) \
- V(I16x8Eq) \
- V(I16x8Ne) \
- V(I16x8GtS) \
- V(I16x8GeS) \
- V(I16x8GtU) \
- V(I16x8GeU) \
- V(I16x8MinS) \
- V(I16x8MinU) \
- V(I16x8MaxS) \
- V(I16x8MaxU) \
- V(I8x16Add) \
- V(I8x16Sub) \
- V(I8x16Eq) \
- V(I8x16Ne) \
- V(I8x16GtS) \
- V(I8x16GeS) \
- V(I8x16GtU) \
- V(I8x16GeU) \
- V(I8x16MinS) \
- V(I8x16MinU) \
- V(I8x16MaxS) \
- V(I8x16MaxU)
-
-#define EMIT_SIMD_BINOP(name) \
+#define SIMD_BINOP_LIST(V) \
+ V(F64x2Add, Simd128Register) \
+ V(F64x2Sub, Simd128Register) \
+ V(F64x2Mul, Simd128Register) \
+ V(F64x2Div, Simd128Register) \
+ V(F64x2Min, Simd128Register) \
+ V(F64x2Max, Simd128Register) \
+ V(F64x2Eq, Simd128Register) \
+ V(F64x2Ne, Simd128Register) \
+ V(F64x2Lt, Simd128Register) \
+ V(F64x2Le, Simd128Register) \
+ V(F32x4Add, Simd128Register) \
+ V(F32x4Sub, Simd128Register) \
+ V(F32x4Mul, Simd128Register) \
+ V(F32x4Div, Simd128Register) \
+ V(F32x4Min, Simd128Register) \
+ V(F32x4Max, Simd128Register) \
+ V(F32x4Eq, Simd128Register) \
+ V(F32x4Ne, Simd128Register) \
+ V(F32x4Lt, Simd128Register) \
+ V(F32x4Le, Simd128Register) \
+ V(I64x2Add, Simd128Register) \
+ V(I64x2Sub, Simd128Register) \
+ V(I64x2Mul, Simd128Register) \
+ V(I64x2Eq, Simd128Register) \
+ V(I64x2Ne, Simd128Register) \
+ V(I64x2GtS, Simd128Register) \
+ V(I64x2GeS, Simd128Register) \
+ V(I64x2Shl, Register) \
+ V(I64x2ShrS, Register) \
+ V(I64x2ShrU, Register) \
+ V(I32x4Add, Simd128Register) \
+ V(I32x4Sub, Simd128Register) \
+ V(I32x4Mul, Simd128Register) \
+ V(I32x4Eq, Simd128Register) \
+ V(I32x4Ne, Simd128Register) \
+ V(I32x4GtS, Simd128Register) \
+ V(I32x4GeS, Simd128Register) \
+ V(I32x4GtU, Simd128Register) \
+ V(I32x4GeU, Simd128Register) \
+ V(I32x4MinS, Simd128Register) \
+ V(I32x4MinU, Simd128Register) \
+ V(I32x4MaxS, Simd128Register) \
+ V(I32x4MaxU, Simd128Register) \
+ V(I32x4Shl, Register) \
+ V(I32x4ShrS, Register) \
+ V(I32x4ShrU, Register) \
+ V(I16x8Add, Simd128Register) \
+ V(I16x8Sub, Simd128Register) \
+ V(I16x8Mul, Simd128Register) \
+ V(I16x8Eq, Simd128Register) \
+ V(I16x8Ne, Simd128Register) \
+ V(I16x8GtS, Simd128Register) \
+ V(I16x8GeS, Simd128Register) \
+ V(I16x8GtU, Simd128Register) \
+ V(I16x8GeU, Simd128Register) \
+ V(I16x8MinS, Simd128Register) \
+ V(I16x8MinU, Simd128Register) \
+ V(I16x8MaxS, Simd128Register) \
+ V(I16x8MaxU, Simd128Register) \
+ V(I16x8Shl, Register) \
+ V(I16x8ShrS, Register) \
+ V(I16x8ShrU, Register) \
+ V(I8x16Add, Simd128Register) \
+ V(I8x16Sub, Simd128Register) \
+ V(I8x16Eq, Simd128Register) \
+ V(I8x16Ne, Simd128Register) \
+ V(I8x16GtS, Simd128Register) \
+ V(I8x16GeS, Simd128Register) \
+ V(I8x16GtU, Simd128Register) \
+ V(I8x16GeU, Simd128Register) \
+ V(I8x16MinS, Simd128Register) \
+ V(I8x16MinU, Simd128Register) \
+ V(I8x16MaxS, Simd128Register) \
+ V(I8x16MaxU, Simd128Register) \
+ V(I8x16Shl, Register) \
+ V(I8x16ShrS, Register) \
+ V(I8x16ShrU, Register)
+
+#define EMIT_SIMD_BINOP(name, stype) \
case kS390_##name: { \
__ name(i.OutputSimd128Register(), i.InputSimd128Register(0), \
- i.InputSimd128Register(1)); \
+ i.Input##stype(1)); \
break; \
}
SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
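
The second column added to the list above selects the type of operand 1: the vector shift amounts live in a general-purpose register, while the other binops take a Simd128Register. For illustration, EMIT_SIMD_BINOP(I64x2Shl, Register) expands to roughly:

case kS390_I64x2Shl: {
  __ I64x2Shl(i.OutputSimd128Register(), i.InputSimd128Register(0),
              i.InputRegister(1));  // i.Input##stype(1) with stype=Register
  break;
}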
@@ -2657,64 +2613,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Condition(0));
break;
}
- // vector shifts
-#define VECTOR_SHIFT(op, mode) \
- { \
- __ vlvg(kScratchDoubleReg, i.InputRegister(1), MemOperand(r0, 0), \
- Condition(mode)); \
- __ vrep(kScratchDoubleReg, kScratchDoubleReg, Operand(0), \
- Condition(mode)); \
- __ op(i.OutputSimd128Register(), i.InputSimd128Register(0), \
- kScratchDoubleReg, Condition(0), Condition(0), Condition(mode)); \
- }
- case kS390_I64x2Shl: {
- VECTOR_SHIFT(veslv, 3);
- break;
- }
- case kS390_I64x2ShrS: {
- VECTOR_SHIFT(vesrav, 3);
- break;
- }
- case kS390_I64x2ShrU: {
- VECTOR_SHIFT(vesrlv, 3);
- break;
- }
- case kS390_I32x4Shl: {
- VECTOR_SHIFT(veslv, 2);
- break;
- }
- case kS390_I32x4ShrS: {
- VECTOR_SHIFT(vesrav, 2);
- break;
- }
- case kS390_I32x4ShrU: {
- VECTOR_SHIFT(vesrlv, 2);
- break;
- }
- case kS390_I16x8Shl: {
- VECTOR_SHIFT(veslv, 1);
- break;
- }
- case kS390_I16x8ShrS: {
- VECTOR_SHIFT(vesrav, 1);
- break;
- }
- case kS390_I16x8ShrU: {
- VECTOR_SHIFT(vesrlv, 1);
- break;
- }
- case kS390_I8x16Shl: {
- VECTOR_SHIFT(veslv, 0);
- break;
- }
- case kS390_I8x16ShrS: {
- VECTOR_SHIFT(vesrav, 0);
- break;
- }
- case kS390_I8x16ShrU: {
- VECTOR_SHIFT(vesrlv, 0);
- break;
- }
// vector unary ops
case kS390_F64x2Abs: {
__ vfpso(i.OutputSimd128Register(), i.InputSimd128Register(0),
@@ -3489,6 +3387,120 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vpkls(dst, dst, kScratchDoubleReg, Condition(0), Condition(3));
break;
}
+#define LOAD_SPLAT(type) \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ Simd128Register dst = i.OutputSimd128Register(); \
+ __ LoadAndSplat##type##LE(dst, operand);
+ case kS390_S128Load64Splat: {
+ LOAD_SPLAT(64x2);
+ break;
+ }
+ case kS390_S128Load32Splat: {
+ LOAD_SPLAT(32x4);
+ break;
+ }
+ case kS390_S128Load16Splat: {
+ LOAD_SPLAT(16x8);
+ break;
+ }
+ case kS390_S128Load8Splat: {
+ LOAD_SPLAT(8x16);
+ break;
+ }
+#undef LOAD_SPLAT
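
The splat cases above do one scalar-sized load and broadcast it to every lane. In scalar terms, a sketch of the 64x2 case (little-endian element semantics, as the LE suffix suggests):

#include <cstdint>
#include <cstring>

// One 8-byte load, duplicated into both 64-bit lanes of the result.
void Load64Splat(uint64_t dst[2], const void* mem) {
  uint64_t v;
  std::memcpy(&v, mem, sizeof(v));
  dst[0] = v;
  dst[1] = v;
}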
+#define LOAD_EXTEND(type) \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ Simd128Register dst = i.OutputSimd128Register(); \
+ __ LoadAndExtend##type##LE(dst, operand);
+ case kS390_S128Load32x2U: {
+ LOAD_EXTEND(32x2U);
+ break;
+ }
+ case kS390_S128Load32x2S: {
+ LOAD_EXTEND(32x2S);
+ break;
+ }
+ case kS390_S128Load16x4U: {
+ LOAD_EXTEND(16x4U);
+ break;
+ }
+ case kS390_S128Load16x4S: {
+ LOAD_EXTEND(16x4S);
+ break;
+ }
+ case kS390_S128Load8x8U: {
+ LOAD_EXTEND(8x8U);
+ break;
+ }
+ case kS390_S128Load8x8S: {
+ LOAD_EXTEND(8x8S);
+ break;
+ }
+#undef LOAD_EXTEND
+#define LOAD_AND_ZERO(type) \
+ AddressingMode mode = kMode_None; \
+ MemOperand operand = i.MemoryOperand(&mode); \
+ Simd128Register dst = i.OutputSimd128Register(); \
+ __ LoadV##type##ZeroLE(dst, operand);
+ case kS390_S128Load32Zero: {
+ LOAD_AND_ZERO(32);
+ break;
+ }
+ case kS390_S128Load64Zero: {
+ LOAD_AND_ZERO(64);
+ break;
+ }
+#undef LOAD_AND_ZERO
+#define LOAD_LANE(type, lane) \
+ AddressingMode mode = kMode_None; \
+ size_t index = 2; \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ Simd128Register dst = i.OutputSimd128Register(); \
+ DCHECK_EQ(dst, i.InputSimd128Register(0)); \
+ __ LoadLane##type##LE(dst, operand, lane);
+ case kS390_S128Load8Lane: {
+ LOAD_LANE(8, 15 - i.InputUint8(1));
+ break;
+ }
+ case kS390_S128Load16Lane: {
+ LOAD_LANE(16, 7 - i.InputUint8(1));
+ break;
+ }
+ case kS390_S128Load32Lane: {
+ LOAD_LANE(32, 3 - i.InputUint8(1));
+ break;
+ }
+ case kS390_S128Load64Lane: {
+ LOAD_LANE(64, 1 - i.InputUint8(1));
+ break;
+ }
+#undef LOAD_LANE
+#define STORE_LANE(type, lane) \
+ AddressingMode mode = kMode_None; \
+ size_t index = 2; \
+ MemOperand operand = i.MemoryOperand(&mode, &index); \
+ Simd128Register src = i.InputSimd128Register(0); \
+ __ StoreLane##type##LE(src, operand, lane);
+ case kS390_S128Store8Lane: {
+ STORE_LANE(8, 15 - i.InputUint8(1));
+ break;
+ }
+ case kS390_S128Store16Lane: {
+ STORE_LANE(16, 7 - i.InputUint8(1));
+ break;
+ }
+ case kS390_S128Store32Lane: {
+ STORE_LANE(32, 3 - i.InputUint8(1));
+ break;
+ }
+ case kS390_S128Store64Lane: {
+ STORE_LANE(64, 1 - i.InputUint8(1));
+ break;
+ }
+#undef STORE_LANE
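
The `15 - ...`, `7 - ...`, `3 - ...`, `1 - ...` arithmetic in the lane cases above mirrors the wasm lane index, because wasm numbers lanes from the little-endian end while the s390 vector facility numbers elements from the big-endian end. In general terms:

// Maps a little-endian wasm lane index to a big-endian element index.
constexpr int LaneToBigEndianElement(int lane, int lane_count) {
  return (lane_count - 1) - lane;
}
static_assert(LaneToBigEndianElement(0, 16) == 15,
              "byte lane 0 -> element 15");
static_assert(LaneToBigEndianElement(1, 2) == 0,
              "64-bit lane 1 -> element 0");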
case kS390_StoreCompressTagged: {
CHECK(!instr->HasOutput());
size_t index = 0;
@@ -3541,20 +3553,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ b(flabel); // no fallthru to flabel.
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(John) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual ||
- condition == kOverflow || condition == kNotOverflow) {
- return;
- }
-
- condition = NegateFlagsCondition(condition);
- __ mov(r0, Operand::Zero());
- __ LoadOnConditionP(FlagsConditionToCondition(condition, kArchNop),
- kSpeculationPoisonRegister, r0);
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
AssembleArchBranch(instr, branch);
@@ -3781,7 +3779,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= osr_helper()->UnoptimizedFrameSlots();
- ResetSpeculationPoison();
}
const RegList saves_fp = call_descriptor->CalleeSavedFPRegisters();
@@ -4028,7 +4025,6 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
case Constant::kRpoNumber:
UNREACHABLE(); // TODO(dcarney): loading RPO constants on S390.
- break;
}
if (destination->IsStackSlot()) {
__ StoreU64(dst, g.ToMemOperand(destination), r0);
diff --git a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
index 4eea2fa865..03806b57b1 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
+++ b/deps/v8/src/compiler/backend/s390/instruction-codes-s390.h
@@ -161,36 +161,12 @@ namespace compiler {
V(S390_StoreReverseSimd128) \
V(S390_StoreFloat32) \
V(S390_StoreDouble) \
- V(S390_CompressSigned) \
- V(S390_CompressPointer) \
- V(S390_CompressAny) \
- V(S390_Word64AtomicExchangeUint8) \
- V(S390_Word64AtomicExchangeUint16) \
- V(S390_Word64AtomicExchangeUint32) \
V(S390_Word64AtomicExchangeUint64) \
- V(S390_Word64AtomicCompareExchangeUint8) \
- V(S390_Word64AtomicCompareExchangeUint16) \
- V(S390_Word64AtomicCompareExchangeUint32) \
V(S390_Word64AtomicCompareExchangeUint64) \
- V(S390_Word64AtomicAddUint8) \
- V(S390_Word64AtomicAddUint16) \
- V(S390_Word64AtomicAddUint32) \
V(S390_Word64AtomicAddUint64) \
- V(S390_Word64AtomicSubUint8) \
- V(S390_Word64AtomicSubUint16) \
- V(S390_Word64AtomicSubUint32) \
V(S390_Word64AtomicSubUint64) \
- V(S390_Word64AtomicAndUint8) \
- V(S390_Word64AtomicAndUint16) \
- V(S390_Word64AtomicAndUint32) \
V(S390_Word64AtomicAndUint64) \
- V(S390_Word64AtomicOrUint8) \
- V(S390_Word64AtomicOrUint16) \
- V(S390_Word64AtomicOrUint32) \
V(S390_Word64AtomicOrUint64) \
- V(S390_Word64AtomicXorUint8) \
- V(S390_Word64AtomicXorUint16) \
- V(S390_Word64AtomicXorUint32) \
V(S390_Word64AtomicXorUint64) \
V(S390_F64x2Splat) \
V(S390_F64x2ReplaceLane) \
@@ -396,6 +372,26 @@ namespace compiler {
V(S390_S128Not) \
V(S390_S128Select) \
V(S390_S128AndNot) \
+ V(S390_S128Load8Splat) \
+ V(S390_S128Load16Splat) \
+ V(S390_S128Load32Splat) \
+ V(S390_S128Load64Splat) \
+ V(S390_S128Load8x8S) \
+ V(S390_S128Load8x8U) \
+ V(S390_S128Load16x4S) \
+ V(S390_S128Load16x4U) \
+ V(S390_S128Load32x2S) \
+ V(S390_S128Load32x2U) \
+ V(S390_S128Load32Zero) \
+ V(S390_S128Load64Zero) \
+ V(S390_S128Load8Lane) \
+ V(S390_S128Load16Lane) \
+ V(S390_S128Load32Lane) \
+ V(S390_S128Load64Lane) \
+ V(S390_S128Store8Lane) \
+ V(S390_S128Store16Lane) \
+ V(S390_S128Store32Lane) \
+ V(S390_S128Store64Lane) \
V(S390_StoreSimd128) \
V(S390_LoadSimd128) \
V(S390_StoreCompressTagged) \
diff --git a/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
index afc28b1f8c..d7046507c7 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-scheduler-s390.cc
@@ -135,9 +135,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_LoadAndTestWord64:
case kS390_LoadAndTestFloat32:
case kS390_LoadAndTestFloat64:
- case kS390_CompressSigned:
- case kS390_CompressPointer:
- case kS390_CompressAny:
case kS390_F64x2Splat:
case kS390_F64x2ReplaceLane:
case kS390_F64x2Abs:
@@ -362,6 +359,22 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_LoadDecompressTaggedSigned:
case kS390_LoadDecompressTaggedPointer:
case kS390_LoadDecompressAnyTagged:
+ case kS390_S128Load8Splat:
+ case kS390_S128Load16Splat:
+ case kS390_S128Load32Splat:
+ case kS390_S128Load64Splat:
+ case kS390_S128Load8x8S:
+ case kS390_S128Load8x8U:
+ case kS390_S128Load16x4S:
+ case kS390_S128Load16x4U:
+ case kS390_S128Load32x2S:
+ case kS390_S128Load32x2U:
+ case kS390_S128Load32Zero:
+ case kS390_S128Load64Zero:
+ case kS390_S128Load8Lane:
+ case kS390_S128Load16Lane:
+ case kS390_S128Load32Lane:
+ case kS390_S128Load64Lane:
return kIsLoadOperation;
case kS390_StoreWord8:
@@ -379,35 +392,18 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kS390_Push:
case kS390_PushFrame:
case kS390_StoreToStackSlot:
+ case kS390_S128Store8Lane:
+ case kS390_S128Store16Lane:
+ case kS390_S128Store32Lane:
+ case kS390_S128Store64Lane:
return kHasSideEffect;
- case kS390_Word64AtomicExchangeUint8:
- case kS390_Word64AtomicExchangeUint16:
- case kS390_Word64AtomicExchangeUint32:
case kS390_Word64AtomicExchangeUint64:
- case kS390_Word64AtomicCompareExchangeUint8:
- case kS390_Word64AtomicCompareExchangeUint16:
- case kS390_Word64AtomicCompareExchangeUint32:
case kS390_Word64AtomicCompareExchangeUint64:
- case kS390_Word64AtomicAddUint8:
- case kS390_Word64AtomicAddUint16:
- case kS390_Word64AtomicAddUint32:
case kS390_Word64AtomicAddUint64:
- case kS390_Word64AtomicSubUint8:
- case kS390_Word64AtomicSubUint16:
- case kS390_Word64AtomicSubUint32:
case kS390_Word64AtomicSubUint64:
- case kS390_Word64AtomicAndUint8:
- case kS390_Word64AtomicAndUint16:
- case kS390_Word64AtomicAndUint32:
case kS390_Word64AtomicAndUint64:
- case kS390_Word64AtomicOrUint8:
- case kS390_Word64AtomicOrUint16:
- case kS390_Word64AtomicOrUint32:
case kS390_Word64AtomicOrUint64:
- case kS390_Word64AtomicXorUint8:
- case kS390_Word64AtomicXorUint16:
- case kS390_Word64AtomicXorUint32:
case kS390_Word64AtomicXorUint64:
return kHasSideEffect;
diff --git a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
index bcf5a8dfff..489065e65f 100644
--- a/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
+++ b/deps/v8/src/compiler/backend/s390/instruction-selector-s390.cc
@@ -106,7 +106,6 @@ class S390OperandGenerator final : public OperandGenerator {
return OpParameter<int64_t>(node->op());
else
UNIMPLEMENTED();
- return 0L;
}
bool CanBeImmediate(Node* node, OperandModes mode) {
@@ -272,8 +271,7 @@ bool S390OpcodeOnlySupport12BitDisp(InstructionCode op) {
(S390OpcodeOnlySupport12BitDisp(op) ? OperandMode::kUint12Imm \
: OperandMode::kInt20Imm)
-ArchOpcode SelectLoadOpcode(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ArchOpcode SelectLoadOpcode(LoadRepresentation load_rep) {
ArchOpcode opcode;
switch (load_rep.representation()) {
case MachineRepresentation::kFloat32:
@@ -466,7 +464,8 @@ void GenerateRightOperands(InstructionSelector* selector, Node* node,
} else if (*operand_mode & OperandMode::kAllowMemoryOperand) {
NodeMatcher mright(right);
if (mright.IsLoad() && selector->CanCover(node, right) &&
- canCombineWithLoad(SelectLoadOpcode(right))) {
+ canCombineWithLoad(
+ SelectLoadOpcode(LoadRepresentationOf(right->op())))) {
AddressingMode mode = g.GetEffectiveAddressMemoryOperand(
right, inputs, input_count, OpcodeImmMode(*opcode));
*opcode |= AddressingModeField::encode(mode);
@@ -695,23 +694,23 @@ void InstructionSelector::VisitAbortCSAAssert(Node* node) {
Emit(kArchAbortCSAAssert, g.NoOutput(), g.UseFixed(node->InputAt(0), r3));
}
-void InstructionSelector::VisitLoad(Node* node) {
+void InstructionSelector::VisitLoad(Node* node, Node* value,
+ InstructionCode opcode) {
S390OperandGenerator g(this);
- InstructionCode opcode = SelectLoadOpcode(node);
InstructionOperand outputs[] = {g.DefineAsRegister(node)};
InstructionOperand inputs[3];
size_t input_count = 0;
AddressingMode mode =
- g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ g.GetEffectiveAddressMemoryOperand(value, inputs, &input_count);
opcode |= AddressingModeField::encode(mode);
- if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- opcode |= AccessModeField::encode(kMemoryAccessPoisoned);
- }
Emit(opcode, 1, outputs, input_count, inputs);
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
+void InstructionSelector::VisitLoad(Node* node) {
+ LoadRepresentation load_rep = LoadRepresentationOf(node->op());
+ InstructionCode opcode = SelectLoadOpcode(load_rep);
+ VisitLoad(node, node, opcode);
+}
void InstructionSelector::VisitProtectedLoad(Node* node) {
// TODO(eholk)
@@ -2153,21 +2152,18 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
bool InstructionSelector::IsTailCallAddressImmediate() { return false; }
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
- load_rep.representation() == MachineRepresentation::kWord16 ||
- load_rep.representation() == MachineRepresentation::kWord32);
- USE(load_rep);
- VisitLoad(node);
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ VisitLoad(node, node, SelectLoadOpcode(load_rep));
}
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- VisitGeneralStore(this, node, rep);
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ VisitGeneralStore(this, node, store_params.representation());
}
void VisitAtomicExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
S390OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2181,7 +2177,8 @@ void VisitAtomicExchange(InstructionSelector* selector, Node* node,
inputs[input_count++] = g.UseUniqueRegister(value);
InstructionOperand outputs[1];
outputs[0] = g.DefineAsRegister(node);
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, 1, outputs, input_count, inputs);
}
@@ -2189,40 +2186,40 @@ void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
ArchOpcode opcode;
MachineType type = AtomicOpType(node->op());
if (type == MachineType::Uint8()) {
- opcode = kS390_Word64AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kS390_Word64AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kS390_Word64AtomicExchangeUint32;
+ opcode = kAtomicExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kS390_Word64AtomicExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
}
void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
S390OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2248,7 +2245,8 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
size_t output_count = 0;
outputs[output_count++] = g.DefineSameAsFirst(node);
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, output_count, outputs, input_count, inputs);
}
@@ -2256,40 +2254,40 @@ void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Uint8()) {
- opcode = kS390_Word64AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kS390_Word64AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kS390_Word64AtomicCompareExchangeUint32;
+ opcode = kAtomicCompareExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kS390_Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
}
void VisitAtomicBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
S390OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2318,7 +2316,8 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
size_t temp_count = 0;
temps[temp_count++] = g.TempRegister();
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, output_count, outputs, input_count, inputs, temp_count,
temps);
}
@@ -2342,15 +2341,14 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2376,14 +2374,14 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}
-#define VISIT_ATOMIC64_BINOP(op) \
- void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
- VisitWord64AtomicBinaryOperation( \
- node, kS390_Word64Atomic##op##Uint8, kS390_Word64Atomic##op##Uint16, \
- kS390_Word64Atomic##op##Uint32, kS390_Word64Atomic##op##Uint64); \
+#define VISIT_ATOMIC64_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
+ kAtomic##op##Uint16, kAtomic##op##Word32, \
+ kS390_Word64Atomic##op##Uint64); \
}
VISIT_ATOMIC64_BINOP(Add)
VISIT_ATOMIC64_BINOP(Sub)
@@ -2393,14 +2391,14 @@ VISIT_ATOMIC64_BINOP(Xor)
#undef VISIT_ATOMIC64_BINOP
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- USE(load_rep);
- VisitLoad(node);
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ VisitLoad(node, node, SelectLoadOpcode(load_rep));
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- VisitGeneralStore(this, node, rep);
+ AtomicStoreParameters store_params = AtomicStoreParametersOf(node->op());
+ VisitGeneralStore(this, node, store_params.representation());
}
#define SIMD_TYPES(V) \
@@ -2789,18 +2787,107 @@ void InstructionSelector::EmitPrepareResults(
}
void InstructionSelector::VisitLoadLane(Node* node) {
- // We should never reach here, see http://crrev.com/c/2577820
- UNREACHABLE();
+ LoadLaneParameters params = LoadLaneParametersOf(node->op());
+ InstructionCode opcode;
+ if (params.rep == MachineType::Int8()) {
+ opcode = kS390_S128Load8Lane;
+ } else if (params.rep == MachineType::Int16()) {
+ opcode = kS390_S128Load16Lane;
+ } else if (params.rep == MachineType::Int32()) {
+ opcode = kS390_S128Load32Lane;
+ } else if (params.rep == MachineType::Int64()) {
+ opcode = kS390_S128Load64Lane;
+ } else {
+ UNREACHABLE();
+ }
+
+ S390OperandGenerator g(this);
+ InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
+ InstructionOperand inputs[5];
+ size_t input_count = 0;
+
+ inputs[input_count++] = g.UseRegister(node->InputAt(2));
+ inputs[input_count++] = g.UseImmediate(params.laneidx);
+
+ AddressingMode mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ opcode |= AddressingModeField::encode(mode);
+ Emit(opcode, 1, outputs, input_count, inputs);
}
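
Note that the output above is DefineSameAsFirst(node): a lane load replaces a single lane and must preserve the rest of the vector, so the destination register has to start out holding the input vector. In scalar terms, a sketch rather than the emitted code:

#include <cstdint>
#include <cstring>

// dst enters holding the original vector; only lane `lane` changes.
void Load32Lane(uint32_t dst[4], const void* mem, int lane) {
  std::memcpy(&dst[lane], mem, sizeof(uint32_t));
}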
void InstructionSelector::VisitLoadTransform(Node* node) {
- // We should never reach here, see http://crrev.com/c/2050811
- UNREACHABLE();
+ LoadTransformParameters params = LoadTransformParametersOf(node->op());
+ ArchOpcode opcode;
+ switch (params.transformation) {
+ case LoadTransformation::kS128Load8Splat:
+ opcode = kS390_S128Load8Splat;
+ break;
+ case LoadTransformation::kS128Load16Splat:
+ opcode = kS390_S128Load16Splat;
+ break;
+ case LoadTransformation::kS128Load32Splat:
+ opcode = kS390_S128Load32Splat;
+ break;
+ case LoadTransformation::kS128Load64Splat:
+ opcode = kS390_S128Load64Splat;
+ break;
+ case LoadTransformation::kS128Load8x8S:
+ opcode = kS390_S128Load8x8S;
+ break;
+ case LoadTransformation::kS128Load8x8U:
+ opcode = kS390_S128Load8x8U;
+ break;
+ case LoadTransformation::kS128Load16x4S:
+ opcode = kS390_S128Load16x4S;
+ break;
+ case LoadTransformation::kS128Load16x4U:
+ opcode = kS390_S128Load16x4U;
+ break;
+ case LoadTransformation::kS128Load32x2S:
+ opcode = kS390_S128Load32x2S;
+ break;
+ case LoadTransformation::kS128Load32x2U:
+ opcode = kS390_S128Load32x2U;
+ break;
+ case LoadTransformation::kS128Load32Zero:
+ opcode = kS390_S128Load32Zero;
+ break;
+ case LoadTransformation::kS128Load64Zero:
+ opcode = kS390_S128Load64Zero;
+ break;
+ default:
+ UNREACHABLE();
+ }
+ VisitLoad(node, node, opcode);
}
void InstructionSelector::VisitStoreLane(Node* node) {
- // We should never reach here, see http://crrev.com/c/2577820
- UNREACHABLE();
+ StoreLaneParameters params = StoreLaneParametersOf(node->op());
+ InstructionCode opcode;
+ if (params.rep == MachineRepresentation::kWord8) {
+ opcode = kS390_S128Store8Lane;
+ } else if (params.rep == MachineRepresentation::kWord16) {
+ opcode = kS390_S128Store16Lane;
+ } else if (params.rep == MachineRepresentation::kWord32) {
+ opcode = kS390_S128Store32Lane;
+ } else if (params.rep == MachineRepresentation::kWord64) {
+ opcode = kS390_S128Store64Lane;
+ } else {
+ UNREACHABLE();
+ }
+
+ S390OperandGenerator g(this);
+ InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
+ InstructionOperand inputs[5];
+ size_t input_count = 0;
+
+ inputs[input_count++] = g.UseRegister(node->InputAt(2));
+ inputs[input_count++] = g.UseImmediate(params.laneidx);
+
+ AddressingMode mode =
+ g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+ opcode |= AddressingModeField::encode(mode);
+ Emit(opcode, 1, outputs, input_count, inputs);
}
void InstructionSelector::VisitTruncateFloat32ToInt32(Node* node) {
diff --git a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
index 60a40fb489..3e2298de3e 100644
--- a/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/code-generator-x64.cc
@@ -324,11 +324,124 @@ class OutOfLineRecordWrite final : public OutOfLineCode {
Zone* zone_;
};
+template <std::memory_order order>
+void EmitStore(TurboAssembler* tasm, Operand operand, Register value,
+ MachineRepresentation rep) {
+ if (order == std::memory_order_relaxed) {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ tasm->movb(operand, value);
+ break;
+ case MachineRepresentation::kWord16:
+ tasm->movw(operand, value);
+ break;
+ case MachineRepresentation::kWord32:
+ tasm->movl(operand, value);
+ break;
+ case MachineRepresentation::kWord64:
+ tasm->movq(operand, value);
+ break;
+ case MachineRepresentation::kTagged:
+ tasm->StoreTaggedField(operand, value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return;
+ }
+
+ DCHECK_EQ(order, std::memory_order_seq_cst);
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ tasm->movq(kScratchRegister, value);
+ tasm->xchgb(kScratchRegister, operand);
+ break;
+ case MachineRepresentation::kWord16:
+ tasm->movq(kScratchRegister, value);
+ tasm->xchgw(kScratchRegister, operand);
+ break;
+ case MachineRepresentation::kWord32:
+ tasm->movq(kScratchRegister, value);
+ tasm->xchgl(kScratchRegister, operand);
+ break;
+ case MachineRepresentation::kWord64:
+ tasm->movq(kScratchRegister, value);
+ tasm->xchgq(kScratchRegister, operand);
+ break;
+ case MachineRepresentation::kTagged:
+ tasm->AtomicStoreTaggedField(operand, value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
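
The seq_cst branch above stores via xchg rather than a plain mov: an xchg with a memory operand is implicitly locked on x64, so it both performs the store and acts as a full barrier, which is the conventional lowering for a sequentially consistent store. The same distinction at the C++ level, for comparison (illustrative only):

#include <atomic>
#include <cstdint>

void StoreSeqCst(std::atomic<uint32_t>& slot, uint32_t value) {
  // Compilers typically lower this to `xchg` on x86-64.
  slot.store(value, std::memory_order_seq_cst);
}

void StoreRelaxed(std::atomic<uint32_t>& slot, uint32_t value) {
  // A plain `mov` suffices for a relaxed store on x86-64.
  slot.store(value, std::memory_order_relaxed);
}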
+
+template <std::memory_order order>
+void EmitStore(TurboAssembler* tasm, Operand operand, Immediate value,
+ MachineRepresentation rep);
+
+template <>
+void EmitStore<std::memory_order_relaxed>(TurboAssembler* tasm, Operand operand,
+ Immediate value,
+ MachineRepresentation rep) {
+ switch (rep) {
+ case MachineRepresentation::kWord8:
+ tasm->movb(operand, value);
+ break;
+ case MachineRepresentation::kWord16:
+ tasm->movw(operand, value);
+ break;
+ case MachineRepresentation::kWord32:
+ tasm->movl(operand, value);
+ break;
+ case MachineRepresentation::kWord64:
+ tasm->movq(operand, value);
+ break;
+ case MachineRepresentation::kTagged:
+ tasm->StoreTaggedField(operand, value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
#ifdef V8_IS_TSAN
-class OutOfLineTSANRelaxedStore final : public OutOfLineCode {
+void EmitMemoryProbeForTrapHandlerIfNeeded(TurboAssembler* tasm,
+ Register scratch, Operand operand,
+ StubCallMode mode, int size) {
+#if V8_ENABLE_WEBASSEMBLY && V8_TRAP_HANDLER_SUPPORTED
+ // The wasm OOB trap handler needs to be able to look up the faulting
+ // instruction pointer to handle the SIGSEGV raised by an OOB access. It
+ // will not handle SIGSEGVs raised by the TSAN store helpers. Emit a
+ // redundant load here to give the trap handler a chance to handle any
+ // OOB SIGSEGVs.
+ if (trap_handler::IsTrapHandlerEnabled() &&
+ mode == StubCallMode::kCallWasmRuntimeStub) {
+ switch (size) {
+ case kInt8Size:
+ tasm->movb(scratch, operand);
+ break;
+ case kInt16Size:
+ tasm->movw(scratch, operand);
+ break;
+ case kInt32Size:
+ tasm->movl(scratch, operand);
+ break;
+ case kInt64Size:
+ tasm->movq(scratch, operand);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+#endif
+}
+
+class OutOfLineTSANStore : public OutOfLineCode {
public:
- OutOfLineTSANRelaxedStore(CodeGenerator* gen, Operand operand, Register value,
- Register scratch0, StubCallMode stub_mode, int size)
+ OutOfLineTSANStore(CodeGenerator* gen, Operand operand, Register value,
+ Register scratch0, StubCallMode stub_mode, int size,
+ std::memory_order order)
: OutOfLineCode(gen),
operand_(operand),
value_(value),
@@ -337,6 +450,7 @@ class OutOfLineTSANRelaxedStore final : public OutOfLineCode {
stub_mode_(stub_mode),
#endif // V8_ENABLE_WEBASSEMBLY
size_(size),
+ memory_order_(order),
zone_(gen->zone()) {
DCHECK(!AreAliased(value, scratch0));
}
@@ -352,14 +466,15 @@ class OutOfLineTSANRelaxedStore final : public OutOfLineCode {
// A direct call to a wasm runtime stub defined in this module.
// Just encode the stub index. This will be patched when the code
// is added to the native module and copied into wasm code space.
- __ CallTSANRelaxedStoreStub(scratch0_, value_, save_fp_mode, size_,
- StubCallMode::kCallWasmRuntimeStub);
+ tasm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_,
+ StubCallMode::kCallWasmRuntimeStub,
+ memory_order_);
return;
}
#endif // V8_ENABLE_WEBASSEMBLY
- __ CallTSANRelaxedStoreStub(scratch0_, value_, save_fp_mode, size_,
- StubCallMode::kCallBuiltinPointer);
+ tasm()->CallTSANStoreStub(scratch0_, value_, save_fp_mode, size_,
+ StubCallMode::kCallBuiltinPointer, memory_order_);
}
private:
@@ -370,42 +485,66 @@ class OutOfLineTSANRelaxedStore final : public OutOfLineCode {
StubCallMode const stub_mode_;
#endif // V8_ENABLE_WEBASSEMBLY
int size_;
+ const std::memory_order memory_order_;
Zone* zone_;
};
-void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
- Register value_reg, X64OperandConverter& i,
- StubCallMode mode, int size) {
+void EmitTSANStoreOOL(Zone* zone, CodeGenerator* codegen, TurboAssembler* tasm,
+ Operand operand, Register value_reg,
+ X64OperandConverter& i, StubCallMode mode, int size,
+ std::memory_order order) {
// The FOR_TESTING code doesn't initialize the root register. We can't call
// the TSAN builtin since we need to load the external reference through the
// root register.
// TODO(solanes, v8:7790, v8:11600): See if we can support the FOR_TESTING
- // path. It is not crucial, but it would be nice to remove this if.
- if (codegen->code_kind() == CodeKind::FOR_TESTING) return;
+ // path. It is not crucial, but it would be nice to remove this restriction.
+ DCHECK_NE(codegen->code_kind(), CodeKind::FOR_TESTING);
Register scratch0 = i.TempRegister(0);
- auto tsan_ool = zone->New<OutOfLineTSANRelaxedStore>(
- codegen, operand, value_reg, scratch0, mode, size);
+ auto tsan_ool = zone->New<OutOfLineTSANStore>(codegen, operand, value_reg,
+ scratch0, mode, size, order);
tasm->jmp(tsan_ool->entry());
tasm->bind(tsan_ool->exit());
}
-void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
- Immediate value, X64OperandConverter& i,
- StubCallMode mode, int size) {
+template <std::memory_order order>
+Register GetTSANValueRegister(TurboAssembler* tasm, Register value,
+ X64OperandConverter& i) {
+ return value;
+}
+
+template <std::memory_order order>
+Register GetTSANValueRegister(TurboAssembler* tasm, Immediate value,
+ X64OperandConverter& i);
+
+template <>
+Register GetTSANValueRegister<std::memory_order_relaxed>(
+ TurboAssembler* tasm, Immediate value, X64OperandConverter& i) {
+ Register value_reg = i.TempRegister(1);
+ tasm->movq(value_reg, value);
+ return value_reg;
+}
+
+template <std::memory_order order, typename ValueT>
+void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen,
+ TurboAssembler* tasm, Operand operand, ValueT value,
+ X64OperandConverter& i, StubCallMode stub_call_mode,
+ MachineRepresentation rep) {
// The FOR_TESTING code doesn't initialize the root register. We can't call
// the TSAN builtin since we need to load the external reference through the
// root register.
// TODO(solanes, v8:7790, v8:11600): See if we can support the FOR_TESTING
- // path. It is not crucial, but it would be nice to remove this if.
- if (codegen->code_kind() == CodeKind::FOR_TESTING) return;
-
- Register value_reg = i.TempRegister(1);
- tasm->movq(value_reg, value);
- EmitTSANStoreOOLIfNeeded(zone, codegen, tasm, operand, value_reg, i, mode,
- size);
+ // path. It is not crucial, but it would be nice to remove this restriction.
+ if (codegen->code_kind() != CodeKind::FOR_TESTING) {
+ int size = ElementSizeInBytes(rep);
+ EmitMemoryProbeForTrapHandlerIfNeeded(tasm, i.TempRegister(0), operand,
+ stub_call_mode, size);
+ Register value_reg = GetTSANValueRegister<order>(tasm, value, i);
+ EmitTSANStoreOOL(zone, codegen, tasm, operand, value_reg, i, stub_call_mode,
+ size, order);
+ } else {
+ EmitStore<order>(tasm, operand, value, rep);
+ }
}
class OutOfLineTSANRelaxedLoad final : public OutOfLineCode {
@@ -453,10 +592,10 @@ class OutOfLineTSANRelaxedLoad final : public OutOfLineCode {
Zone* zone_;
};
-void EmitTSANLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
- X64OperandConverter& i, StubCallMode mode,
- int size) {
+void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
+ TurboAssembler* tasm, Operand operand,
+ X64OperandConverter& i, StubCallMode mode,
+ int size) {
// The FOR_TESTING code doesn't initialize the root register. We can't call
// the TSAN builtin since we need to load the external reference through the
// root register.
@@ -472,20 +611,20 @@ void EmitTSANLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
}
#else
-void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
- Register value_reg, X64OperandConverter& i,
- StubCallMode mode, int size) {}
-
-void EmitTSANStoreOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
- Immediate value, X64OperandConverter& i,
- StubCallMode mode, int size) {}
-
-void EmitTSANLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
- TurboAssembler* tasm, Operand operand,
- X64OperandConverter& i, StubCallMode mode,
- int size) {}
+template <std::memory_order order, typename ValueT>
+void EmitTSANAwareStore(Zone* zone, CodeGenerator* codegen,
+ TurboAssembler* tasm, Operand operand, ValueT value,
+ X64OperandConverter& i, StubCallMode stub_call_mode,
+ MachineRepresentation rep) {
+ DCHECK(order == std::memory_order_relaxed ||
+ order == std::memory_order_seq_cst);
+ EmitStore<order>(tasm, operand, value, rep);
+}
+
+void EmitTSANRelaxedLoadOOLIfNeeded(Zone* zone, CodeGenerator* codegen,
+ TurboAssembler* tasm, Operand operand,
+ X64OperandConverter& i, StubCallMode mode,
+ int size) {}
#endif // V8_IS_TSAN
#if V8_ENABLE_WEBASSEMBLY
@@ -569,16 +708,6 @@ void EmitOOLTrapIfNeeded(Zone* zone, CodeGenerator* codegen,
#endif // V8_ENABLE_WEBASSEMBLY
-void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
- InstructionCode opcode, Instruction* instr,
- X64OperandConverter const& i) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- Register value = i.OutputRegister();
- codegen->tasm()->andq(value, kSpeculationPoisonRegister);
- }
-}
-
} // namespace
#define ASSEMBLE_UNOP(asm_instr) \
@@ -871,24 +1000,32 @@ void EmitWordLoadPoisoningIfNeeded(CodeGenerator* codegen,
} \
} while (false)
-#define ASSEMBLE_PINSR(ASM_INSTR) \
- do { \
- EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset()); \
- XMMRegister dst = i.OutputSimd128Register(); \
- XMMRegister src = i.InputSimd128Register(0); \
- uint8_t laneidx = i.InputUint8(1); \
- if (HasAddressingMode(instr)) { \
- __ ASM_INSTR(dst, src, i.MemoryOperand(2), laneidx); \
- break; \
- } \
- if (instr->InputAt(2)->IsFPRegister()) { \
- __ Movq(kScratchRegister, i.InputDoubleRegister(2)); \
- __ ASM_INSTR(dst, src, kScratchRegister, laneidx); \
- } else if (instr->InputAt(2)->IsRegister()) { \
- __ ASM_INSTR(dst, src, i.InputRegister(2), laneidx); \
- } else { \
- __ ASM_INSTR(dst, src, i.InputOperand(2), laneidx); \
- } \
+#define ASSEMBLE_PINSR(ASM_INSTR) \
+ do { \
+ XMMRegister dst = i.OutputSimd128Register(); \
+ XMMRegister src = i.InputSimd128Register(0); \
+ uint8_t laneidx = i.InputUint8(1); \
+ uint32_t load_offset; \
+ if (HasAddressingMode(instr)) { \
+ __ ASM_INSTR(dst, src, i.MemoryOperand(2), laneidx, &load_offset); \
+ } else if (instr->InputAt(2)->IsFPRegister()) { \
+ __ Movq(kScratchRegister, i.InputDoubleRegister(2)); \
+ __ ASM_INSTR(dst, src, kScratchRegister, laneidx, &load_offset); \
+ } else if (instr->InputAt(2)->IsRegister()) { \
+ __ ASM_INSTR(dst, src, i.InputRegister(2), laneidx, &load_offset); \
+ } else { \
+ __ ASM_INSTR(dst, src, i.InputOperand(2), laneidx, &load_offset); \
+ } \
+ EmitOOLTrapIfNeeded(zone(), this, opcode, instr, load_offset); \
+ } while (false)
+
+#define ASSEMBLE_SEQ_CST_STORE(rep) \
+ do { \
+ Register value = i.InputRegister(0); \
+ Operand operand = i.MemoryOperand(1); \
+ EmitTSANAwareStore<std::memory_order_seq_cst>( \
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(), \
+ rep); \
} while (false)
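
ASSEMBLE_SEQ_CST_STORE funnels sequentially consistent atomic stores through the same TSAN-aware helper, instantiated with std::memory_order_seq_cst. As a rough intuition rather than a claim about this exact lowering: on x64 a relaxed store is just a plain mov, while a seq_cst store requires a serializing instruction, typically xchg, so the two orders genuinely need different code paths. A hedged sketch in portable C++:

#include <atomic>
#include <cstdint>

std::atomic<uint32_t> g_cell{0};

// Typically lowers to a plain mov on x64.
void StoreRelaxed(uint32_t v) {
  g_cell.store(v, std::memory_order_relaxed);
}

// Typically lowers to an implicitly locked xchg (or mov + mfence) on x64,
// keeping the store totally ordered with other seq_cst operations.
void StoreSeqCst(uint32_t v) {
  g_cell.store(v, std::memory_order_seq_cst);
}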
void CodeGenerator::AssembleDeconstructFrame() {
@@ -1019,22 +1156,6 @@ void CodeGenerator::BailoutIfDeoptimized() {
RelocInfo::CODE_TARGET, not_zero);
}
-void CodeGenerator::GenerateSpeculationPoisonFromCodeStartRegister() {
- // Set a mask which has all bits set in the normal case, but has all
- // bits cleared if we are speculatively executing the wrong PC.
- __ ComputeCodeStartAddress(rbx);
- __ xorq(kSpeculationPoisonRegister, kSpeculationPoisonRegister);
- __ cmpq(kJavaScriptCallCodeStartRegister, rbx);
- __ Move(rbx, -1);
- __ cmovq(equal, kSpeculationPoisonRegister, rbx);
-}
-
-void CodeGenerator::AssembleRegisterArgumentPoisoning() {
- __ andq(kJSFunctionRegister, kSpeculationPoisonRegister);
- __ andq(kContextRegister, kSpeculationPoisonRegister);
- __ andq(rsp, kSpeculationPoisonRegister);
-}
-
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
@@ -1052,11 +1173,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineCall(reg);
- } else {
- __ call(reg);
- }
+ __ call(reg);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -1078,19 +1195,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (DetermineStubCallMode() == StubCallMode::kCallWasmRuntimeStub) {
__ near_call(wasm_code, constant.rmode());
} else {
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineCall(wasm_code, constant.rmode());
- } else {
- __ Call(wasm_code, constant.rmode());
- }
+ __ Call(wasm_code, constant.rmode());
}
} else {
- Register reg = i.InputRegister(0);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineCall(reg);
- } else {
- __ call(reg);
- }
+ __ call(i.InputRegister(0));
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
@@ -1107,12 +1215,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ jmp(kScratchRegister);
}
} else {
- Register reg = i.InputRegister(0);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineJump(reg);
- } else {
- __ jmp(reg);
- }
+ __ jmp(i.InputRegister(0));
}
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
@@ -1130,11 +1233,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
__ LoadCodeObjectEntry(reg, reg);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineJump(reg);
- } else {
- __ jmp(reg);
- }
+ __ jmp(reg);
}
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
@@ -1147,11 +1246,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_IMPLIES(
instr->HasCallDescriptorFlag(CallDescriptor::kFixedTargetRegister),
reg == kJavaScriptCallCodeStartRegister);
- if (instr->HasCallDescriptorFlag(CallDescriptor::kRetpoline)) {
- __ RetpolineJump(reg);
- } else {
- __ jmp(reg);
- }
+ __ jmp(reg);
unwinding_info_writer_.MarkBlockWillExit();
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
@@ -1344,7 +1439,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ movl(result, result);
break;
}
- case kArchStoreWithWriteBarrier: {
+ case kArchStoreWithWriteBarrier: // Fall through.
+ case kArchAtomicStoreWithWriteBarrier: {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
@@ -1356,7 +1452,16 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
auto ool = zone()->New<OutOfLineRecordWrite>(this, object, operand, value,
scratch0, scratch1, mode,
DetermineStubCallMode());
- __ StoreTaggedField(operand, value);
+ if (arch_opcode == kArchStoreWithWriteBarrier) {
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kTagged);
+ } else {
+ DCHECK_EQ(arch_opcode, kArchAtomicStoreWithWriteBarrier);
+ EmitTSANAwareStore<std::memory_order_seq_cst>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kTagged);
+ }
if (mode > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value, ool->exit());
}
@@ -1364,14 +1469,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
MemoryChunk::kPointersFromHereAreInterestingMask,
not_zero, ool->entry());
__ bind(ool->exit());
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kTaggedSize);
break;
}
- case kArchWordPoisonOnSpeculation:
- DCHECK_EQ(i.OutputRegister(), i.InputRegister(0));
- __ andq(i.InputRegister(0), kSpeculationPoisonRegister);
- break;
case kX64MFence:
__ mfence();
break;
@@ -1646,22 +1745,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// when there is a (v)mulss depending on the result.
__ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
- case kSSEFloat32Abs: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ Pcmpeqd(tmp, tmp);
- __ Psrlq(tmp, byte{33});
- __ Andps(i.OutputDoubleRegister(), tmp);
- break;
- }
- case kSSEFloat32Neg: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ Pcmpeqd(tmp, tmp);
- __ Psllq(tmp, byte{31});
- __ Xorps(i.OutputDoubleRegister(), tmp);
- break;
- }
case kSSEFloat32Sqrt:
ASSEMBLE_SSE_UNOP(sqrtss);
break;
@@ -1858,16 +1941,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ bind(ool->exit());
break;
}
- case kX64F64x2Abs:
- case kSSEFloat64Abs: {
- __ Abspd(i.OutputDoubleRegister());
- break;
- }
- case kX64F64x2Neg:
- case kSSEFloat64Neg: {
- __ Negpd(i.OutputDoubleRegister());
- break;
- }
case kSSEFloat64Sqrt:
ASSEMBLE_SSE_UNOP(Sqrtsd);
break;
@@ -2120,56 +2193,22 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
// when there is a (v)mulsd depending on the result.
__ Movapd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
- case kAVXFloat32Abs: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ vpcmpeqd(tmp, tmp, tmp);
- __ vpsrlq(tmp, tmp, 33);
- if (instr->InputAt(0)->IsFPRegister()) {
- __ vandps(i.OutputDoubleRegister(), tmp, i.InputDoubleRegister(0));
- } else {
- __ vandps(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
- }
+ case kX64Float32Abs: {
+ __ Absps(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
- case kAVXFloat32Neg: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ vpcmpeqd(tmp, tmp, tmp);
- __ vpsllq(tmp, tmp, 31);
- if (instr->InputAt(0)->IsFPRegister()) {
- __ vxorps(i.OutputDoubleRegister(), tmp, i.InputDoubleRegister(0));
- } else {
- __ vxorps(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
- }
+ case kX64Float32Neg: {
+ __ Negps(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
- case kAVXFloat64Abs: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ vpcmpeqd(tmp, tmp, tmp);
- __ vpsrlq(tmp, tmp, 1);
- if (instr->InputAt(0)->IsFPRegister()) {
- __ vandpd(i.OutputDoubleRegister(), tmp, i.InputDoubleRegister(0));
- } else {
- __ vandpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
- }
+ case kX64F64x2Abs:
+ case kX64Float64Abs: {
+ __ Abspd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
- case kAVXFloat64Neg: {
- // TODO(bmeurer): Use RIP relative 128-bit constants.
- CpuFeatureScope avx_scope(tasm(), AVX);
- XMMRegister tmp = i.ToDoubleRegister(instr->TempAt(0));
- __ vpcmpeqd(tmp, tmp, tmp);
- __ vpsllq(tmp, tmp, 63);
- if (instr->InputAt(0)->IsFPRegister()) {
- __ vxorpd(i.OutputDoubleRegister(), tmp, i.InputDoubleRegister(0));
- } else {
- __ vxorpd(i.OutputDoubleRegister(), tmp, i.InputOperand(0));
- }
+ case kX64F64x2Neg:
+ case kX64Float64Neg: {
+ __ Negpd(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
case kSSEFloat64SilenceNaN:
@@ -2180,24 +2219,20 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxbl);
__ AssertZeroExtended(i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxbl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxbl);
__ AssertZeroExtended(i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxbq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxbq);
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxbq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxbq);
__ AssertZeroExtended(i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movb: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -2205,29 +2240,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(Immediate(i.InputInt8(index)));
- __ movb(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt8Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord8);
} else {
Register value(i.InputRegister(index));
- __ movb(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt8Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord8);
}
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64Movsxwl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxwl);
__ AssertZeroExtended(i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movzxwl:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxwl);
__ AssertZeroExtended(i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxwq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -2237,7 +2269,6 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movzxwq);
__ AssertZeroExtended(i.OutputRegister());
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movw: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -2245,16 +2276,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(Immediate(i.InputInt16(index)));
- __ movw(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt16Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord16);
} else {
Register value(i.InputRegister(index));
- __ movw(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt16Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord16);
}
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
}
case kX64Movl:
@@ -2263,8 +2293,8 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (HasAddressingMode(instr)) {
Operand address(i.MemoryOperand());
__ movl(i.OutputRegister(), address);
- EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
- DetermineStubCallMode(), kInt32Size);
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ DetermineStubCallMode(), kInt32Size);
} else {
if (HasRegisterInput(instr, 0)) {
__ movl(i.OutputRegister(), i.InputRegister(0));
@@ -2278,48 +2308,43 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(i.InputImmediate(index));
- __ movl(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt32Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord32);
} else {
Register value(i.InputRegister(index));
- __ movl(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt32Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord32);
}
}
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movsxlq:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
ASSEMBLE_MOVX(movsxlq);
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64MovqDecompressTaggedSigned: {
CHECK(instr->HasOutput());
Operand address(i.MemoryOperand());
__ DecompressTaggedSigned(i.OutputRegister(), address);
- EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
- DetermineStubCallMode(), kTaggedSize);
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ DetermineStubCallMode(), kTaggedSize);
break;
}
case kX64MovqDecompressTaggedPointer: {
CHECK(instr->HasOutput());
Operand address(i.MemoryOperand());
__ DecompressTaggedPointer(i.OutputRegister(), address);
- EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
- DetermineStubCallMode(), kTaggedSize);
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ DetermineStubCallMode(), kTaggedSize);
break;
}
case kX64MovqDecompressAnyTagged: {
CHECK(instr->HasOutput());
Operand address(i.MemoryOperand());
__ DecompressAnyTagged(i.OutputRegister(), address);
- EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
- DetermineStubCallMode(), kTaggedSize);
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ DetermineStubCallMode(), kTaggedSize);
break;
}
case kX64MovqCompressTagged: {
@@ -2328,14 +2353,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(i.InputImmediate(index));
- __ StoreTaggedField(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kTaggedSize);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kTagged);
} else {
Register value(i.InputRegister(index));
- __ StoreTaggedField(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kTaggedSize);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kTagged);
}
break;
}
@@ -2344,24 +2369,23 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
if (instr->HasOutput()) {
Operand address(i.MemoryOperand());
__ movq(i.OutputRegister(), address);
- EmitTSANLoadOOLIfNeeded(zone(), this, tasm(), address, i,
- DetermineStubCallMode(), kInt64Size);
+ EmitTSANRelaxedLoadOOLIfNeeded(zone(), this, tasm(), address, i,
+ DetermineStubCallMode(), kInt64Size);
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
Immediate value(i.InputImmediate(index));
- __ movq(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt64Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord64);
} else {
Register value(i.InputRegister(index));
- __ movq(operand, value);
- EmitTSANStoreOOLIfNeeded(zone(), this, tasm(), operand, value, i,
- DetermineStubCallMode(), kInt64Size);
+ EmitTSANAwareStore<std::memory_order_relaxed>(
+ zone(), this, tasm(), operand, value, i, DetermineStubCallMode(),
+ MachineRepresentation::kWord64);
}
}
- EmitWordLoadPoisoningIfNeeded(this, opcode, instr, i);
break;
case kX64Movss:
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
@@ -2376,17 +2400,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64Movsd: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
if (instr->HasOutput()) {
- const MemoryAccessMode access_mode = AccessModeField::decode(opcode);
- if (access_mode == kMemoryAccessPoisoned) {
- // If we have to poison the loaded value, we load into a general
- // purpose register first, mask it with the poison, and move the
- // value from the general purpose register into the double register.
- __ movq(kScratchRegister, i.MemoryOperand());
- __ andq(kScratchRegister, kSpeculationPoisonRegister);
- __ Movq(i.OutputDoubleRegister(), kScratchRegister);
- } else {
- __ Movsd(i.OutputDoubleRegister(), i.MemoryOperand());
- }
+ __ Movsd(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
@@ -2667,27 +2681,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F64x2Qfma: {
- if (CpuFeatures::IsSupported(FMA3)) {
- CpuFeatureScope fma3_scope(tasm(), FMA3);
- __ vfmadd231pd(i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(2));
- } else {
- __ Movapd(kScratchDoubleReg, i.InputSimd128Register(2));
- __ Mulpd(kScratchDoubleReg, i.InputSimd128Register(1));
- __ Addpd(i.OutputSimd128Register(), kScratchDoubleReg);
- }
+ __ F64x2Qfma(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2),
+ kScratchDoubleReg);
break;
}
case kX64F64x2Qfms: {
- if (CpuFeatures::IsSupported(FMA3)) {
- CpuFeatureScope fma3_scope(tasm(), FMA3);
- __ vfnmadd231pd(i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(2));
- } else {
- __ Movapd(kScratchDoubleReg, i.InputSimd128Register(2));
- __ Mulpd(kScratchDoubleReg, i.InputSimd128Register(1));
- __ Subpd(i.OutputSimd128Register(), kScratchDoubleReg);
- }
+ __ F64x2Qfms(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2),
+ kScratchDoubleReg);
break;
}
case kX64F64x2ConvertLowI32x4S: {
@@ -2696,7 +2698,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64F64x2ConvertLowI32x4U: {
__ F64x2ConvertLowI32x4U(i.OutputSimd128Register(),
- i.InputSimd128Register(0));
+ i.InputSimd128Register(0), kScratchRegister);
break;
}
case kX64F64x2PromoteLowF32x4: {
@@ -2709,12 +2711,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I32x4TruncSatF64x2SZero: {
__ I32x4TruncSatF64x2SZero(i.OutputSimd128Register(),
- i.InputSimd128Register(0));
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ kScratchRegister);
break;
}
case kX64I32x4TruncSatF64x2UZero: {
__ I32x4TruncSatF64x2UZero(i.OutputSimd128Register(),
- i.InputSimd128Register(0));
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ kScratchRegister);
break;
}
case kX64F32x4Splat: {
@@ -2868,27 +2872,15 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64F32x4Qfma: {
- if (CpuFeatures::IsSupported(FMA3)) {
- CpuFeatureScope fma3_scope(tasm(), FMA3);
- __ vfmadd231ps(i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(2));
- } else {
- __ Movaps(kScratchDoubleReg, i.InputSimd128Register(2));
- __ Mulps(kScratchDoubleReg, i.InputSimd128Register(1));
- __ Addps(i.OutputSimd128Register(), kScratchDoubleReg);
- }
+ __ F32x4Qfma(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2),
+ kScratchDoubleReg);
break;
}
case kX64F32x4Qfms: {
- if (CpuFeatures::IsSupported(FMA3)) {
- CpuFeatureScope fma3_scope(tasm(), FMA3);
- __ vfnmadd231ps(i.OutputSimd128Register(), i.InputSimd128Register(1),
- i.InputSimd128Register(2));
- } else {
- __ Movaps(kScratchDoubleReg, i.InputSimd128Register(2));
- __ Mulps(kScratchDoubleReg, i.InputSimd128Register(1));
- __ Subps(i.OutputSimd128Register(), kScratchDoubleReg);
- }
+ __ F32x4Qfms(i.OutputSimd128Register(), i.InputSimd128Register(0),
+ i.InputSimd128Register(1), i.InputSimd128Register(2),
+ kScratchDoubleReg);
break;
}
case kX64F32x4Pmin: {
@@ -3084,21 +3076,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4SConvertF32x4: {
- DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
- XMMRegister dst = i.OutputSimd128Register();
- // NAN->0
- __ Movaps(kScratchDoubleReg, dst);
- __ Cmpeqps(kScratchDoubleReg, kScratchDoubleReg);
- __ Pand(dst, kScratchDoubleReg);
- // Set top bit if >= 0 (but not -0.0!)
- __ Pxor(kScratchDoubleReg, dst);
- // Convert
- __ Cvttps2dq(dst, dst);
- // Set top bit if >=0 is now < 0
- __ Pand(kScratchDoubleReg, dst);
- __ Psrad(kScratchDoubleReg, byte{31});
- // Set positive overflow lanes to 0x7FFFFFFF
- __ Pxor(dst, kScratchDoubleReg);
+ __ I32x4SConvertF32x4(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ kScratchRegister);
break;
}
case kX64I32x4SConvertI16x8Low: {
@@ -3252,21 +3232,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I32x4ExtAddPairwiseI16x8S: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src1 = i.InputSimd128Register(0);
- // pmaddwd multiplies signed words in src1 and src2, producing signed
- // doublewords, then adds pairwise.
- // src1 = |a|b|c|d|e|f|g|h|
- // src2 = |1|1|1|1|1|1|1|1|
- // dst = | a*1 + b*1 | c*1 + d*1 | e*1 + f*1 | g*1 + h*1 |
- Operand src2 = __ ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i16x8_splat_0x0001());
- __ Pmaddwd(dst, src1, src2);
+ __ I32x4ExtAddPairwiseI16x8S(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchRegister);
break;
}
case kX64I32x4ExtAddPairwiseI16x8U: {
__ I32x4ExtAddPairwiseI16x8U(i.OutputSimd128Register(),
- i.InputSimd128Register(0));
+ i.InputSimd128Register(0),
+ kScratchDoubleReg);
break;
}
case kX64S128Const: {
@@ -3293,12 +3266,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
case kX64I16x8Splat: {
XMMRegister dst = i.OutputSimd128Register();
if (HasRegisterInput(instr, 0)) {
- __ Movd(dst, i.InputRegister(0));
+ __ I16x8Splat(dst, i.InputRegister(0));
} else {
- __ Movd(dst, i.InputOperand(0));
+ __ I16x8Splat(dst, i.InputOperand(0));
}
- __ Pshuflw(dst, dst, uint8_t{0x0});
- __ Pshufd(dst, dst, uint8_t{0x0});
break;
}
case kX64I16x8ExtractLaneS: {
@@ -3481,43 +3452,27 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I16x8ExtAddPairwiseI8x16S: {
__ I16x8ExtAddPairwiseI8x16S(i.OutputSimd128Register(),
- i.InputSimd128Register(0));
+ i.InputSimd128Register(0), kScratchDoubleReg,
+ kScratchRegister);
break;
}
case kX64I16x8ExtAddPairwiseI8x16U: {
- XMMRegister dst = i.OutputSimd128Register();
- XMMRegister src1 = i.InputSimd128Register(0);
- Operand src2 = __ ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x01());
- __ Pmaddubsw(dst, src1, src2);
+ __ I16x8ExtAddPairwiseI8x16U(i.OutputSimd128Register(),
+ i.InputSimd128Register(0), kScratchRegister);
break;
}
case kX64I16x8Q15MulRSatS: {
__ I16x8Q15MulRSatS(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1));
+ i.InputSimd128Register(1), kScratchDoubleReg);
break;
}
case kX64I8x16Splat: {
XMMRegister dst = i.OutputSimd128Register();
- if (CpuFeatures::IsSupported(AVX2)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- CpuFeatureScope avx2_scope(tasm(), AVX2);
- if (HasRegisterInput(instr, 0)) {
- __ vmovd(kScratchDoubleReg, i.InputRegister(0));
- __ vpbroadcastb(dst, kScratchDoubleReg);
- } else {
- __ vpbroadcastb(dst, i.InputOperand(0));
- }
+ if (HasRegisterInput(instr, 0)) {
+ __ I8x16Splat(dst, i.InputRegister(0), kScratchDoubleReg);
} else {
- if (HasRegisterInput(instr, 0)) {
- __ Movd(dst, i.InputRegister(0));
- } else {
- __ Movd(dst, i.InputOperand(0));
- }
- __ Xorps(kScratchDoubleReg, kScratchDoubleReg);
- __ Pshufb(dst, kScratchDoubleReg);
+ __ I8x16Splat(dst, i.InputOperand(0), kScratchDoubleReg);
}
-
break;
}
case kX64Pextrb: {
@@ -3586,66 +3541,26 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I8x16Shl: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- // Temp registers for shift mask and additional moves to XMM registers.
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
if (HasImmediateInput(instr, 1)) {
- // Perform 16-bit shift, then mask away low bits.
- uint8_t shift = i.InputInt3(1);
- __ Psllw(dst, byte{shift});
-
- uint8_t bmask = static_cast<uint8_t>(0xff << shift);
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- __ movl(tmp, Immediate(mask));
- __ Movd(tmp_simd, tmp);
- __ Pshufd(tmp_simd, tmp_simd, uint8_t{0});
- __ Pand(dst, tmp_simd);
+ __ I8x16Shl(dst, src, i.InputInt3(1), kScratchRegister,
+ kScratchDoubleReg);
} else {
- // Mask off the unwanted bits before word-shifting.
- __ Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- // Take shift value modulo 8.
- __ movq(tmp, i.InputRegister(1));
- __ andq(tmp, Immediate(7));
- __ addq(tmp, Immediate(8));
- __ Movq(tmp_simd, tmp);
- __ Psrlw(kScratchDoubleReg, tmp_simd);
- __ Packuswb(kScratchDoubleReg, kScratchDoubleReg);
- __ Pand(dst, kScratchDoubleReg);
- // TODO(zhin): subq here to avoid asking for another temporary register,
- // examine codegen for other i8x16 shifts, they use less instructions.
- __ subq(tmp, Immediate(8));
- __ Movq(tmp_simd, tmp);
- __ Psllw(dst, tmp_simd);
+ __ I8x16Shl(dst, src, i.InputRegister(1), kScratchRegister,
+ kScratchDoubleReg, i.TempSimd128Register(0));
}
break;
}
case kX64I8x16ShrS: {
XMMRegister dst = i.OutputSimd128Register();
- DCHECK_EQ(dst, i.InputSimd128Register(0));
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
if (HasImmediateInput(instr, 1)) {
- __ Punpckhbw(kScratchDoubleReg, dst);
- __ Punpcklbw(dst, dst);
- uint8_t shift = i.InputInt3(1) + 8;
- __ Psraw(kScratchDoubleReg, shift);
- __ Psraw(dst, shift);
- __ Packsswb(dst, kScratchDoubleReg);
+ __ I8x16ShrS(dst, src, i.InputInt3(1), kScratchDoubleReg);
} else {
- // Temp registers for shift mask and additional moves to XMM registers.
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
- // Unpack the bytes into words, do arithmetic shifts, and repack.
- __ Punpckhbw(kScratchDoubleReg, dst);
- __ Punpcklbw(dst, dst);
- // Prepare shift value
- __ movq(tmp, i.InputRegister(1));
- // Take shift value modulo 8.
- __ andq(tmp, Immediate(7));
- __ addq(tmp, Immediate(8));
- __ Movq(tmp_simd, tmp);
- __ Psraw(kScratchDoubleReg, tmp_simd);
- __ Psraw(dst, tmp_simd);
- __ Packsswb(dst, kScratchDoubleReg);
+ __ I8x16ShrS(dst, src, i.InputRegister(1), kScratchRegister,
+ kScratchDoubleReg, i.TempSimd128Register(0));
}
break;
}
@@ -3701,34 +3616,14 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I8x16ShrU: {
XMMRegister dst = i.OutputSimd128Register();
- // Unpack the bytes into words, do logical shifts, and repack.
- DCHECK_EQ(dst, i.InputSimd128Register(0));
- // Temp registers for shift mask and additional moves to XMM registers.
- Register tmp = i.ToRegister(instr->TempAt(0));
- XMMRegister tmp_simd = i.TempSimd128Register(1);
+ XMMRegister src = i.InputSimd128Register(0);
+ DCHECK_IMPLIES(!CpuFeatures::IsSupported(AVX), dst == src);
if (HasImmediateInput(instr, 1)) {
- // Perform 16-bit shift, then mask away high bits.
- uint8_t shift = i.InputInt3(1);
- __ Psrlw(dst, byte{shift});
-
- uint8_t bmask = 0xff >> shift;
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- __ movl(tmp, Immediate(mask));
- __ Movd(tmp_simd, tmp);
- __ Pshufd(tmp_simd, tmp_simd, byte{0});
- __ Pand(dst, tmp_simd);
+ __ I8x16ShrU(dst, src, i.InputInt3(1), kScratchRegister,
+ kScratchDoubleReg);
} else {
- __ Punpckhbw(kScratchDoubleReg, dst);
- __ Punpcklbw(dst, dst);
- // Prepare shift value
- __ movq(tmp, i.InputRegister(1));
- // Take shift value modulo 8.
- __ andq(tmp, Immediate(7));
- __ addq(tmp, Immediate(8));
- __ Movq(tmp_simd, tmp);
- __ Psrlw(kScratchDoubleReg, tmp_simd);
- __ Psrlw(dst, tmp_simd);
- __ Packuswb(dst, kScratchDoubleReg);
+ __ I8x16ShrU(dst, src, i.InputRegister(1), kScratchRegister,
+ kScratchDoubleReg, i.TempSimd128Register(0));
}
break;
}
@@ -3834,9 +3729,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kX64I8x16Swizzle: {
- bool omit_add = MiscField::decode(instr->opcode());
__ I8x16Swizzle(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.InputSimd128Register(1), omit_add);
+ i.InputSimd128Register(1), kScratchDoubleReg,
+ kScratchRegister, MiscField::decode(instr->opcode()));
break;
}
case kX64I8x16Shuffle: {
@@ -3888,45 +3783,25 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kX64I8x16Popcnt: {
__ I8x16Popcnt(i.OutputSimd128Register(), i.InputSimd128Register(0),
- i.TempSimd128Register(0));
+ i.TempSimd128Register(0), kScratchDoubleReg,
+ kScratchRegister);
break;
}
case kX64S128Load8Splat: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- XMMRegister dst = i.OutputSimd128Register();
- if (CpuFeatures::IsSupported(AVX2)) {
- CpuFeatureScope avx2_scope(tasm(), AVX2);
- __ vpbroadcastb(dst, i.MemoryOperand());
- } else {
- __ Pinsrb(dst, dst, i.MemoryOperand(), 0);
- __ Pxor(kScratchDoubleReg, kScratchDoubleReg);
- __ Pshufb(dst, kScratchDoubleReg);
- }
+ __ S128Load8Splat(i.OutputSimd128Register(), i.MemoryOperand(),
+ kScratchDoubleReg);
break;
}
case kX64S128Load16Splat: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- XMMRegister dst = i.OutputSimd128Register();
- if (CpuFeatures::IsSupported(AVX2)) {
- CpuFeatureScope avx2_scope(tasm(), AVX2);
- __ vpbroadcastw(dst, i.MemoryOperand());
- } else {
- __ Pinsrw(dst, dst, i.MemoryOperand(), 0);
- __ Pshuflw(dst, dst, uint8_t{0});
- __ Punpcklqdq(dst, dst);
- }
+ __ S128Load16Splat(i.OutputSimd128Register(), i.MemoryOperand(),
+ kScratchDoubleReg);
break;
}
case kX64S128Load32Splat: {
EmitOOLTrapIfNeeded(zone(), this, opcode, instr, __ pc_offset());
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(tasm(), AVX);
- __ vbroadcastss(i.OutputSimd128Register(), i.MemoryOperand());
- } else {
- __ movss(i.OutputSimd128Register(), i.MemoryOperand());
- __ shufps(i.OutputSimd128Register(), i.OutputSimd128Register(),
- byte{0});
- }
+ __ S128Load32Splat(i.OutputSimd128Register(), i.MemoryOperand());
break;
}
case kX64S128Load64Splat: {
@@ -4049,10 +3924,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
uint8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
if (lane < 4) {
ASSEMBLE_SIMD_IMM_INSTR(Pshuflw, dst, 0, half_dup);
- __ Pshufd(dst, dst, uint8_t{0});
+ __ Punpcklqdq(dst, dst);
} else {
ASSEMBLE_SIMD_IMM_INSTR(Pshufhw, dst, 0, half_dup);
- __ Pshufd(dst, dst, uint8_t{0xaa});
+ __ Punpckhqdq(dst, dst);
}
break;
}
@@ -4070,10 +3945,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
uint8_t half_dup = lane4 | (lane4 << 2) | (lane4 << 4) | (lane4 << 6);
if (lane < 4) {
__ Pshuflw(dst, dst, half_dup);
- __ Pshufd(dst, dst, uint8_t{0});
+ __ Punpcklqdq(dst, dst);
} else {
__ Pshufhw(dst, dst, half_dup);
- __ Pshufd(dst, dst, uint8_t{0xaa});
+ __ Punpckhqdq(dst, dst);
}
break;
}
@@ -4232,156 +4107,180 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_SIMD_ALL_TRUE(Pcmpeqb);
break;
}
- case kWord32AtomicExchangeInt8: {
+ case kAtomicStoreWord8: {
+ ASSEMBLE_SEQ_CST_STORE(MachineRepresentation::kWord8);
+ break;
+ }
+ case kAtomicStoreWord16: {
+ ASSEMBLE_SEQ_CST_STORE(MachineRepresentation::kWord16);
+ break;
+ }
+ case kAtomicStoreWord32: {
+ ASSEMBLE_SEQ_CST_STORE(MachineRepresentation::kWord32);
+ break;
+ }
+ case kX64Word64AtomicStoreWord64: {
+ ASSEMBLE_SEQ_CST_STORE(MachineRepresentation::kWord64);
+ break;
+ }
+ case kAtomicExchangeInt8: {
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
__ xchgb(i.InputRegister(0), i.MemoryOperand(1));
__ movsxbl(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kWord32AtomicExchangeUint8: {
+ case kAtomicExchangeUint8: {
__ xchgb(i.InputRegister(0), i.MemoryOperand(1));
- __ movzxbl(i.InputRegister(0), i.InputRegister(0));
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ movzxbl(i.InputRegister(0), i.InputRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ __ movzxbq(i.InputRegister(0), i.InputRegister(0));
+ break;
+ }
break;
}
- case kWord32AtomicExchangeInt16: {
+ case kAtomicExchangeInt16: {
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
__ xchgw(i.InputRegister(0), i.MemoryOperand(1));
__ movsxwl(i.InputRegister(0), i.InputRegister(0));
break;
}
- case kWord32AtomicExchangeUint16: {
+ case kAtomicExchangeUint16: {
__ xchgw(i.InputRegister(0), i.MemoryOperand(1));
- __ movzxwl(i.InputRegister(0), i.InputRegister(0));
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ movzxwl(i.InputRegister(0), i.InputRegister(0));
+ break;
+ case AtomicWidth::kWord64:
+ __ movzxwq(i.InputRegister(0), i.InputRegister(0));
+ break;
+ }
break;
}
- case kWord32AtomicExchangeWord32: {
+ case kAtomicExchangeWord32: {
__ xchgl(i.InputRegister(0), i.MemoryOperand(1));
break;
}
- case kWord32AtomicCompareExchangeInt8: {
+ case kAtomicCompareExchangeInt8: {
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
__ lock();
__ cmpxchgb(i.MemoryOperand(2), i.InputRegister(1));
__ movsxbl(rax, rax);
break;
}
- case kWord32AtomicCompareExchangeUint8: {
+ case kAtomicCompareExchangeUint8: {
__ lock();
__ cmpxchgb(i.MemoryOperand(2), i.InputRegister(1));
- __ movzxbl(rax, rax);
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ movzxbl(rax, rax);
+ break;
+ case AtomicWidth::kWord64:
+ __ movzxbq(rax, rax);
+ break;
+ }
break;
}
- case kWord32AtomicCompareExchangeInt16: {
+ case kAtomicCompareExchangeInt16: {
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32);
__ lock();
__ cmpxchgw(i.MemoryOperand(2), i.InputRegister(1));
__ movsxwl(rax, rax);
break;
}
- case kWord32AtomicCompareExchangeUint16: {
+ case kAtomicCompareExchangeUint16: {
__ lock();
__ cmpxchgw(i.MemoryOperand(2), i.InputRegister(1));
- __ movzxwl(rax, rax);
+ switch (AtomicWidthField::decode(opcode)) {
+ case AtomicWidth::kWord32:
+ __ movzxwl(rax, rax);
+ break;
+ case AtomicWidth::kWord64:
+ __ movzxwq(rax, rax);
+ break;
+ }
break;
}
- case kWord32AtomicCompareExchangeWord32: {
+ case kAtomicCompareExchangeWord32: {
__ lock();
__ cmpxchgl(i.MemoryOperand(2), i.InputRegister(1));
- break;
- }
-#define ATOMIC_BINOP_CASE(op, inst) \
- case kWord32Atomic##op##Int8: \
- ASSEMBLE_ATOMIC_BINOP(inst, movb, cmpxchgb); \
- __ movsxbl(rax, rax); \
- break; \
- case kWord32Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC_BINOP(inst, movb, cmpxchgb); \
- __ movzxbl(rax, rax); \
- break; \
- case kWord32Atomic##op##Int16: \
- ASSEMBLE_ATOMIC_BINOP(inst, movw, cmpxchgw); \
- __ movsxwl(rax, rax); \
- break; \
- case kWord32Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC_BINOP(inst, movw, cmpxchgw); \
- __ movzxwl(rax, rax); \
- break; \
- case kWord32Atomic##op##Word32: \
- ASSEMBLE_ATOMIC_BINOP(inst, movl, cmpxchgl); \
- break;
- ATOMIC_BINOP_CASE(Add, addl)
- ATOMIC_BINOP_CASE(Sub, subl)
- ATOMIC_BINOP_CASE(And, andl)
- ATOMIC_BINOP_CASE(Or, orl)
- ATOMIC_BINOP_CASE(Xor, xorl)
-#undef ATOMIC_BINOP_CASE
- case kX64Word64AtomicExchangeUint8: {
- __ xchgb(i.InputRegister(0), i.MemoryOperand(1));
- __ movzxbq(i.InputRegister(0), i.InputRegister(0));
- break;
- }
- case kX64Word64AtomicExchangeUint16: {
- __ xchgw(i.InputRegister(0), i.MemoryOperand(1));
- __ movzxwq(i.InputRegister(0), i.InputRegister(0));
- break;
- }
- case kX64Word64AtomicExchangeUint32: {
- __ xchgl(i.InputRegister(0), i.MemoryOperand(1));
+ if (AtomicWidthField::decode(opcode) == AtomicWidth::kWord64) {
+ // Zero-extend the 32-bit value to 64 bits.
+ __ movl(rax, rax);
+ }
break;
}
case kX64Word64AtomicExchangeUint64: {
__ xchgq(i.InputRegister(0), i.MemoryOperand(1));
break;
}
- case kX64Word64AtomicCompareExchangeUint8: {
- __ lock();
- __ cmpxchgb(i.MemoryOperand(2), i.InputRegister(1));
- __ movzxbq(rax, rax);
- break;
- }
- case kX64Word64AtomicCompareExchangeUint16: {
- __ lock();
- __ cmpxchgw(i.MemoryOperand(2), i.InputRegister(1));
- __ movzxwq(rax, rax);
- break;
- }
- case kX64Word64AtomicCompareExchangeUint32: {
- __ lock();
- __ cmpxchgl(i.MemoryOperand(2), i.InputRegister(1));
- // Zero-extend the 32-bit value to 64 bits.
- __ movl(rax, rax);
- break;
- }
case kX64Word64AtomicCompareExchangeUint64: {
__ lock();
__ cmpxchgq(i.MemoryOperand(2), i.InputRegister(1));
break;
}
-#define ATOMIC64_BINOP_CASE(op, inst) \
- case kX64Word64Atomic##op##Uint8: \
- ASSEMBLE_ATOMIC64_BINOP(inst, movb, cmpxchgb); \
- __ movzxbq(rax, rax); \
- break; \
- case kX64Word64Atomic##op##Uint16: \
- ASSEMBLE_ATOMIC64_BINOP(inst, movw, cmpxchgw); \
- __ movzxwq(rax, rax); \
- break; \
- case kX64Word64Atomic##op##Uint32: \
- ASSEMBLE_ATOMIC64_BINOP(inst, movl, cmpxchgl); \
- break; \
- case kX64Word64Atomic##op##Uint64: \
- ASSEMBLE_ATOMIC64_BINOP(inst, movq, cmpxchgq); \
+#define ATOMIC_BINOP_CASE(op, inst32, inst64) \
+ case kAtomic##op##Int8: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP(inst32, movb, cmpxchgb); \
+ __ movsxbl(rax, rax); \
+ break; \
+ case kAtomic##op##Uint8: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP(inst32, movb, cmpxchgb); \
+ __ movzxbl(rax, rax); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC64_BINOP(inst64, movb, cmpxchgb); \
+ __ movzxbq(rax, rax); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Int16: \
+ DCHECK_EQ(AtomicWidthField::decode(opcode), AtomicWidth::kWord32); \
+ ASSEMBLE_ATOMIC_BINOP(inst32, movw, cmpxchgw); \
+ __ movsxwl(rax, rax); \
+ break; \
+ case kAtomic##op##Uint16: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP(inst32, movw, cmpxchgw); \
+ __ movzxwl(rax, rax); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC64_BINOP(inst64, movw, cmpxchgw); \
+ __ movzxwq(rax, rax); \
+ break; \
+ } \
+ break; \
+ case kAtomic##op##Word32: \
+ switch (AtomicWidthField::decode(opcode)) { \
+ case AtomicWidth::kWord32: \
+ ASSEMBLE_ATOMIC_BINOP(inst32, movl, cmpxchgl); \
+ break; \
+ case AtomicWidth::kWord64: \
+ ASSEMBLE_ATOMIC64_BINOP(inst64, movl, cmpxchgl); \
+ break; \
+ } \
+ break; \
+ case kX64Word64Atomic##op##Uint64: \
+ ASSEMBLE_ATOMIC64_BINOP(inst64, movq, cmpxchgq); \
break;
- ATOMIC64_BINOP_CASE(Add, addq)
- ATOMIC64_BINOP_CASE(Sub, subq)
- ATOMIC64_BINOP_CASE(And, andq)
- ATOMIC64_BINOP_CASE(Or, orq)
- ATOMIC64_BINOP_CASE(Xor, xorq)
-#undef ATOMIC64_BINOP_CASE
- case kWord32AtomicLoadInt8:
- case kWord32AtomicLoadUint8:
- case kWord32AtomicLoadInt16:
- case kWord32AtomicLoadUint16:
- case kWord32AtomicLoadWord32:
- case kWord32AtomicStoreWord8:
- case kWord32AtomicStoreWord16:
- case kWord32AtomicStoreWord32:
+ ATOMIC_BINOP_CASE(Add, addl, addq)
+ ATOMIC_BINOP_CASE(Sub, subl, subq)
+ ATOMIC_BINOP_CASE(And, andl, andq)
+ ATOMIC_BINOP_CASE(Or, orl, orq)
+ ATOMIC_BINOP_CASE(Xor, xorl, xorq)
+#undef ATOMIC_BINOP_CASE
+
+ case kAtomicLoadInt8:
+ case kAtomicLoadUint8:
+ case kAtomicLoadInt16:
+ case kAtomicLoadUint16:
+ case kAtomicLoadWord32:
UNREACHABLE(); // Won't be generated by instruction selector.
}
return kSuccess;
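
One detail worth noting in the kWord64 exchange and compare-exchange cases above: an 8-bit or 16-bit xchg/cmpxchg writes only the low bits of the destination register, so the stale upper bits must be cleared with an explicit movzxbq/movzxwq before a 64-bit consumer reads the result. A small sketch of the same contract in portable C++ (the helper name is hypothetical):

#include <atomic>
#include <cstdint>

// An 8-bit atomic exchange whose result feeds a 64-bit consumer must be
// widened explicitly; the movzxbq after xchgb plays this role above.
uint64_t ExchangeByteAsWord64(std::atomic<uint8_t>* slot, uint8_t new_value) {
  uint8_t old_value = slot->exchange(new_value);  // xchgb
  return static_cast<uint64_t>(old_value);        // movzxbq
}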
@@ -4407,6 +4306,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
#undef ASSEMBLE_SIMD_IMM_SHUFFLE
#undef ASSEMBLE_SIMD_ALL_TRUE
#undef ASSEMBLE_SIMD_SHIFT
+#undef ASSEMBLE_SEQ_CST_STORE
namespace {
@@ -4462,19 +4362,6 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
if (!branch->fallthru) __ jmp(flabel, flabel_distance);
}
-void CodeGenerator::AssembleBranchPoisoning(FlagsCondition condition,
- Instruction* instr) {
- // TODO(jarin) Handle float comparisons (kUnordered[Not]Equal).
- if (condition == kUnorderedEqual || condition == kUnorderedNotEqual) {
- return;
- }
-
- condition = NegateFlagsCondition(condition);
- __ Move(kScratchRegister, 0);
- __ cmovq(FlagsConditionToCondition(condition), kSpeculationPoisonRegister,
- kScratchRegister);
-}
-
void CodeGenerator::AssembleArchDeoptBranch(Instruction* instr,
BranchInfo* branch) {
Label::Distance flabel_distance =
@@ -4716,7 +4603,6 @@ void CodeGenerator::AssembleConstructFrame() {
__ RecordComment("-- OSR entrypoint --");
osr_pc_offset_ = __ pc_offset();
required_slots -= static_cast<int>(osr_helper()->UnoptimizedFrameSlots());
- ResetSpeculationPoison();
}
const RegList saves = call_descriptor->CalleeSavedRegisters();
@@ -4876,18 +4762,24 @@ void CodeGenerator::AssembleReturn(InstructionOperand* additional_pop_count) {
// The number of arguments without the receiver is
// max(argc_reg, parameter_slots-1), and the receiver is added in
// DropArguments().
- int parameter_slots_without_receiver = parameter_slots - 1;
Label mismatch_return;
Register scratch_reg = r10;
DCHECK_NE(argc_reg, scratch_reg);
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & scratch_reg.bit());
DCHECK_EQ(0u, call_descriptor->CalleeSavedRegisters() & argc_reg.bit());
- __ cmpq(argc_reg, Immediate(parameter_slots_without_receiver));
+ if (kJSArgcIncludesReceiver) {
+ __ cmpq(argc_reg, Immediate(parameter_slots));
+ } else {
+ int parameter_slots_without_receiver = parameter_slots - 1;
+ __ cmpq(argc_reg, Immediate(parameter_slots_without_receiver));
+ }
__ j(greater, &mismatch_return, Label::kNear);
__ Ret(parameter_slots * kSystemPointerSize, scratch_reg);
__ bind(&mismatch_return);
__ DropArguments(argc_reg, scratch_reg, TurboAssembler::kCountIsInteger,
- TurboAssembler::kCountExcludesReceiver);
+ kJSArgcIncludesReceiver
+ ? TurboAssembler::kCountIncludesReceiver
+ : TurboAssembler::kCountExcludesReceiver);
// We use a return instead of a jump for better return address prediction.
__ Ret();
} else if (additional_pop_count->IsImmediate()) {
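
A hedged sketch of the argument-count accounting behind the kJSArgcIncludesReceiver branch above: the threshold that argc is compared against either includes or excludes the receiver slot, and the mismatch path then drops the larger of the two counts, with the receiver itself handled inside DropArguments(). All names below are hypothetical, not V8 API:

#include <algorithm>
#include <cstdint>

// Declared parameter count to compare argc against, depending on whether
// argc already counts the receiver slot.
int64_t ArgcThreshold(int64_t parameter_slots, bool argc_includes_receiver) {
  return argc_includes_receiver ? parameter_slots : parameter_slots - 1;
}

// The mismatch path drops max(argc, threshold) argument slots; the
// receiver adjustment happens inside DropArguments().
int64_t SlotsToDrop(int64_t argc, int64_t parameter_slots,
                    bool argc_includes_receiver) {
  return std::max(argc, ArgcThreshold(parameter_slots,
                                      argc_includes_receiver));
}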
diff --git a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
index eba23dcfa9..e7fe45c5de 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
+++ b/deps/v8/src/compiler/backend/x64/instruction-codes-x64.h
@@ -11,413 +11,389 @@ namespace compiler {
// X64-specific opcodes that specify which assembly sequence to emit.
// Most opcodes specify a single instruction.
-#define TARGET_ARCH_OPCODE_LIST(V) \
- V(X64Add) \
- V(X64Add32) \
- V(X64And) \
- V(X64And32) \
- V(X64Cmp) \
- V(X64Cmp32) \
- V(X64Cmp16) \
- V(X64Cmp8) \
- V(X64Test) \
- V(X64Test32) \
- V(X64Test16) \
- V(X64Test8) \
- V(X64Or) \
- V(X64Or32) \
- V(X64Xor) \
- V(X64Xor32) \
- V(X64Sub) \
- V(X64Sub32) \
- V(X64Imul) \
- V(X64Imul32) \
- V(X64ImulHigh32) \
- V(X64UmulHigh32) \
- V(X64Idiv) \
- V(X64Idiv32) \
- V(X64Udiv) \
- V(X64Udiv32) \
- V(X64Not) \
- V(X64Not32) \
- V(X64Neg) \
- V(X64Neg32) \
- V(X64Shl) \
- V(X64Shl32) \
- V(X64Shr) \
- V(X64Shr32) \
- V(X64Sar) \
- V(X64Sar32) \
- V(X64Rol) \
- V(X64Rol32) \
- V(X64Ror) \
- V(X64Ror32) \
- V(X64Lzcnt) \
- V(X64Lzcnt32) \
- V(X64Tzcnt) \
- V(X64Tzcnt32) \
- V(X64Popcnt) \
- V(X64Popcnt32) \
- V(X64Bswap) \
- V(X64Bswap32) \
- V(X64MFence) \
- V(X64LFence) \
- V(SSEFloat32Cmp) \
- V(SSEFloat32Add) \
- V(SSEFloat32Sub) \
- V(SSEFloat32Mul) \
- V(SSEFloat32Div) \
- V(SSEFloat32Abs) \
- V(SSEFloat32Neg) \
- V(SSEFloat32Sqrt) \
- V(SSEFloat32ToFloat64) \
- V(SSEFloat32ToInt32) \
- V(SSEFloat32ToUint32) \
- V(SSEFloat32Round) \
- V(SSEFloat64Cmp) \
- V(SSEFloat64Add) \
- V(SSEFloat64Sub) \
- V(SSEFloat64Mul) \
- V(SSEFloat64Div) \
- V(SSEFloat64Mod) \
- V(SSEFloat64Abs) \
- V(SSEFloat64Neg) \
- V(SSEFloat64Sqrt) \
- V(SSEFloat64Round) \
- V(SSEFloat32Max) \
- V(SSEFloat64Max) \
- V(SSEFloat32Min) \
- V(SSEFloat64Min) \
- V(SSEFloat64ToFloat32) \
- V(SSEFloat64ToInt32) \
- V(SSEFloat64ToUint32) \
- V(SSEFloat32ToInt64) \
- V(SSEFloat64ToInt64) \
- V(SSEFloat32ToUint64) \
- V(SSEFloat64ToUint64) \
- V(SSEInt32ToFloat64) \
- V(SSEInt32ToFloat32) \
- V(SSEInt64ToFloat32) \
- V(SSEInt64ToFloat64) \
- V(SSEUint64ToFloat32) \
- V(SSEUint64ToFloat64) \
- V(SSEUint32ToFloat64) \
- V(SSEUint32ToFloat32) \
- V(SSEFloat64ExtractLowWord32) \
- V(SSEFloat64ExtractHighWord32) \
- V(SSEFloat64InsertLowWord32) \
- V(SSEFloat64InsertHighWord32) \
- V(SSEFloat64LoadLowWord32) \
- V(SSEFloat64SilenceNaN) \
- V(AVXFloat32Cmp) \
- V(AVXFloat32Add) \
- V(AVXFloat32Sub) \
- V(AVXFloat32Mul) \
- V(AVXFloat32Div) \
- V(AVXFloat64Cmp) \
- V(AVXFloat64Add) \
- V(AVXFloat64Sub) \
- V(AVXFloat64Mul) \
- V(AVXFloat64Div) \
- V(AVXFloat64Abs) \
- V(AVXFloat64Neg) \
- V(AVXFloat32Abs) \
- V(AVXFloat32Neg) \
- V(X64Movsxbl) \
- V(X64Movzxbl) \
- V(X64Movsxbq) \
- V(X64Movzxbq) \
- V(X64Movb) \
- V(X64Movsxwl) \
- V(X64Movzxwl) \
- V(X64Movsxwq) \
- V(X64Movzxwq) \
- V(X64Movw) \
- V(X64Movl) \
- V(X64Movsxlq) \
- V(X64MovqDecompressTaggedSigned) \
- V(X64MovqDecompressTaggedPointer) \
- V(X64MovqDecompressAnyTagged) \
- V(X64MovqCompressTagged) \
- V(X64Movq) \
- V(X64Movsd) \
- V(X64Movss) \
- V(X64Movdqu) \
- V(X64BitcastFI) \
- V(X64BitcastDL) \
- V(X64BitcastIF) \
- V(X64BitcastLD) \
- V(X64Lea32) \
- V(X64Lea) \
- V(X64Dec32) \
- V(X64Inc32) \
- V(X64Push) \
- V(X64Poke) \
- V(X64Peek) \
- V(X64F64x2Splat) \
- V(X64F64x2ExtractLane) \
- V(X64F64x2ReplaceLane) \
- V(X64F64x2Abs) \
- V(X64F64x2Neg) \
- V(X64F64x2Sqrt) \
- V(X64F64x2Add) \
- V(X64F64x2Sub) \
- V(X64F64x2Mul) \
- V(X64F64x2Div) \
- V(X64F64x2Min) \
- V(X64F64x2Max) \
- V(X64F64x2Eq) \
- V(X64F64x2Ne) \
- V(X64F64x2Lt) \
- V(X64F64x2Le) \
- V(X64F64x2Qfma) \
- V(X64F64x2Qfms) \
- V(X64F64x2Pmin) \
- V(X64F64x2Pmax) \
- V(X64F64x2Round) \
- V(X64F64x2ConvertLowI32x4S) \
- V(X64F64x2ConvertLowI32x4U) \
- V(X64F64x2PromoteLowF32x4) \
- V(X64F32x4Splat) \
- V(X64F32x4ExtractLane) \
- V(X64F32x4ReplaceLane) \
- V(X64F32x4SConvertI32x4) \
- V(X64F32x4UConvertI32x4) \
- V(X64F32x4Abs) \
- V(X64F32x4Neg) \
- V(X64F32x4Sqrt) \
- V(X64F32x4RecipApprox) \
- V(X64F32x4RecipSqrtApprox) \
- V(X64F32x4Add) \
- V(X64F32x4Sub) \
- V(X64F32x4Mul) \
- V(X64F32x4Div) \
- V(X64F32x4Min) \
- V(X64F32x4Max) \
- V(X64F32x4Eq) \
- V(X64F32x4Ne) \
- V(X64F32x4Lt) \
- V(X64F32x4Le) \
- V(X64F32x4Qfma) \
- V(X64F32x4Qfms) \
- V(X64F32x4Pmin) \
- V(X64F32x4Pmax) \
- V(X64F32x4Round) \
- V(X64F32x4DemoteF64x2Zero) \
- V(X64I64x2Splat) \
- V(X64I64x2ExtractLane) \
- V(X64I64x2Abs) \
- V(X64I64x2Neg) \
- V(X64I64x2BitMask) \
- V(X64I64x2Shl) \
- V(X64I64x2ShrS) \
- V(X64I64x2Add) \
- V(X64I64x2Sub) \
- V(X64I64x2Mul) \
- V(X64I64x2Eq) \
- V(X64I64x2GtS) \
- V(X64I64x2GeS) \
- V(X64I64x2Ne) \
- V(X64I64x2ShrU) \
- V(X64I64x2ExtMulLowI32x4S) \
- V(X64I64x2ExtMulHighI32x4S) \
- V(X64I64x2ExtMulLowI32x4U) \
- V(X64I64x2ExtMulHighI32x4U) \
- V(X64I64x2SConvertI32x4Low) \
- V(X64I64x2SConvertI32x4High) \
- V(X64I64x2UConvertI32x4Low) \
- V(X64I64x2UConvertI32x4High) \
- V(X64I32x4Splat) \
- V(X64I32x4ExtractLane) \
- V(X64I32x4SConvertF32x4) \
- V(X64I32x4SConvertI16x8Low) \
- V(X64I32x4SConvertI16x8High) \
- V(X64I32x4Neg) \
- V(X64I32x4Shl) \
- V(X64I32x4ShrS) \
- V(X64I32x4Add) \
- V(X64I32x4Sub) \
- V(X64I32x4Mul) \
- V(X64I32x4MinS) \
- V(X64I32x4MaxS) \
- V(X64I32x4Eq) \
- V(X64I32x4Ne) \
- V(X64I32x4GtS) \
- V(X64I32x4GeS) \
- V(X64I32x4UConvertF32x4) \
- V(X64I32x4UConvertI16x8Low) \
- V(X64I32x4UConvertI16x8High) \
- V(X64I32x4ShrU) \
- V(X64I32x4MinU) \
- V(X64I32x4MaxU) \
- V(X64I32x4GtU) \
- V(X64I32x4GeU) \
- V(X64I32x4Abs) \
- V(X64I32x4BitMask) \
- V(X64I32x4DotI16x8S) \
- V(X64I32x4ExtMulLowI16x8S) \
- V(X64I32x4ExtMulHighI16x8S) \
- V(X64I32x4ExtMulLowI16x8U) \
- V(X64I32x4ExtMulHighI16x8U) \
- V(X64I32x4ExtAddPairwiseI16x8S) \
- V(X64I32x4ExtAddPairwiseI16x8U) \
- V(X64I32x4TruncSatF64x2SZero) \
- V(X64I32x4TruncSatF64x2UZero) \
- V(X64I16x8Splat) \
- V(X64I16x8ExtractLaneS) \
- V(X64I16x8SConvertI8x16Low) \
- V(X64I16x8SConvertI8x16High) \
- V(X64I16x8Neg) \
- V(X64I16x8Shl) \
- V(X64I16x8ShrS) \
- V(X64I16x8SConvertI32x4) \
- V(X64I16x8Add) \
- V(X64I16x8AddSatS) \
- V(X64I16x8Sub) \
- V(X64I16x8SubSatS) \
- V(X64I16x8Mul) \
- V(X64I16x8MinS) \
- V(X64I16x8MaxS) \
- V(X64I16x8Eq) \
- V(X64I16x8Ne) \
- V(X64I16x8GtS) \
- V(X64I16x8GeS) \
- V(X64I16x8UConvertI8x16Low) \
- V(X64I16x8UConvertI8x16High) \
- V(X64I16x8ShrU) \
- V(X64I16x8UConvertI32x4) \
- V(X64I16x8AddSatU) \
- V(X64I16x8SubSatU) \
- V(X64I16x8MinU) \
- V(X64I16x8MaxU) \
- V(X64I16x8GtU) \
- V(X64I16x8GeU) \
- V(X64I16x8RoundingAverageU) \
- V(X64I16x8Abs) \
- V(X64I16x8BitMask) \
- V(X64I16x8ExtMulLowI8x16S) \
- V(X64I16x8ExtMulHighI8x16S) \
- V(X64I16x8ExtMulLowI8x16U) \
- V(X64I16x8ExtMulHighI8x16U) \
- V(X64I16x8ExtAddPairwiseI8x16S) \
- V(X64I16x8ExtAddPairwiseI8x16U) \
- V(X64I16x8Q15MulRSatS) \
- V(X64I8x16Splat) \
- V(X64I8x16ExtractLaneS) \
- V(X64Pinsrb) \
- V(X64Pinsrw) \
- V(X64Pinsrd) \
- V(X64Pinsrq) \
- V(X64Pextrb) \
- V(X64Pextrw) \
- V(X64I8x16SConvertI16x8) \
- V(X64I8x16Neg) \
- V(X64I8x16Shl) \
- V(X64I8x16ShrS) \
- V(X64I8x16Add) \
- V(X64I8x16AddSatS) \
- V(X64I8x16Sub) \
- V(X64I8x16SubSatS) \
- V(X64I8x16MinS) \
- V(X64I8x16MaxS) \
- V(X64I8x16Eq) \
- V(X64I8x16Ne) \
- V(X64I8x16GtS) \
- V(X64I8x16GeS) \
- V(X64I8x16UConvertI16x8) \
- V(X64I8x16AddSatU) \
- V(X64I8x16SubSatU) \
- V(X64I8x16ShrU) \
- V(X64I8x16MinU) \
- V(X64I8x16MaxU) \
- V(X64I8x16GtU) \
- V(X64I8x16GeU) \
- V(X64I8x16RoundingAverageU) \
- V(X64I8x16Abs) \
- V(X64I8x16BitMask) \
- V(X64S128Const) \
- V(X64S128Zero) \
- V(X64S128AllOnes) \
- V(X64S128Not) \
- V(X64S128And) \
- V(X64S128Or) \
- V(X64S128Xor) \
- V(X64S128Select) \
- V(X64S128AndNot) \
- V(X64I8x16Swizzle) \
- V(X64I8x16Shuffle) \
- V(X64I8x16Popcnt) \
- V(X64S128Load8Splat) \
- V(X64S128Load16Splat) \
- V(X64S128Load32Splat) \
- V(X64S128Load64Splat) \
- V(X64S128Load8x8S) \
- V(X64S128Load8x8U) \
- V(X64S128Load16x4S) \
- V(X64S128Load16x4U) \
- V(X64S128Load32x2S) \
- V(X64S128Load32x2U) \
- V(X64S128Store32Lane) \
- V(X64S128Store64Lane) \
- V(X64Shufps) \
- V(X64S32x4Rotate) \
- V(X64S32x4Swizzle) \
- V(X64S32x4Shuffle) \
- V(X64S16x8Blend) \
- V(X64S16x8HalfShuffle1) \
- V(X64S16x8HalfShuffle2) \
- V(X64S8x16Alignr) \
- V(X64S16x8Dup) \
- V(X64S8x16Dup) \
- V(X64S16x8UnzipHigh) \
- V(X64S16x8UnzipLow) \
- V(X64S8x16UnzipHigh) \
- V(X64S8x16UnzipLow) \
- V(X64S64x2UnpackHigh) \
- V(X64S32x4UnpackHigh) \
- V(X64S16x8UnpackHigh) \
- V(X64S8x16UnpackHigh) \
- V(X64S64x2UnpackLow) \
- V(X64S32x4UnpackLow) \
- V(X64S16x8UnpackLow) \
- V(X64S8x16UnpackLow) \
- V(X64S8x16TransposeLow) \
- V(X64S8x16TransposeHigh) \
- V(X64S8x8Reverse) \
- V(X64S8x4Reverse) \
- V(X64S8x2Reverse) \
- V(X64V128AnyTrue) \
- V(X64I64x2AllTrue) \
- V(X64I32x4AllTrue) \
- V(X64I16x8AllTrue) \
- V(X64I8x16AllTrue) \
- V(X64Word64AtomicAddUint8) \
- V(X64Word64AtomicAddUint16) \
- V(X64Word64AtomicAddUint32) \
- V(X64Word64AtomicAddUint64) \
- V(X64Word64AtomicSubUint8) \
- V(X64Word64AtomicSubUint16) \
- V(X64Word64AtomicSubUint32) \
- V(X64Word64AtomicSubUint64) \
- V(X64Word64AtomicAndUint8) \
- V(X64Word64AtomicAndUint16) \
- V(X64Word64AtomicAndUint32) \
- V(X64Word64AtomicAndUint64) \
- V(X64Word64AtomicOrUint8) \
- V(X64Word64AtomicOrUint16) \
- V(X64Word64AtomicOrUint32) \
- V(X64Word64AtomicOrUint64) \
- V(X64Word64AtomicXorUint8) \
- V(X64Word64AtomicXorUint16) \
- V(X64Word64AtomicXorUint32) \
- V(X64Word64AtomicXorUint64) \
- V(X64Word64AtomicExchangeUint8) \
- V(X64Word64AtomicExchangeUint16) \
- V(X64Word64AtomicExchangeUint32) \
- V(X64Word64AtomicExchangeUint64) \
- V(X64Word64AtomicCompareExchangeUint8) \
- V(X64Word64AtomicCompareExchangeUint16) \
- V(X64Word64AtomicCompareExchangeUint32) \
+#define TARGET_ARCH_OPCODE_LIST(V) \
+ V(X64Add) \
+ V(X64Add32) \
+ V(X64And) \
+ V(X64And32) \
+ V(X64Cmp) \
+ V(X64Cmp32) \
+ V(X64Cmp16) \
+ V(X64Cmp8) \
+ V(X64Test) \
+ V(X64Test32) \
+ V(X64Test16) \
+ V(X64Test8) \
+ V(X64Or) \
+ V(X64Or32) \
+ V(X64Xor) \
+ V(X64Xor32) \
+ V(X64Sub) \
+ V(X64Sub32) \
+ V(X64Imul) \
+ V(X64Imul32) \
+ V(X64ImulHigh32) \
+ V(X64UmulHigh32) \
+ V(X64Idiv) \
+ V(X64Idiv32) \
+ V(X64Udiv) \
+ V(X64Udiv32) \
+ V(X64Not) \
+ V(X64Not32) \
+ V(X64Neg) \
+ V(X64Neg32) \
+ V(X64Shl) \
+ V(X64Shl32) \
+ V(X64Shr) \
+ V(X64Shr32) \
+ V(X64Sar) \
+ V(X64Sar32) \
+ V(X64Rol) \
+ V(X64Rol32) \
+ V(X64Ror) \
+ V(X64Ror32) \
+ V(X64Lzcnt) \
+ V(X64Lzcnt32) \
+ V(X64Tzcnt) \
+ V(X64Tzcnt32) \
+ V(X64Popcnt) \
+ V(X64Popcnt32) \
+ V(X64Bswap) \
+ V(X64Bswap32) \
+ V(X64MFence) \
+ V(X64LFence) \
+ V(SSEFloat32Cmp) \
+ V(SSEFloat32Add) \
+ V(SSEFloat32Sub) \
+ V(SSEFloat32Mul) \
+ V(SSEFloat32Div) \
+ V(SSEFloat32Sqrt) \
+ V(SSEFloat32ToFloat64) \
+ V(SSEFloat32ToInt32) \
+ V(SSEFloat32ToUint32) \
+ V(SSEFloat32Round) \
+ V(SSEFloat64Cmp) \
+ V(SSEFloat64Add) \
+ V(SSEFloat64Sub) \
+ V(SSEFloat64Mul) \
+ V(SSEFloat64Div) \
+ V(SSEFloat64Mod) \
+ V(SSEFloat64Sqrt) \
+ V(SSEFloat64Round) \
+ V(SSEFloat32Max) \
+ V(SSEFloat64Max) \
+ V(SSEFloat32Min) \
+ V(SSEFloat64Min) \
+ V(SSEFloat64ToFloat32) \
+ V(SSEFloat64ToInt32) \
+ V(SSEFloat64ToUint32) \
+ V(SSEFloat32ToInt64) \
+ V(SSEFloat64ToInt64) \
+ V(SSEFloat32ToUint64) \
+ V(SSEFloat64ToUint64) \
+ V(SSEInt32ToFloat64) \
+ V(SSEInt32ToFloat32) \
+ V(SSEInt64ToFloat32) \
+ V(SSEInt64ToFloat64) \
+ V(SSEUint64ToFloat32) \
+ V(SSEUint64ToFloat64) \
+ V(SSEUint32ToFloat64) \
+ V(SSEUint32ToFloat32) \
+ V(SSEFloat64ExtractLowWord32) \
+ V(SSEFloat64ExtractHighWord32) \
+ V(SSEFloat64InsertLowWord32) \
+ V(SSEFloat64InsertHighWord32) \
+ V(SSEFloat64LoadLowWord32) \
+ V(SSEFloat64SilenceNaN) \
+ V(AVXFloat32Cmp) \
+ V(AVXFloat32Add) \
+ V(AVXFloat32Sub) \
+ V(AVXFloat32Mul) \
+ V(AVXFloat32Div) \
+ V(AVXFloat64Cmp) \
+ V(AVXFloat64Add) \
+ V(AVXFloat64Sub) \
+ V(AVXFloat64Mul) \
+ V(AVXFloat64Div) \
+ V(X64Float64Abs) \
+ V(X64Float64Neg) \
+ V(X64Float32Abs) \
+ V(X64Float32Neg) \
+ V(X64Movsxbl) \
+ V(X64Movzxbl) \
+ V(X64Movsxbq) \
+ V(X64Movzxbq) \
+ V(X64Movb) \
+ V(X64Movsxwl) \
+ V(X64Movzxwl) \
+ V(X64Movsxwq) \
+ V(X64Movzxwq) \
+ V(X64Movw) \
+ V(X64Movl) \
+ V(X64Movsxlq) \
+ V(X64MovqDecompressTaggedSigned) \
+ V(X64MovqDecompressTaggedPointer) \
+ V(X64MovqDecompressAnyTagged) \
+ V(X64MovqCompressTagged) \
+ V(X64Movq) \
+ V(X64Movsd) \
+ V(X64Movss) \
+ V(X64Movdqu) \
+ V(X64BitcastFI) \
+ V(X64BitcastDL) \
+ V(X64BitcastIF) \
+ V(X64BitcastLD) \
+ V(X64Lea32) \
+ V(X64Lea) \
+ V(X64Dec32) \
+ V(X64Inc32) \
+ V(X64Push) \
+ V(X64Poke) \
+ V(X64Peek) \
+ V(X64F64x2Splat) \
+ V(X64F64x2ExtractLane) \
+ V(X64F64x2ReplaceLane) \
+ V(X64F64x2Abs) \
+ V(X64F64x2Neg) \
+ V(X64F64x2Sqrt) \
+ V(X64F64x2Add) \
+ V(X64F64x2Sub) \
+ V(X64F64x2Mul) \
+ V(X64F64x2Div) \
+ V(X64F64x2Min) \
+ V(X64F64x2Max) \
+ V(X64F64x2Eq) \
+ V(X64F64x2Ne) \
+ V(X64F64x2Lt) \
+ V(X64F64x2Le) \
+ V(X64F64x2Qfma) \
+ V(X64F64x2Qfms) \
+ V(X64F64x2Pmin) \
+ V(X64F64x2Pmax) \
+ V(X64F64x2Round) \
+ V(X64F64x2ConvertLowI32x4S) \
+ V(X64F64x2ConvertLowI32x4U) \
+ V(X64F64x2PromoteLowF32x4) \
+ V(X64F32x4Splat) \
+ V(X64F32x4ExtractLane) \
+ V(X64F32x4ReplaceLane) \
+ V(X64F32x4SConvertI32x4) \
+ V(X64F32x4UConvertI32x4) \
+ V(X64F32x4Abs) \
+ V(X64F32x4Neg) \
+ V(X64F32x4Sqrt) \
+ V(X64F32x4RecipApprox) \
+ V(X64F32x4RecipSqrtApprox) \
+ V(X64F32x4Add) \
+ V(X64F32x4Sub) \
+ V(X64F32x4Mul) \
+ V(X64F32x4Div) \
+ V(X64F32x4Min) \
+ V(X64F32x4Max) \
+ V(X64F32x4Eq) \
+ V(X64F32x4Ne) \
+ V(X64F32x4Lt) \
+ V(X64F32x4Le) \
+ V(X64F32x4Qfma) \
+ V(X64F32x4Qfms) \
+ V(X64F32x4Pmin) \
+ V(X64F32x4Pmax) \
+ V(X64F32x4Round) \
+ V(X64F32x4DemoteF64x2Zero) \
+ V(X64I64x2Splat) \
+ V(X64I64x2ExtractLane) \
+ V(X64I64x2Abs) \
+ V(X64I64x2Neg) \
+ V(X64I64x2BitMask) \
+ V(X64I64x2Shl) \
+ V(X64I64x2ShrS) \
+ V(X64I64x2Add) \
+ V(X64I64x2Sub) \
+ V(X64I64x2Mul) \
+ V(X64I64x2Eq) \
+ V(X64I64x2GtS) \
+ V(X64I64x2GeS) \
+ V(X64I64x2Ne) \
+ V(X64I64x2ShrU) \
+ V(X64I64x2ExtMulLowI32x4S) \
+ V(X64I64x2ExtMulHighI32x4S) \
+ V(X64I64x2ExtMulLowI32x4U) \
+ V(X64I64x2ExtMulHighI32x4U) \
+ V(X64I64x2SConvertI32x4Low) \
+ V(X64I64x2SConvertI32x4High) \
+ V(X64I64x2UConvertI32x4Low) \
+ V(X64I64x2UConvertI32x4High) \
+ V(X64I32x4Splat) \
+ V(X64I32x4ExtractLane) \
+ V(X64I32x4SConvertF32x4) \
+ V(X64I32x4SConvertI16x8Low) \
+ V(X64I32x4SConvertI16x8High) \
+ V(X64I32x4Neg) \
+ V(X64I32x4Shl) \
+ V(X64I32x4ShrS) \
+ V(X64I32x4Add) \
+ V(X64I32x4Sub) \
+ V(X64I32x4Mul) \
+ V(X64I32x4MinS) \
+ V(X64I32x4MaxS) \
+ V(X64I32x4Eq) \
+ V(X64I32x4Ne) \
+ V(X64I32x4GtS) \
+ V(X64I32x4GeS) \
+ V(X64I32x4UConvertF32x4) \
+ V(X64I32x4UConvertI16x8Low) \
+ V(X64I32x4UConvertI16x8High) \
+ V(X64I32x4ShrU) \
+ V(X64I32x4MinU) \
+ V(X64I32x4MaxU) \
+ V(X64I32x4GtU) \
+ V(X64I32x4GeU) \
+ V(X64I32x4Abs) \
+ V(X64I32x4BitMask) \
+ V(X64I32x4DotI16x8S) \
+ V(X64I32x4ExtMulLowI16x8S) \
+ V(X64I32x4ExtMulHighI16x8S) \
+ V(X64I32x4ExtMulLowI16x8U) \
+ V(X64I32x4ExtMulHighI16x8U) \
+ V(X64I32x4ExtAddPairwiseI16x8S) \
+ V(X64I32x4ExtAddPairwiseI16x8U) \
+ V(X64I32x4TruncSatF64x2SZero) \
+ V(X64I32x4TruncSatF64x2UZero) \
+ V(X64I16x8Splat) \
+ V(X64I16x8ExtractLaneS) \
+ V(X64I16x8SConvertI8x16Low) \
+ V(X64I16x8SConvertI8x16High) \
+ V(X64I16x8Neg) \
+ V(X64I16x8Shl) \
+ V(X64I16x8ShrS) \
+ V(X64I16x8SConvertI32x4) \
+ V(X64I16x8Add) \
+ V(X64I16x8AddSatS) \
+ V(X64I16x8Sub) \
+ V(X64I16x8SubSatS) \
+ V(X64I16x8Mul) \
+ V(X64I16x8MinS) \
+ V(X64I16x8MaxS) \
+ V(X64I16x8Eq) \
+ V(X64I16x8Ne) \
+ V(X64I16x8GtS) \
+ V(X64I16x8GeS) \
+ V(X64I16x8UConvertI8x16Low) \
+ V(X64I16x8UConvertI8x16High) \
+ V(X64I16x8ShrU) \
+ V(X64I16x8UConvertI32x4) \
+ V(X64I16x8AddSatU) \
+ V(X64I16x8SubSatU) \
+ V(X64I16x8MinU) \
+ V(X64I16x8MaxU) \
+ V(X64I16x8GtU) \
+ V(X64I16x8GeU) \
+ V(X64I16x8RoundingAverageU) \
+ V(X64I16x8Abs) \
+ V(X64I16x8BitMask) \
+ V(X64I16x8ExtMulLowI8x16S) \
+ V(X64I16x8ExtMulHighI8x16S) \
+ V(X64I16x8ExtMulLowI8x16U) \
+ V(X64I16x8ExtMulHighI8x16U) \
+ V(X64I16x8ExtAddPairwiseI8x16S) \
+ V(X64I16x8ExtAddPairwiseI8x16U) \
+ V(X64I16x8Q15MulRSatS) \
+ V(X64I8x16Splat) \
+ V(X64I8x16ExtractLaneS) \
+ V(X64Pinsrb) \
+ V(X64Pinsrw) \
+ V(X64Pinsrd) \
+ V(X64Pinsrq) \
+ V(X64Pextrb) \
+ V(X64Pextrw) \
+ V(X64I8x16SConvertI16x8) \
+ V(X64I8x16Neg) \
+ V(X64I8x16Shl) \
+ V(X64I8x16ShrS) \
+ V(X64I8x16Add) \
+ V(X64I8x16AddSatS) \
+ V(X64I8x16Sub) \
+ V(X64I8x16SubSatS) \
+ V(X64I8x16MinS) \
+ V(X64I8x16MaxS) \
+ V(X64I8x16Eq) \
+ V(X64I8x16Ne) \
+ V(X64I8x16GtS) \
+ V(X64I8x16GeS) \
+ V(X64I8x16UConvertI16x8) \
+ V(X64I8x16AddSatU) \
+ V(X64I8x16SubSatU) \
+ V(X64I8x16ShrU) \
+ V(X64I8x16MinU) \
+ V(X64I8x16MaxU) \
+ V(X64I8x16GtU) \
+ V(X64I8x16GeU) \
+ V(X64I8x16RoundingAverageU) \
+ V(X64I8x16Abs) \
+ V(X64I8x16BitMask) \
+ V(X64S128Const) \
+ V(X64S128Zero) \
+ V(X64S128AllOnes) \
+ V(X64S128Not) \
+ V(X64S128And) \
+ V(X64S128Or) \
+ V(X64S128Xor) \
+ V(X64S128Select) \
+ V(X64S128AndNot) \
+ V(X64I8x16Swizzle) \
+ V(X64I8x16Shuffle) \
+ V(X64I8x16Popcnt) \
+ V(X64S128Load8Splat) \
+ V(X64S128Load16Splat) \
+ V(X64S128Load32Splat) \
+ V(X64S128Load64Splat) \
+ V(X64S128Load8x8S) \
+ V(X64S128Load8x8U) \
+ V(X64S128Load16x4S) \
+ V(X64S128Load16x4U) \
+ V(X64S128Load32x2S) \
+ V(X64S128Load32x2U) \
+ V(X64S128Store32Lane) \
+ V(X64S128Store64Lane) \
+ V(X64Shufps) \
+ V(X64S32x4Rotate) \
+ V(X64S32x4Swizzle) \
+ V(X64S32x4Shuffle) \
+ V(X64S16x8Blend) \
+ V(X64S16x8HalfShuffle1) \
+ V(X64S16x8HalfShuffle2) \
+ V(X64S8x16Alignr) \
+ V(X64S16x8Dup) \
+ V(X64S8x16Dup) \
+ V(X64S16x8UnzipHigh) \
+ V(X64S16x8UnzipLow) \
+ V(X64S8x16UnzipHigh) \
+ V(X64S8x16UnzipLow) \
+ V(X64S64x2UnpackHigh) \
+ V(X64S32x4UnpackHigh) \
+ V(X64S16x8UnpackHigh) \
+ V(X64S8x16UnpackHigh) \
+ V(X64S64x2UnpackLow) \
+ V(X64S32x4UnpackLow) \
+ V(X64S16x8UnpackLow) \
+ V(X64S8x16UnpackLow) \
+ V(X64S8x16TransposeLow) \
+ V(X64S8x16TransposeHigh) \
+ V(X64S8x8Reverse) \
+ V(X64S8x4Reverse) \
+ V(X64S8x2Reverse) \
+ V(X64V128AnyTrue) \
+ V(X64I64x2AllTrue) \
+ V(X64I32x4AllTrue) \
+ V(X64I16x8AllTrue) \
+ V(X64I8x16AllTrue) \
+ V(X64Word64AtomicAddUint64) \
+ V(X64Word64AtomicSubUint64) \
+ V(X64Word64AtomicAndUint64) \
+ V(X64Word64AtomicOrUint64) \
+ V(X64Word64AtomicXorUint64) \
+ V(X64Word64AtomicStoreWord64) \
+ V(X64Word64AtomicExchangeUint64) \
V(X64Word64AtomicCompareExchangeUint64)
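// ---- Editor's aside (illustrative sketch, not part of this patch) ----
// TARGET_ARCH_OPCODE_LIST above is an X-macro: consumers re-define V to
// stamp out an enum, a name table, etc. A minimal self-contained model of
// the pattern (DemoOpcode and friends are invented for illustration):
#define DEMO_OPCODE_LIST(V) \
  V(Add)                    \
  V(Sub)
enum DemoOpcode {
#define DECLARE_DEMO_OPCODE(Name) kDemo##Name,
  DEMO_OPCODE_LIST(DECLARE_DEMO_OPCODE)
#undef DECLARE_DEMO_OPCODE
};
inline const char* DemoOpcodeName(DemoOpcode op) {
  switch (op) {
#define DEMO_CASE(Name) \
  case kDemo##Name:     \
    return #Name;
    DEMO_OPCODE_LIST(DEMO_CASE)
#undef DEMO_CASE
  }
  return "unknown";
}
// ---- end aside ----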
// Addressing modes represent the "shape" of inputs to an instruction.
diff --git a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
index 4fada93a31..d5f33d86bc 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-scheduler-x64.cc
@@ -62,8 +62,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEFloat32Sub:
case kSSEFloat32Mul:
case kSSEFloat32Div:
- case kSSEFloat32Abs:
- case kSSEFloat32Neg:
case kSSEFloat32Sqrt:
case kSSEFloat32Round:
case kSSEFloat32ToFloat64:
@@ -73,8 +71,6 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kSSEFloat64Mul:
case kSSEFloat64Div:
case kSSEFloat64Mod:
- case kSSEFloat64Abs:
- case kSSEFloat64Neg:
case kSSEFloat64Sqrt:
case kSSEFloat64Round:
case kSSEFloat32Max:
@@ -114,10 +110,10 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kAVXFloat64Sub:
case kAVXFloat64Mul:
case kAVXFloat64Div:
- case kAVXFloat64Abs:
- case kAVXFloat64Neg:
- case kAVXFloat32Abs:
- case kAVXFloat32Neg:
+ case kX64Float64Abs:
+ case kX64Float64Neg:
+ case kX64Float32Abs:
+ case kX64Float32Neg:
case kX64BitcastFI:
case kX64BitcastDL:
case kX64BitcastIF:
@@ -422,33 +418,13 @@ int InstructionScheduler::GetTargetInstructionFlags(
case kX64LFence:
return kHasSideEffect;
- case kX64Word64AtomicAddUint8:
- case kX64Word64AtomicAddUint16:
- case kX64Word64AtomicAddUint32:
+ case kX64Word64AtomicStoreWord64:
case kX64Word64AtomicAddUint64:
- case kX64Word64AtomicSubUint8:
- case kX64Word64AtomicSubUint16:
- case kX64Word64AtomicSubUint32:
case kX64Word64AtomicSubUint64:
- case kX64Word64AtomicAndUint8:
- case kX64Word64AtomicAndUint16:
- case kX64Word64AtomicAndUint32:
case kX64Word64AtomicAndUint64:
- case kX64Word64AtomicOrUint8:
- case kX64Word64AtomicOrUint16:
- case kX64Word64AtomicOrUint32:
case kX64Word64AtomicOrUint64:
- case kX64Word64AtomicXorUint8:
- case kX64Word64AtomicXorUint16:
- case kX64Word64AtomicXorUint32:
case kX64Word64AtomicXorUint64:
- case kX64Word64AtomicExchangeUint8:
- case kX64Word64AtomicExchangeUint16:
- case kX64Word64AtomicExchangeUint32:
case kX64Word64AtomicExchangeUint64:
- case kX64Word64AtomicCompareExchangeUint8:
- case kX64Word64AtomicCompareExchangeUint16:
- case kX64Word64AtomicCompareExchangeUint32:
case kX64Word64AtomicCompareExchangeUint64:
return kHasSideEffect;
@@ -472,18 +448,18 @@ int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
case kX64Imul32:
case kX64ImulHigh32:
case kX64UmulHigh32:
+ case kX64Float32Abs:
+ case kX64Float32Neg:
+ case kX64Float64Abs:
+ case kX64Float64Neg:
case kSSEFloat32Cmp:
case kSSEFloat32Add:
case kSSEFloat32Sub:
- case kSSEFloat32Abs:
- case kSSEFloat32Neg:
case kSSEFloat64Cmp:
case kSSEFloat64Add:
case kSSEFloat64Sub:
case kSSEFloat64Max:
case kSSEFloat64Min:
- case kSSEFloat64Abs:
- case kSSEFloat64Neg:
return 3;
case kSSEFloat32Mul:
case kSSEFloat32ToFloat64:
diff --git a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
index 53ee75064b..2f44f0dee5 100644
--- a/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
+++ b/deps/v8/src/compiler/backend/x64/instruction-selector-x64.cc
@@ -250,6 +250,7 @@ class X64OperandGenerator final : public OperandGenerator {
};
namespace {
+
ArchOpcode GetLoadOpcode(LoadRepresentation load_rep) {
ArchOpcode opcode;
switch (load_rep.representation()) {
@@ -340,6 +341,30 @@ ArchOpcode GetStoreOpcode(StoreRepresentation store_rep) {
UNREACHABLE();
}
+ArchOpcode GetSeqCstStoreOpcode(StoreRepresentation store_rep) {
+ switch (store_rep.representation()) {
+ case MachineRepresentation::kWord8:
+ return kAtomicStoreWord8;
+ case MachineRepresentation::kWord16:
+ return kAtomicStoreWord16;
+ case MachineRepresentation::kWord32:
+ return kAtomicStoreWord32;
+ case MachineRepresentation::kWord64:
+ return kX64Word64AtomicStoreWord64;
+ case MachineRepresentation::kTaggedSigned: // Fall through.
+ case MachineRepresentation::kTaggedPointer: // Fall through.
+ case MachineRepresentation::kTagged:
+ if (COMPRESS_POINTERS_BOOL) return kAtomicStoreWord32;
+ return kX64Word64AtomicStoreWord64;
+ case MachineRepresentation::kCompressedPointer: // Fall through.
+ case MachineRepresentation::kCompressed:
+ CHECK(COMPRESS_POINTERS_BOOL);
+ return kAtomicStoreWord32;
+ default:
+ UNREACHABLE();
+ }
+}
+
} // namespace
void InstructionSelector::VisitStackSlot(Node* node) {
@@ -471,9 +496,6 @@ void InstructionSelector::VisitLoad(Node* node, Node* value,
InstructionCode code = opcode | AddressingModeField::encode(mode);
if (node->opcode() == IrOpcode::kProtectedLoad) {
code |= AccessModeField::encode(kMemoryAccessProtected);
- } else if (node->opcode() == IrOpcode::kPoisonedLoad) {
- CHECK_NE(poisoning_level_, PoisoningMitigationLevel::kDontPoison);
- code |= AccessModeField::encode(kMemoryAccessPoisoned);
}
Emit(code, 1, outputs, input_count, inputs, temp_count, temps);
}
@@ -484,19 +506,39 @@ void InstructionSelector::VisitLoad(Node* node) {
VisitLoad(node, node, GetLoadOpcode(load_rep));
}
-void InstructionSelector::VisitPoisonedLoad(Node* node) { VisitLoad(node); }
-
void InstructionSelector::VisitProtectedLoad(Node* node) { VisitLoad(node); }
-void InstructionSelector::VisitStore(Node* node) {
- X64OperandGenerator g(this);
+namespace {
+
+// Shared routine for Word32/Word64 Atomic Exchange
+void VisitAtomicExchange(InstructionSelector* selector, Node* node,
+ ArchOpcode opcode, AtomicWidth width) {
+ X64OperandGenerator g(selector);
+ Node* base = node->InputAt(0);
+ Node* index = node->InputAt(1);
+ Node* value = node->InputAt(2);
+ AddressingMode addressing_mode;
+ InstructionOperand inputs[] = {
+ g.UseUniqueRegister(value), g.UseUniqueRegister(base),
+ g.GetEffectiveIndexOperand(index, &addressing_mode)};
+ InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
+ selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
+}
+
+void VisitStoreCommon(InstructionSelector* selector, Node* node,
+ StoreRepresentation store_rep,
+ base::Optional<AtomicMemoryOrder> atomic_order) {
+ X64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
Node* value = node->InputAt(2);
- StoreRepresentation store_rep = StoreRepresentationOf(node->op());
DCHECK_NE(store_rep.representation(), MachineRepresentation::kMapWord);
WriteBarrierKind write_barrier_kind = store_rep.write_barrier_kind();
+ const bool is_seqcst =
+ atomic_order && *atomic_order == AtomicMemoryOrder::kSeqCst;
if (FLAG_enable_unconditional_write_barriers &&
CanBeTaggedOrCompressedPointer(store_rep.representation())) {
@@ -513,16 +555,13 @@ void InstructionSelector::VisitStore(Node* node) {
RecordWriteMode record_write_mode =
WriteBarrierKindToRecordWriteMode(write_barrier_kind);
InstructionOperand temps[] = {g.TempRegister(), g.TempRegister()};
- InstructionCode code = kArchStoreWithWriteBarrier;
+ InstructionCode code = is_seqcst ? kArchAtomicStoreWithWriteBarrier
+ : kArchStoreWithWriteBarrier;
code |= AddressingModeField::encode(addressing_mode);
code |= MiscField::encode(static_cast<int>(record_write_mode));
- Emit(code, 0, nullptr, arraysize(inputs), inputs, arraysize(temps), temps);
+ selector->Emit(code, 0, nullptr, arraysize(inputs), inputs,
+ arraysize(temps), temps);
} else {
- if ((ElementSizeLog2Of(store_rep.representation()) <
- kSystemPointerSizeLog2) &&
- value->opcode() == IrOpcode::kTruncateInt64ToInt32) {
- value = value->InputAt(0);
- }
#ifdef V8_IS_TSAN
// On TSAN builds we require two scratch registers. Because of this we also
// have to modify the inputs to take into account possible aliasing and use
@@ -536,22 +575,54 @@ void InstructionSelector::VisitStore(Node* node) {
auto reg_kind = OperandGenerator::RegisterUseKind::kUseRegister;
#endif // V8_IS_TSAN
+ // Release and non-atomic stores emit MOV and sequentially consistent stores
+ // emit XCHG.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+
+ ArchOpcode opcode;
+ AddressingMode addressing_mode;
InstructionOperand inputs[4];
size_t input_count = 0;
- AddressingMode addressing_mode = g.GetEffectiveAddressMemoryOperand(
- node, inputs, &input_count, reg_kind);
- InstructionOperand value_operand = g.CanBeImmediate(value)
- ? g.UseImmediate(value)
- : g.UseRegister(value, reg_kind);
- inputs[input_count++] = value_operand;
- ArchOpcode opcode = GetStoreOpcode(store_rep);
+
+ if (is_seqcst) {
+ // SeqCst stores emit XCHG instead of MOV, so encode the inputs as we
+ // would for XCHG. XCHG can't encode the value as an immediate and has
+ // fewer addressing modes available.
+ inputs[input_count++] = g.UseUniqueRegister(value);
+ inputs[input_count++] = g.UseUniqueRegister(base);
+ inputs[input_count++] =
+ g.GetEffectiveIndexOperand(index, &addressing_mode);
+ opcode = GetSeqCstStoreOpcode(store_rep);
+ } else {
+ if ((ElementSizeLog2Of(store_rep.representation()) <
+ kSystemPointerSizeLog2) &&
+ value->opcode() == IrOpcode::kTruncateInt64ToInt32) {
+ value = value->InputAt(0);
+ }
+
+ addressing_mode = g.GetEffectiveAddressMemoryOperand(
+ node, inputs, &input_count, reg_kind);
+ InstructionOperand value_operand = g.CanBeImmediate(value)
+ ? g.UseImmediate(value)
+ : g.UseRegister(value, reg_kind);
+ inputs[input_count++] = value_operand;
+ opcode = GetStoreOpcode(store_rep);
+ }
+
InstructionCode code =
opcode | AddressingModeField::encode(addressing_mode);
- Emit(code, 0, static_cast<InstructionOperand*>(nullptr), input_count,
- inputs, temp_count, temps);
+ selector->Emit(code, 0, static_cast<InstructionOperand*>(nullptr),
+ input_count, inputs, temp_count, temps);
}
}
+} // namespace
+
+void InstructionSelector::VisitStore(Node* node) {
+ return VisitStoreCommon(this, node, StoreRepresentationOf(node->op()),
+ base::nullopt);
+}
+
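// ---- Editor's aside (illustrative sketch, not part of this patch) ----
// The MOV-vs-XCHG split in VisitStoreCommon above mirrors the well-known
// C++11-to-x86 mapping cited in the patch. Standalone std::atomic model:
#include <atomic>
#include <cstdint>
void ReleaseStore(std::atomic<uint64_t>* p, uint64_t v) {
  p->store(v, std::memory_order_release);  // plain MOV on x64
}
void SeqCstStore(std::atomic<uint64_t>* p, uint64_t v) {
  p->store(v, std::memory_order_seq_cst);  // XCHG (or MOV + MFENCE) on x64
}
// ---- end aside ----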
void InstructionSelector::VisitProtectedStore(Node* node) {
X64OperandGenerator g(this);
Node* value = node->InputAt(2);
@@ -1502,8 +1573,7 @@ bool InstructionSelector::ZeroExtendsWord32ToWord64NoPhis(Node* node) {
}
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable:
- case IrOpcode::kProtectedLoad:
- case IrOpcode::kPoisonedLoad: {
+ case IrOpcode::kProtectedLoad: {
// The movzxbl/movsxbl/movzxwl/movsxwl/movl operations implicitly
// zero-extend to 64-bit on x64, so the zero-extension is a no-op.
LoadRepresentation load_rep = LoadRepresentationOf(node->op());
@@ -1622,15 +1692,12 @@ void VisitFloatBinop(InstructionSelector* selector, Node* node,
}
void VisitFloatUnop(InstructionSelector* selector, Node* node, Node* input,
- ArchOpcode avx_opcode, ArchOpcode sse_opcode) {
+ ArchOpcode opcode) {
X64OperandGenerator g(selector);
- InstructionOperand temps[] = {g.TempDoubleRegister()};
if (selector->IsSupported(AVX)) {
- selector->Emit(avx_opcode, g.DefineAsRegister(node), g.UseUnique(input),
- arraysize(temps), temps);
+ selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(input));
} else {
- selector->Emit(sse_opcode, g.DefineSameAsFirst(node), g.UseRegister(input),
- arraysize(temps), temps);
+ selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(input));
}
}
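// ---- Editor's aside (illustrative sketch, not part of this patch) ----
// The AVX/SSE split in VisitFloatUnop exists because legacy SSE instructions
// are destructive two-operand forms (the destination doubles as the first
// source, hence DefineSameAsFirst), while VEX-encoded AVX forms take a
// separate destination (hence DefineAsRegister). Float abs via sign-bit
// masking, the usual way such unops are implemented (an assumption here):
#include <immintrin.h>
__m128 AbsPs(__m128 x) {
  const __m128 kSignMask = _mm_castsi128_ps(_mm_set1_epi32(0x7fffffff));
  return _mm_and_ps(x, kSignMask);  // andps / vandps: clear each sign bit
}
// ---- end aside ----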
@@ -1770,7 +1837,7 @@ void InstructionSelector::VisitFloat32Div(Node* node) {
}
void InstructionSelector::VisitFloat32Abs(Node* node) {
- VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Abs, kSSEFloat32Abs);
+ VisitFloatUnop(this, node, node->InputAt(0), kX64Float32Abs);
}
void InstructionSelector::VisitFloat32Max(Node* node) {
@@ -1814,7 +1881,7 @@ void InstructionSelector::VisitFloat64Min(Node* node) {
}
void InstructionSelector::VisitFloat64Abs(Node* node) {
- VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Abs, kSSEFloat64Abs);
+ VisitFloatUnop(this, node, node->InputAt(0), kX64Float64Abs);
}
void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
@@ -1822,11 +1889,11 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
}
void InstructionSelector::VisitFloat32Neg(Node* node) {
- VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat32Neg, kSSEFloat32Neg);
+ VisitFloatUnop(this, node, node->InputAt(0), kX64Float32Neg);
}
void InstructionSelector::VisitFloat64Neg(Node* node) {
- VisitFloatUnop(this, node, node->InputAt(0), kAVXFloat64Neg, kSSEFloat64Neg);
+ VisitFloatUnop(this, node, node->InputAt(0), kX64Float64Neg);
}
void InstructionSelector::VisitFloat64Ieee754Binop(Node* node,
@@ -2294,7 +2361,7 @@ void VisitFloat64Compare(InstructionSelector* selector, Node* node,
// Shared routine for Word32/Word64 Atomic Binops
void VisitAtomicBinop(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
X64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2305,14 +2372,15 @@ void VisitAtomicBinop(InstructionSelector* selector, Node* node,
g.GetEffectiveIndexOperand(index, &addressing_mode)};
InstructionOperand outputs[] = {g.DefineAsFixed(node, rax)};
InstructionOperand temps[] = {g.TempRegister()};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs,
arraysize(temps), temps);
}
// Shared routine for Word32/Word64 Atomic CmpExchg
void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
+ ArchOpcode opcode, AtomicWidth width) {
X64OperandGenerator g(selector);
Node* base = node->InputAt(0);
Node* index = node->InputAt(1);
@@ -2324,23 +2392,8 @@ void VisitAtomicCompareExchange(InstructionSelector* selector, Node* node,
g.UseUniqueRegister(base),
g.GetEffectiveIndexOperand(index, &addressing_mode)};
InstructionOperand outputs[] = {g.DefineAsFixed(node, rax)};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
- selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
-}
-
-// Shared routine for Word32/Word64 Atomic Exchange
-void VisitAtomicExchange(InstructionSelector* selector, Node* node,
- ArchOpcode opcode) {
- X64OperandGenerator g(selector);
- Node* base = node->InputAt(0);
- Node* index = node->InputAt(1);
- Node* value = node->InputAt(2);
- AddressingMode addressing_mode;
- InstructionOperand inputs[] = {
- g.UseUniqueRegister(value), g.UseUniqueRegister(base),
- g.GetEffectiveIndexOperand(index, &addressing_mode)};
- InstructionOperand outputs[] = {g.DefineSameAsFirst(node)};
- InstructionCode code = opcode | AddressingModeField::encode(addressing_mode);
+ InstructionCode code = opcode | AddressingModeField::encode(addressing_mode) |
+ AtomicWidthField::encode(width);
selector->Emit(code, arraysize(outputs), outputs, arraysize(inputs), inputs);
}
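// ---- Editor's aside (illustrative sketch, not part of this patch) ----
// rax is the fixed output in VisitAtomicCompareExchange because LOCK CMPXCHG
// takes the expected value in rax/eax and writes the observed value back to
// it. Scalar model of the operation:
#include <atomic>
#include <cstdint>
uint32_t CompareExchange(std::atomic<uint32_t>* p, uint32_t expected,
                         uint32_t desired) {
  p->compare_exchange_strong(expected, desired);  // LOCK CMPXCHG on x64
  return expected;  // the value observed at *p (== old value on success)
}
// ---- end aside ----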
@@ -2711,131 +2764,114 @@ void InstructionSelector::VisitMemoryBarrier(Node* node) {
}
void InstructionSelector::VisitWord32AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- DCHECK(load_rep.representation() == MachineRepresentation::kWord8 ||
- load_rep.representation() == MachineRepresentation::kWord16 ||
- load_rep.representation() == MachineRepresentation::kWord32);
- USE(load_rep);
- VisitLoad(node);
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ LoadRepresentation load_rep = atomic_load_params.representation();
+ DCHECK(IsIntegral(load_rep.representation()) ||
+ IsAnyTagged(load_rep.representation()) ||
+ (COMPRESS_POINTERS_BOOL &&
+ CanBeCompressedPointer(load_rep.representation())));
+ DCHECK_NE(load_rep.representation(), MachineRepresentation::kWord64);
+ DCHECK(!load_rep.IsMapWord());
+ // The memory order is ignored as both acquire and sequentially consistent
+ // loads can emit MOV.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ VisitLoad(node, node, GetLoadOpcode(load_rep));
}
void InstructionSelector::VisitWord64AtomicLoad(Node* node) {
- LoadRepresentation load_rep = LoadRepresentationOf(node->op());
- USE(load_rep);
- VisitLoad(node);
+ AtomicLoadParameters atomic_load_params = AtomicLoadParametersOf(node->op());
+ DCHECK(!atomic_load_params.representation().IsMapWord());
+ // The memory order is ignored as both acquire and sequentially consistent
+ // loads can emit MOV.
+ // https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+ VisitLoad(node, node, GetLoadOpcode(atomic_load_params.representation()));
}
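// ---- Editor's aside (illustrative sketch, not part of this patch) ----
// Both acquire and seq_cst atomic loads lower to an ordinary MOV on x64,
// which is why the memory order is ignored above. Standalone model:
#include <atomic>
#include <cstdint>
uint64_t AcquireLoad(const std::atomic<uint64_t>* p) {
  return p->load(std::memory_order_acquire);  // MOV
}
uint64_t SeqCstLoad(const std::atomic<uint64_t>* p) {
  return p->load(std::memory_order_seq_cst);  // also just MOV on x64
}
// ---- end aside ----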
void InstructionSelector::VisitWord32AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kWord32AtomicExchangeInt8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kWord32AtomicExchangeInt16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kWord32AtomicExchangeWord32;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicExchange(this, node, opcode);
+ AtomicStoreParameters params = AtomicStoreParametersOf(node->op());
+ DCHECK_NE(params.representation(), MachineRepresentation::kWord64);
+ DCHECK_IMPLIES(CanBeTaggedOrCompressedPointer(params.representation()),
+ kTaggedSize == 4);
+ VisitStoreCommon(this, node, params.store_representation(), params.order());
}
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- ArchOpcode opcode;
- switch (rep) {
- case MachineRepresentation::kWord8:
- opcode = kX64Word64AtomicExchangeUint8;
- break;
- case MachineRepresentation::kWord16:
- opcode = kX64Word64AtomicExchangeUint16;
- break;
- case MachineRepresentation::kWord32:
- opcode = kX64Word64AtomicExchangeUint32;
- break;
- case MachineRepresentation::kWord64:
- opcode = kX64Word64AtomicExchangeUint64;
- break;
- default:
- UNREACHABLE();
- }
- VisitAtomicExchange(this, node, opcode);
+ AtomicStoreParameters params = AtomicStoreParametersOf(node->op());
+ DCHECK_IMPLIES(CanBeTaggedOrCompressedPointer(params.representation()),
+ kTaggedSize == 8);
+ VisitStoreCommon(this, node, params.store_representation(), params.order());
}
void InstructionSelector::VisitWord32AtomicExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicExchangeInt8;
+ opcode = kAtomicExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicExchangeInt16;
+ opcode = kAtomicExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicExchangeWord32;
+ opcode = kAtomicExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Uint8()) {
- opcode = kX64Word64AtomicExchangeUint8;
+ opcode = kAtomicExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kX64Word64AtomicExchangeUint16;
+ opcode = kAtomicExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kX64Word64AtomicExchangeUint32;
+ opcode = kAtomicExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kX64Word64AtomicExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicExchange(this, node, opcode);
+ VisitAtomicExchange(this, node, opcode, AtomicWidth::kWord64);
}
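// ---- Editor's aside (illustrative sketch, not part of this patch) ----
// The exchange maps to XCHG, which is implicitly LOCK-prefixed when it
// touches memory; the AtomicWidthField added above selects the operand size
// at code-generation time. Scalar model:
#include <atomic>
#include <cstdint>
uint64_t Exchange(std::atomic<uint64_t>* p, uint64_t v) {
  return p->exchange(v);  // XCHG: store v, return the previous value
}
// ---- end aside ----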
void InstructionSelector::VisitWord32AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Int8()) {
- opcode = kWord32AtomicCompareExchangeInt8;
+ opcode = kAtomicCompareExchangeInt8;
} else if (type == MachineType::Uint8()) {
- opcode = kWord32AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Int16()) {
- opcode = kWord32AtomicCompareExchangeInt16;
+ opcode = kAtomicCompareExchangeInt16;
} else if (type == MachineType::Uint16()) {
- opcode = kWord32AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Int32() || type == MachineType::Uint32()) {
- opcode = kWord32AtomicCompareExchangeWord32;
+ opcode = kAtomicCompareExchangeWord32;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord32);
}
void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
MachineType type = AtomicOpType(node->op());
ArchOpcode opcode;
if (type == MachineType::Uint8()) {
- opcode = kX64Word64AtomicCompareExchangeUint8;
+ opcode = kAtomicCompareExchangeUint8;
} else if (type == MachineType::Uint16()) {
- opcode = kX64Word64AtomicCompareExchangeUint16;
+ opcode = kAtomicCompareExchangeUint16;
} else if (type == MachineType::Uint32()) {
- opcode = kX64Word64AtomicCompareExchangeUint32;
+ opcode = kAtomicCompareExchangeWord32;
} else if (type == MachineType::Uint64()) {
opcode = kX64Word64AtomicCompareExchangeUint64;
} else {
UNREACHABLE();
}
- VisitAtomicCompareExchange(this, node, opcode);
+ VisitAtomicCompareExchange(this, node, opcode, AtomicWidth::kWord64);
}
void InstructionSelector::VisitWord32AtomicBinaryOperation(
@@ -2856,15 +2892,14 @@ void InstructionSelector::VisitWord32AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord32);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
- VisitWord32AtomicBinaryOperation( \
- node, kWord32Atomic##op##Int8, kWord32Atomic##op##Uint8, \
- kWord32Atomic##op##Int16, kWord32Atomic##op##Uint16, \
- kWord32Atomic##op##Word32); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord32Atomic##op(Node* node) { \
+ VisitWord32AtomicBinaryOperation( \
+ node, kAtomic##op##Int8, kAtomic##op##Uint8, kAtomic##op##Int16, \
+ kAtomic##op##Uint16, kAtomic##op##Word32); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -2889,14 +2924,14 @@ void InstructionSelector::VisitWord64AtomicBinaryOperation(
} else {
UNREACHABLE();
}
- VisitAtomicBinop(this, node, opcode);
+ VisitAtomicBinop(this, node, opcode, AtomicWidth::kWord64);
}
-#define VISIT_ATOMIC_BINOP(op) \
- void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
- VisitWord64AtomicBinaryOperation( \
- node, kX64Word64Atomic##op##Uint8, kX64Word64Atomic##op##Uint16, \
- kX64Word64Atomic##op##Uint32, kX64Word64Atomic##op##Uint64); \
+#define VISIT_ATOMIC_BINOP(op) \
+ void InstructionSelector::VisitWord64Atomic##op(Node* node) { \
+ VisitWord64AtomicBinaryOperation(node, kAtomic##op##Uint8, \
+ kAtomic##op##Uint16, kAtomic##op##Word32, \
+ kX64Word64Atomic##op##Uint64); \
}
VISIT_ATOMIC_BINOP(Add)
VISIT_ATOMIC_BINOP(Sub)
@@ -3053,6 +3088,7 @@ VISIT_ATOMIC_BINOP(Xor)
#define SIMD_NARROW_SHIFT_OPCODES(V) \
V(I8x16Shl) \
+ V(I8x16ShrS) \
V(I8x16ShrU)
void InstructionSelector::VisitS128Const(Node* node) {
@@ -3182,19 +3218,19 @@ SIMD_SHIFT_OPCODES(VISIT_SIMD_SHIFT)
#undef VISIT_SIMD_SHIFT
#undef SIMD_SHIFT_OPCODES
-#define VISIT_SIMD_NARROW_SHIFT(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- X64OperandGenerator g(this); \
- InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()}; \
- if (g.CanBeImmediate(node->InputAt(1))) { \
- Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
- g.UseRegister(node->InputAt(0)), g.UseImmediate(node->InputAt(1)), \
- arraysize(temps), temps); \
- } else { \
- Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
- g.UseUniqueRegister(node->InputAt(0)), \
- g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
- } \
+#define VISIT_SIMD_NARROW_SHIFT(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ InstructionOperand output = \
+ IsSupported(AVX) ? g.UseRegister(node) : g.DefineSameAsFirst(node); \
+ if (g.CanBeImmediate(node->InputAt(1))) { \
+ Emit(kX64##Opcode, output, g.UseRegister(node->InputAt(0)), \
+ g.UseImmediate(node->InputAt(1))); \
+ } else { \
+ InstructionOperand temps[] = {g.TempSimd128Register()}; \
+ Emit(kX64##Opcode, output, g.UseUniqueRegister(node->InputAt(0)), \
+ g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps); \
+ } \
}
SIMD_NARROW_SHIFT_OPCODES(VISIT_SIMD_NARROW_SHIFT)
#undef VISIT_SIMD_NARROW_SHIFT
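// ---- Editor's aside (illustrative sketch, not part of this patch) ----
// x64 has no packed 8-bit shift instructions, which is why the I8x16 shifts
// sit in this "narrow shift" macro with a SIMD temp. Per-lane semantics
// being emulated (wasm takes shift counts modulo the lane width), scalar
// model:
#include <cstdint>
void I8x16ShrSModel(int8_t lanes[16], int shift) {
  shift &= 7;  // shift count modulo 8-bit lane width
  for (int i = 0; i < 16; ++i) {
    lanes[i] = static_cast<int8_t>(lanes[i] >> shift);
  }
}
// ---- end aside ----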
@@ -3257,15 +3293,11 @@ void InstructionSelector::VisitS128AndNot(Node* node) {
}
void InstructionSelector::VisitF64x2Abs(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64F64x2Abs, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)));
+ VisitFloatUnop(this, node, node->InputAt(0), kX64F64x2Abs);
}
void InstructionSelector::VisitF64x2Neg(Node* node) {
- X64OperandGenerator g(this);
- Emit(kX64F64x2Neg, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)));
+ VisitFloatUnop(this, node, node->InputAt(0), kX64F64x2Neg);
}
void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
@@ -3274,12 +3306,11 @@ void InstructionSelector::VisitF32x4UConvertI32x4(Node* node) {
g.UseRegister(node->InputAt(0)));
}
-#define VISIT_SIMD_QFMOP(Opcode) \
- void InstructionSelector::Visit##Opcode(Node* node) { \
- X64OperandGenerator g(this); \
- Emit(kX64##Opcode, g.DefineSameAsFirst(node), \
- g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), \
- g.UseRegister(node->InputAt(2))); \
+#define VISIT_SIMD_QFMOP(Opcode) \
+ void InstructionSelector::Visit##Opcode(Node* node) { \
+ X64OperandGenerator g(this); \
+ Emit(kX64##Opcode, g.UseRegister(node), g.UseRegister(node->InputAt(0)), \
+ g.UseRegister(node->InputAt(1)), g.UseRegister(node->InputAt(2))); \
}
VISIT_SIMD_QFMOP(F64x2Qfma)
VISIT_SIMD_QFMOP(F64x2Qfms)
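// ---- Editor's aside (illustrative sketch, not part of this patch) ----
// Qfma/Qfms are per-lane (possibly fused) multiply-add and multiply-subtract.
// Scalar model of one F64x2 lane, assuming an accumulator-first operand
// order (an assumption; the patch does not spell out the convention):
#include <cmath>
double QfmaLane(double acc, double m1, double m2) {
  return std::fma(m1, m2, acc);  // m1 * m2 + acc with a single rounding
}
double QfmsLane(double acc, double m1, double m2) {
  return std::fma(-m1, m2, acc);  // acc - m1 * m2
}
// ---- end aside ----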
@@ -3321,7 +3352,8 @@ void InstructionSelector::VisitI64x2Mul(Node* node) {
void InstructionSelector::VisitI32x4SConvertF32x4(Node* node) {
X64OperandGenerator g(this);
- Emit(kX64I32x4SConvertF32x4, g.DefineSameAsFirst(node),
+ Emit(kX64I32x4SConvertF32x4,
+ IsSupported(AVX) ? g.DefineAsRegister(node) : g.DefineSameAsFirst(node),
g.UseRegister(node->InputAt(0)));
}
@@ -3333,19 +3365,6 @@ void InstructionSelector::VisitI32x4UConvertF32x4(Node* node) {
g.UseRegister(node->InputAt(0)), arraysize(temps), temps);
}
-void InstructionSelector::VisitI8x16ShrS(Node* node) {
- X64OperandGenerator g(this);
- if (g.CanBeImmediate(node->InputAt(1))) {
- Emit(kX64I8x16ShrS, g.DefineSameAsFirst(node),
- g.UseRegister(node->InputAt(0)), g.UseImmediate(node->InputAt(1)));
- } else {
- InstructionOperand temps[] = {g.TempRegister(), g.TempSimd128Register()};
- Emit(kX64I8x16ShrS, g.DefineSameAsFirst(node),
- g.UseUniqueRegister(node->InputAt(0)),
- g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
- }
-}
-
void InstructionSelector::VisitInt32AbsWithOverflow(Node* node) {
UNREACHABLE();
}
diff --git a/deps/v8/src/compiler/branch-elimination.cc b/deps/v8/src/compiler/branch-elimination.cc
index a864012a7a..1515340503 100644
--- a/deps/v8/src/compiler/branch-elimination.cc
+++ b/deps/v8/src/compiler/branch-elimination.cc
@@ -135,7 +135,6 @@ Reduction BranchElimination::ReduceBranch(Node* node) {
bool condition_value;
// If we know the condition we can discard the branch.
if (from_input.LookupCondition(condition, &branch, &condition_value)) {
- MarkAsSafetyCheckIfNeeded(branch, node);
for (Node* const use : node->uses()) {
switch (use->opcode()) {
case IrOpcode::kIfTrue:
@@ -215,7 +214,6 @@ Reduction BranchElimination::ReduceDeoptimizeConditional(Node* node) {
Node* branch;
// If we know the condition we can discard the branch.
if (conditions.LookupCondition(condition, &branch, &condition_value)) {
- MarkAsSafetyCheckIfNeeded(branch, node);
if (condition_is_true == condition_value) {
// We don't update the conditions here, because we're replacing {node}
// with the {control} node that already contains the right information.
@@ -410,21 +408,6 @@ bool BranchElimination::ControlPathConditions::BlocksAndConditionsInvariant() {
}
#endif
-void BranchElimination::MarkAsSafetyCheckIfNeeded(Node* branch, Node* node) {
- // Check if {branch} is dead because we might have a stale side-table entry.
- if (!branch->IsDead() && branch->opcode() != IrOpcode::kDead &&
- branch->opcode() != IrOpcode::kTrapIf &&
- branch->opcode() != IrOpcode::kTrapUnless) {
- IsSafetyCheck branch_safety = IsSafetyCheckOf(branch->op());
- IsSafetyCheck combined_safety =
- CombineSafetyChecks(branch_safety, IsSafetyCheckOf(node->op()));
- if (branch_safety != combined_safety) {
- NodeProperties::ChangeOp(
- branch, common()->MarkAsSafetyCheck(branch->op(), combined_safety));
- }
- }
-}
-
Graph* BranchElimination::graph() const { return jsgraph()->graph(); }
Isolate* BranchElimination::isolate() const { return jsgraph()->isolate(); }
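// ---- Editor's aside (illustrative sketch, not part of this patch) ----
// What ReduceBranch does, modeled in plain C++: once a condition is known on
// the current control path, a re-test of the same condition folds away and
// the dead arm is eliminated. Illustrative only:
int Example(bool c) {
  if (c) {
    if (c) return 1;  // condition already known true on this path...
    return 2;         // ...so this arm is dead and gets eliminated
  }
  return 3;
}
// ---- end aside ----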
diff --git a/deps/v8/src/compiler/branch-elimination.h b/deps/v8/src/compiler/branch-elimination.h
index 9078c39038..93bacbff7b 100644
--- a/deps/v8/src/compiler/branch-elimination.h
+++ b/deps/v8/src/compiler/branch-elimination.h
@@ -114,7 +114,6 @@ class V8_EXPORT_PRIVATE BranchElimination final
Reduction UpdateConditions(Node* node, ControlPathConditions prev_conditions,
Node* current_condition, Node* current_branch,
bool is_true_branch, bool in_new_block);
- void MarkAsSafetyCheckIfNeeded(Node* branch, Node* node);
Node* dead() const { return dead_; }
Graph* graph() const;
diff --git a/deps/v8/src/compiler/bytecode-graph-builder.cc b/deps/v8/src/compiler/bytecode-graph-builder.cc
index 985a256c57..019f0bc954 100644
--- a/deps/v8/src/compiler/bytecode-graph-builder.cc
+++ b/deps/v8/src/compiler/bytecode-graph-builder.cc
@@ -141,9 +141,8 @@ class BytecodeGraphBuilder {
Node* NewIfDefault() { return NewNode(common()->IfDefault()); }
Node* NewMerge() { return NewNode(common()->Merge(1), true); }
Node* NewLoop() { return NewNode(common()->Loop(1), true); }
- Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck) {
- return NewNode(common()->Branch(hint, is_safety_check), condition);
+ Node* NewBranch(Node* condition, BranchHint hint = BranchHint::kNone) {
+ return NewNode(common()->Branch(hint), condition);
}
Node* NewSwitch(Node* condition, int control_output_count) {
return NewNode(common()->Switch(control_output_count), condition);
@@ -1053,7 +1052,7 @@ BytecodeGraphBuilder::BytecodeGraphBuilder(
shared_info_(shared_info),
bytecode_array_(shared_info.GetBytecodeArray()),
feedback_cell_(feedback_cell),
- feedback_vector_(feedback_cell.value().value()),
+ feedback_vector_(feedback_cell.feedback_vector().value()),
invocation_frequency_(invocation_frequency),
type_hint_lowering_(
broker, jsgraph, feedback_vector_,
@@ -3959,7 +3958,7 @@ void BytecodeGraphBuilder::BuildJump() {
}
void BytecodeGraphBuilder::BuildJumpIf(Node* condition) {
- NewBranch(condition, BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck);
+ NewBranch(condition, BranchHint::kNone);
{
SubEnvironment sub_environment(this);
NewIfTrue();
@@ -3971,7 +3970,7 @@ void BytecodeGraphBuilder::BuildJumpIf(Node* condition) {
}
void BytecodeGraphBuilder::BuildJumpIfNot(Node* condition) {
- NewBranch(condition, BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck);
+ NewBranch(condition, BranchHint::kNone);
{
SubEnvironment sub_environment(this);
NewIfFalse();
@@ -3997,8 +3996,7 @@ void BytecodeGraphBuilder::BuildJumpIfNotEqual(Node* comperand) {
}
void BytecodeGraphBuilder::BuildJumpIfFalse() {
- NewBranch(environment()->LookupAccumulator(), BranchHint::kNone,
- IsSafetyCheck::kNoSafetyCheck);
+ NewBranch(environment()->LookupAccumulator(), BranchHint::kNone);
{
SubEnvironment sub_environment(this);
NewIfFalse();
@@ -4012,8 +4010,7 @@ void BytecodeGraphBuilder::BuildJumpIfFalse() {
}
void BytecodeGraphBuilder::BuildJumpIfTrue() {
- NewBranch(environment()->LookupAccumulator(), BranchHint::kNone,
- IsSafetyCheck::kNoSafetyCheck);
+ NewBranch(environment()->LookupAccumulator(), BranchHint::kNone);
{
SubEnvironment sub_environment(this);
NewIfTrue();
diff --git a/deps/v8/src/compiler/c-linkage.cc b/deps/v8/src/compiler/c-linkage.cc
index 5950541111..e62babccf1 100644
--- a/deps/v8/src/compiler/c-linkage.cc
+++ b/deps/v8/src/compiler/c-linkage.cc
@@ -100,6 +100,18 @@ namespace {
#define CALLEE_SAVE_FP_REGISTERS \
f20.bit() | f22.bit() | f24.bit() | f26.bit() | f28.bit() | f30.bit()
+#elif V8_TARGET_ARCH_LOONG64
+// ===========================================================================
+// == loong64 ================================================================
+// ===========================================================================
+#define PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7
+#define CALLEE_SAVE_REGISTERS \
+ s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | \
+ s7.bit() | s8.bit() | fp.bit()
+#define CALLEE_SAVE_FP_REGISTERS \
+ f24.bit() | f25.bit() | f26.bit() | f27.bit() | f28.bit() | f29.bit() | \
+ f30.bit() | f31.bit()
+
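// ---- Editor's aside (illustrative sketch, not part of this patch) ----
// The CALLEE_SAVE_*REGISTERS values are plain bitmasks built from
// Register::bit(), i.e. 1 << register-code. Membership test (names below
// are illustrative):
#include <cstdint>
inline bool IsCalleeSave(uint64_t callee_save_mask, int reg_code) {
  return (callee_save_mask >> reg_code) & 1;
}
// ---- end aside ----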
#elif V8_TARGET_ARCH_PPC64
// ===========================================================================
// == ppc & ppc64 ============================================================
diff --git a/deps/v8/src/compiler/code-assembler.cc b/deps/v8/src/compiler/code-assembler.cc
index 2cbcce236f..d27744072a 100644
--- a/deps/v8/src/compiler/code-assembler.cc
+++ b/deps/v8/src/compiler/code-assembler.cc
@@ -48,8 +48,7 @@ static_assert(
CodeAssemblerState::CodeAssemblerState(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
- CodeKind kind, const char* name, PoisoningMitigationLevel poisoning_level,
- Builtin builtin)
+ CodeKind kind, const char* name, Builtin builtin)
// TODO(rmcilroy): Should we use Linkage::GetBytecodeDispatchDescriptor for
// bytecode handlers?
: CodeAssemblerState(
@@ -57,29 +56,26 @@ CodeAssemblerState::CodeAssemblerState(
Linkage::GetStubCallDescriptor(
zone, descriptor, descriptor.GetStackParameterCount(),
CallDescriptor::kNoFlags, Operator::kNoProperties),
- kind, name, poisoning_level, builtin) {}
+ kind, name, builtin) {}
CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
int parameter_count, CodeKind kind,
- const char* name,
- PoisoningMitigationLevel poisoning_level,
- Builtin builtin)
+ const char* name, Builtin builtin)
: CodeAssemblerState(
isolate, zone,
Linkage::GetJSCallDescriptor(zone, false, parameter_count,
CallDescriptor::kCanUseRoots),
- kind, name, poisoning_level, builtin) {}
+ kind, name, builtin) {}
CodeAssemblerState::CodeAssemblerState(Isolate* isolate, Zone* zone,
CallDescriptor* call_descriptor,
CodeKind kind, const char* name,
- PoisoningMitigationLevel poisoning_level,
Builtin builtin)
: raw_assembler_(new RawMachineAssembler(
isolate, zone->New<Graph>(zone), call_descriptor,
MachineType::PointerRepresentation(),
InstructionSelector::SupportedMachineOperatorFlags(),
- InstructionSelector::AlignmentRequirements(), poisoning_level)),
+ InstructionSelector::AlignmentRequirements())),
kind_(kind),
name_(name),
builtin_(builtin),
@@ -169,10 +165,6 @@ bool CodeAssembler::Word32ShiftIsSafe() const {
return raw_assembler()->machine()->Word32ShiftIsSafe();
}
-PoisoningMitigationLevel CodeAssembler::poisoning_level() const {
- return raw_assembler()->poisoning_level();
-}
-
// static
Handle<Code> CodeAssembler::GenerateCode(
CodeAssemblerState* state, const AssemblerOptions& options,
@@ -187,7 +179,7 @@ Handle<Code> CodeAssembler::GenerateCode(
code = Pipeline::GenerateCodeForCodeStub(
rasm->isolate(), rasm->call_descriptor(), graph, state->jsgraph_,
rasm->source_positions(), state->kind_, state->name_,
- state->builtin_, rasm->poisoning_level(), options, profile_data)
+ state->builtin_, options, profile_data)
.ToHandleChecked();
state->code_generated_ = true;
@@ -565,15 +557,6 @@ TNode<RawPtrT> CodeAssembler::LoadParentFramePointer() {
return UncheckedCast<RawPtrT>(raw_assembler()->LoadParentFramePointer());
}
-TNode<Object> CodeAssembler::TaggedPoisonOnSpeculation(TNode<Object> value) {
- return UncheckedCast<Object>(
- raw_assembler()->TaggedPoisonOnSpeculation(value));
-}
-
-TNode<WordT> CodeAssembler::WordPoisonOnSpeculation(TNode<WordT> value) {
- return UncheckedCast<WordT>(raw_assembler()->WordPoisonOnSpeculation(value));
-}
-
#define DEFINE_CODE_ASSEMBLER_BINARY_OP(name, ResType, Arg1Type, Arg2Type) \
TNode<ResType> CodeAssembler::name(TNode<Arg1Type> a, TNode<Arg2Type> b) { \
return UncheckedCast<ResType>(raw_assembler()->name(a, b)); \
@@ -677,45 +660,44 @@ TNode<Int32T> CodeAssembler::TruncateFloat32ToInt32(TNode<Float32T> value) {
CODE_ASSEMBLER_UNARY_OP_LIST(DEFINE_CODE_ASSEMBLER_UNARY_OP)
#undef DEFINE_CODE_ASSEMBLER_UNARY_OP
-Node* CodeAssembler::Load(MachineType type, Node* base,
- LoadSensitivity needs_poisoning) {
- return raw_assembler()->Load(type, base, needs_poisoning);
+Node* CodeAssembler::Load(MachineType type, Node* base) {
+ return raw_assembler()->Load(type, base);
}
-Node* CodeAssembler::Load(MachineType type, Node* base, Node* offset,
- LoadSensitivity needs_poisoning) {
- return raw_assembler()->Load(type, base, offset, needs_poisoning);
+Node* CodeAssembler::Load(MachineType type, Node* base, Node* offset) {
+ return raw_assembler()->Load(type, base, offset);
}
-TNode<Object> CodeAssembler::LoadFullTagged(Node* base,
- LoadSensitivity needs_poisoning) {
- return BitcastWordToTagged(Load<RawPtrT>(base, needs_poisoning));
+TNode<Object> CodeAssembler::LoadFullTagged(Node* base) {
+ return BitcastWordToTagged(Load<RawPtrT>(base));
}
-TNode<Object> CodeAssembler::LoadFullTagged(Node* base, TNode<IntPtrT> offset,
- LoadSensitivity needs_poisoning) {
+TNode<Object> CodeAssembler::LoadFullTagged(Node* base, TNode<IntPtrT> offset) {
// Please use LoadFromObject(MachineType::MapInHeader(), object,
// IntPtrConstant(-kHeapObjectTag)) instead.
DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
- return BitcastWordToTagged(Load<RawPtrT>(base, offset, needs_poisoning));
+ return BitcastWordToTagged(Load<RawPtrT>(base, offset));
}
-Node* CodeAssembler::AtomicLoad(MachineType type, TNode<RawPtrT> base,
- TNode<WordT> offset) {
+Node* CodeAssembler::AtomicLoad(MachineType type, AtomicMemoryOrder order,
+ TNode<RawPtrT> base, TNode<WordT> offset) {
DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
- return raw_assembler()->AtomicLoad(type, base, offset);
+ return raw_assembler()->AtomicLoad(AtomicLoadParameters(type, order), base,
+ offset);
}
template <class Type>
-TNode<Type> CodeAssembler::AtomicLoad64(TNode<RawPtrT> base,
+TNode<Type> CodeAssembler::AtomicLoad64(AtomicMemoryOrder order,
+ TNode<RawPtrT> base,
TNode<WordT> offset) {
- return UncheckedCast<Type>(raw_assembler()->AtomicLoad64(base, offset));
+ return UncheckedCast<Type>(raw_assembler()->AtomicLoad64(
+ AtomicLoadParameters(MachineType::Uint64(), order), base, offset));
}
template TNode<AtomicInt64> CodeAssembler::AtomicLoad64<AtomicInt64>(
- TNode<RawPtrT> base, TNode<WordT> offset);
+ AtomicMemoryOrder order, TNode<RawPtrT> base, TNode<WordT> offset);
template TNode<AtomicUint64> CodeAssembler::AtomicLoad64<AtomicUint64>(
- TNode<RawPtrT> base, TNode<WordT> offset);
+ AtomicMemoryOrder order, TNode<RawPtrT> base, TNode<WordT> offset);
Node* CodeAssembler::LoadFromObject(MachineType type, TNode<Object> object,
TNode<IntPtrT> offset) {
@@ -880,16 +862,22 @@ void CodeAssembler::StoreFullTaggedNoWriteBarrier(TNode<RawPtrT> base,
BitcastTaggedToWord(tagged_value));
}
-void CodeAssembler::AtomicStore(MachineRepresentation rep, TNode<RawPtrT> base,
+void CodeAssembler::AtomicStore(MachineRepresentation rep,
+ AtomicMemoryOrder order, TNode<RawPtrT> base,
TNode<WordT> offset, TNode<Word32T> value) {
DCHECK(!raw_assembler()->IsMapOffsetConstantMinusTag(offset));
- raw_assembler()->AtomicStore(rep, base, offset, value);
+ raw_assembler()->AtomicStore(
+ AtomicStoreParameters(rep, WriteBarrierKind::kNoWriteBarrier, order),
+ base, offset, value);
}
-void CodeAssembler::AtomicStore64(TNode<RawPtrT> base, TNode<WordT> offset,
- TNode<UintPtrT> value,
+void CodeAssembler::AtomicStore64(AtomicMemoryOrder order, TNode<RawPtrT> base,
+ TNode<WordT> offset, TNode<UintPtrT> value,
TNode<UintPtrT> value_high) {
- raw_assembler()->AtomicStore64(base, offset, value, value_high);
+ raw_assembler()->AtomicStore64(
+ AtomicStoreParameters(MachineRepresentation::kWord64,
+ WriteBarrierKind::kNoWriteBarrier, order),
+ base, offset, value, value_high);
}
#define ATOMIC_FUNCTION(name) \
diff --git a/deps/v8/src/compiler/code-assembler.h b/deps/v8/src/compiler/code-assembler.h
index 0e6872aa66..7a22086260 100644
--- a/deps/v8/src/compiler/code-assembler.h
+++ b/deps/v8/src/compiler/code-assembler.h
@@ -17,6 +17,7 @@
#include "src/base/optional.h"
#include "src/base/type-traits.h"
#include "src/builtins/builtins.h"
+#include "src/codegen/atomic-memory-order.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/machine-type.h"
#include "src/codegen/source-position.h"
@@ -725,47 +726,36 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<RawPtrT> LoadFramePointer();
TNode<RawPtrT> LoadParentFramePointer();
- // Poison |value| on speculative paths.
- TNode<Object> TaggedPoisonOnSpeculation(TNode<Object> value);
- TNode<WordT> WordPoisonOnSpeculation(TNode<WordT> value);
-
// Load raw memory location.
- Node* Load(MachineType type, Node* base,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+ Node* Load(MachineType type, Node* base);
template <class Type>
TNode<Type> Load(MachineType type, TNode<RawPtr<Type>> base) {
DCHECK(
IsSubtype(type.representation(), MachineRepresentationOf<Type>::value));
return UncheckedCast<Type>(Load(type, static_cast<Node*>(base)));
}
- Node* Load(MachineType type, Node* base, Node* offset,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+ Node* Load(MachineType type, Node* base, Node* offset);
template <class Type>
- TNode<Type> Load(Node* base,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
- return UncheckedCast<Type>(
- Load(MachineTypeOf<Type>::value, base, needs_poisoning));
+ TNode<Type> Load(Node* base) {
+ return UncheckedCast<Type>(Load(MachineTypeOf<Type>::value, base));
}
template <class Type>
- TNode<Type> Load(Node* base, TNode<WordT> offset,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
- return UncheckedCast<Type>(
- Load(MachineTypeOf<Type>::value, base, offset, needs_poisoning));
+ TNode<Type> Load(Node* base, TNode<WordT> offset) {
+ return UncheckedCast<Type>(Load(MachineTypeOf<Type>::value, base, offset));
}
template <class Type>
- TNode<Type> AtomicLoad(TNode<RawPtrT> base, TNode<WordT> offset) {
+ TNode<Type> AtomicLoad(AtomicMemoryOrder order, TNode<RawPtrT> base,
+ TNode<WordT> offset) {
return UncheckedCast<Type>(
- AtomicLoad(MachineTypeOf<Type>::value, base, offset));
+ AtomicLoad(MachineTypeOf<Type>::value, order, base, offset));
}
template <class Type>
- TNode<Type> AtomicLoad64(TNode<RawPtrT> base, TNode<WordT> offset);
+ TNode<Type> AtomicLoad64(AtomicMemoryOrder order, TNode<RawPtrT> base,
+ TNode<WordT> offset);
// Load uncompressed tagged value from (most likely off JS heap) memory
// location.
- TNode<Object> LoadFullTagged(
- Node* base, LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
- TNode<Object> LoadFullTagged(
- Node* base, TNode<IntPtrT> offset,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+ TNode<Object> LoadFullTagged(Node* base);
+ TNode<Object> LoadFullTagged(Node* base, TNode<IntPtrT> offset);
Node* LoadFromObject(MachineType type, TNode<Object> object,
TNode<IntPtrT> offset);
@@ -822,12 +812,14 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<HeapObject> object,
int offset, Node* value);
void OptimizedStoreMap(TNode<HeapObject> object, TNode<Map>);
- void AtomicStore(MachineRepresentation rep, TNode<RawPtrT> base,
- TNode<WordT> offset, TNode<Word32T> value);
+ void AtomicStore(MachineRepresentation rep, AtomicMemoryOrder order,
+ TNode<RawPtrT> base, TNode<WordT> offset,
+ TNode<Word32T> value);
// {value_high} is used for 64-bit stores on 32-bit platforms, must be
// nullptr in other cases.
- void AtomicStore64(TNode<RawPtrT> base, TNode<WordT> offset,
- TNode<UintPtrT> value, TNode<UintPtrT> value_high);
+ void AtomicStore64(AtomicMemoryOrder order, TNode<RawPtrT> base,
+ TNode<WordT> offset, TNode<UintPtrT> value,
+ TNode<UintPtrT> value_high);
TNode<Word32T> AtomicAdd(MachineType type, TNode<RawPtrT> base,
TNode<UintPtrT> offset, TNode<Word32T> value);
@@ -1225,7 +1217,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
template <class... TArgs>
TNode<Object> CallJS(Callable const& callable, Node* context, Node* function,
Node* receiver, TArgs... args) {
- int argc = static_cast<int>(sizeof...(args));
+ int argc = JSParameterCount(static_cast<int>(sizeof...(args)));
TNode<Int32T> arity = Int32Constant(argc);
TNode<Code> target = HeapConstant(callable.code());
return CAST(CallJSStubImpl(callable.descriptor(), target, CAST(context),
@@ -1235,7 +1227,7 @@ class V8_EXPORT_PRIVATE CodeAssembler {
template <class... TArgs>
Node* ConstructJSWithTarget(Callable const& callable, Node* context,
Node* function, Node* new_target, TArgs... args) {
- int argc = static_cast<int>(sizeof...(args));
+ int argc = JSParameterCount(static_cast<int>(sizeof...(args)));
TNode<Int32T> arity = Int32Constant(argc);
TNode<Object> receiver = LoadRoot(RootIndex::kUndefinedValue);
TNode<Code> target = HeapConstant(callable.code());
@@ -1312,7 +1304,6 @@ class V8_EXPORT_PRIVATE CodeAssembler {
void UnregisterCallGenerationCallbacks();
bool Word32ShiftIsSafe() const;
- PoisoningMitigationLevel poisoning_level() const;
bool IsJSFunctionCall() const;
@@ -1367,7 +1358,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
const CallInterfaceDescriptor& descriptor, int input_count,
Node* const* inputs);
- Node* AtomicLoad(MachineType type, TNode<RawPtrT> base, TNode<WordT> offset);
+ Node* AtomicLoad(MachineType type, AtomicMemoryOrder order,
+ TNode<RawPtrT> base, TNode<WordT> offset);
Node* UnalignedLoad(MachineType type, TNode<RawPtrT> base,
TNode<WordT> offset);
@@ -1595,13 +1587,11 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
// TODO(rmcilroy): move result_size to the CallInterfaceDescriptor.
CodeAssemblerState(Isolate* isolate, Zone* zone,
const CallInterfaceDescriptor& descriptor, CodeKind kind,
- const char* name, PoisoningMitigationLevel poisoning_level,
- Builtin builtin = Builtin::kNoBuiltinId);
+ const char* name, Builtin builtin = Builtin::kNoBuiltinId);
// Create with JSCall linkage.
CodeAssemblerState(Isolate* isolate, Zone* zone, int parameter_count,
CodeKind kind, const char* name,
- PoisoningMitigationLevel poisoning_level,
Builtin builtin = Builtin::kNoBuiltinId);
~CodeAssemblerState();
@@ -1628,8 +1618,7 @@ class V8_EXPORT_PRIVATE CodeAssemblerState {
CodeAssemblerState(Isolate* isolate, Zone* zone,
CallDescriptor* call_descriptor, CodeKind kind,
- const char* name, PoisoningMitigationLevel poisoning_level,
- Builtin builtin);
+ const char* name, Builtin builtin);
void PushExceptionHandler(CodeAssemblerExceptionHandlerLabel* label);
void PopExceptionHandler();
diff --git a/deps/v8/src/compiler/common-operator.cc b/deps/v8/src/compiler/common-operator.cc
index b370a673b9..329ccc7e86 100644
--- a/deps/v8/src/compiler/common-operator.cc
+++ b/deps/v8/src/compiler/common-operator.cc
@@ -28,18 +28,6 @@ std::ostream& operator<<(std::ostream& os, BranchHint hint) {
UNREACHABLE();
}
-std::ostream& operator<<(std::ostream& os, IsSafetyCheck is_safety_check) {
- switch (is_safety_check) {
- case IsSafetyCheck::kCriticalSafetyCheck:
- return os << "CriticalSafetyCheck";
- case IsSafetyCheck::kSafetyCheck:
- return os << "SafetyCheck";
- case IsSafetyCheck::kNoSafetyCheck:
- return os << "NoSafetyCheck";
- }
- UNREACHABLE();
-}
-
std::ostream& operator<<(std::ostream& os, TrapId trap_id) {
switch (trap_id) {
#define TRAP_CASE(Name) \
@@ -59,22 +47,12 @@ TrapId TrapIdOf(const Operator* const op) {
return OpParameter<TrapId>(op);
}
-std::ostream& operator<<(std::ostream& os, BranchOperatorInfo info) {
- return os << info.hint << ", " << info.is_safety_check;
-}
-
-const BranchOperatorInfo& BranchOperatorInfoOf(const Operator* const op) {
- DCHECK_EQ(IrOpcode::kBranch, op->opcode());
- return OpParameter<BranchOperatorInfo>(op);
-}
-
BranchHint BranchHintOf(const Operator* const op) {
switch (op->opcode()) {
- case IrOpcode::kBranch:
- return BranchOperatorInfoOf(op).hint;
case IrOpcode::kIfValue:
return IfValueParametersOf(op).hint();
case IrOpcode::kIfDefault:
+ case IrOpcode::kBranch:
return OpParameter<BranchHint>(op);
default:
UNREACHABLE();
@@ -90,8 +68,7 @@ int ValueInputCountOfReturn(Operator const* const op) {
bool operator==(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
return lhs.kind() == rhs.kind() && lhs.reason() == rhs.reason() &&
- lhs.feedback() == rhs.feedback() &&
- lhs.is_safety_check() == rhs.is_safety_check();
+ lhs.feedback() == rhs.feedback();
}
bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
@@ -100,13 +77,11 @@ bool operator!=(DeoptimizeParameters lhs, DeoptimizeParameters rhs) {
size_t hash_value(DeoptimizeParameters p) {
 FeedbackSource::Hash feedback_hash;
- return base::hash_combine(p.kind(), p.reason(), feedback_hash(p.feedback()),
- p.is_safety_check());
+ return base::hash_combine(p.kind(), p.reason(), feedback_hash(p.feedback()));
}
std::ostream& operator<<(std::ostream& os, DeoptimizeParameters p) {
- return os << p.kind() << ", " << p.reason() << ", " << p.is_safety_check()
- << ", " << p.feedback();
+ return os << p.kind() << ", " << p.reason() << ", " << p.feedback();
}
DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) {
@@ -117,32 +92,6 @@ DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const op) {
return OpParameter<DeoptimizeParameters>(op);
}
-IsSafetyCheck IsSafetyCheckOf(const Operator* op) {
- if (op->opcode() == IrOpcode::kBranch) {
- return BranchOperatorInfoOf(op).is_safety_check;
- }
- return DeoptimizeParametersOf(op).is_safety_check();
-}
-
-const Operator* CommonOperatorBuilder::MarkAsSafetyCheck(
- const Operator* op, IsSafetyCheck safety_check) {
- if (op->opcode() == IrOpcode::kBranch) {
- BranchOperatorInfo info = BranchOperatorInfoOf(op);
- if (info.is_safety_check == safety_check) return op;
- return Branch(info.hint, safety_check);
- }
- DeoptimizeParameters p = DeoptimizeParametersOf(op);
- if (p.is_safety_check() == safety_check) return op;
- switch (op->opcode()) {
- case IrOpcode::kDeoptimizeIf:
- return DeoptimizeIf(p.kind(), p.reason(), p.feedback(), safety_check);
- case IrOpcode::kDeoptimizeUnless:
- return DeoptimizeUnless(p.kind(), p.reason(), p.feedback(), safety_check);
- default:
- UNREACHABLE();
- }
-}
-
const Operator* CommonOperatorBuilder::DelayedStringConstant(
const StringConstantBase* str) {
return zone()->New<Operator1<const StringConstantBase*>>(
@@ -478,16 +427,10 @@ IfValueParameters const& IfValueParametersOf(const Operator* op) {
#define CACHED_LOOP_EXIT_VALUE_LIST(V) V(kTagged)
-#define CACHED_BRANCH_LIST(V) \
- V(None, CriticalSafetyCheck) \
- V(True, CriticalSafetyCheck) \
- V(False, CriticalSafetyCheck) \
- V(None, SafetyCheck) \
- V(True, SafetyCheck) \
- V(False, SafetyCheck) \
- V(None, NoSafetyCheck) \
- V(True, NoSafetyCheck) \
- V(False, NoSafetyCheck)
+#define CACHED_BRANCH_LIST(V) \
+ V(None) \
+ V(True) \
+ V(False)
#define CACHED_RETURN_LIST(V) \
V(1) \
@@ -541,28 +484,22 @@ IfValueParameters const& IfValueParametersOf(const Operator* op) {
V(Soft, InsufficientTypeFeedbackForGenericKeyedAccess) \
V(Soft, InsufficientTypeFeedbackForGenericNamedAccess)
-#define CACHED_DEOPTIMIZE_IF_LIST(V) \
- V(Eager, DivisionByZero, NoSafetyCheck) \
- V(Eager, DivisionByZero, SafetyCheck) \
- V(Eager, Hole, NoSafetyCheck) \
- V(Eager, Hole, SafetyCheck) \
- V(Eager, MinusZero, NoSafetyCheck) \
- V(Eager, MinusZero, SafetyCheck) \
- V(Eager, Overflow, NoSafetyCheck) \
- V(Eager, Overflow, SafetyCheck) \
- V(Eager, Smi, SafetyCheck)
-
-#define CACHED_DEOPTIMIZE_UNLESS_LIST(V) \
- V(Eager, LostPrecision, NoSafetyCheck) \
- V(Eager, LostPrecision, SafetyCheck) \
- V(Eager, LostPrecisionOrNaN, NoSafetyCheck) \
- V(Eager, LostPrecisionOrNaN, SafetyCheck) \
- V(Eager, NotAHeapNumber, SafetyCheck) \
- V(Eager, NotANumberOrOddball, SafetyCheck) \
- V(Eager, NotASmi, SafetyCheck) \
- V(Eager, OutOfBounds, SafetyCheck) \
- V(Eager, WrongInstanceType, SafetyCheck) \
- V(Eager, WrongMap, SafetyCheck)
+#define CACHED_DEOPTIMIZE_IF_LIST(V) \
+ V(Eager, DivisionByZero) \
+ V(Eager, Hole) \
+ V(Eager, MinusZero) \
+ V(Eager, Overflow) \
+ V(Eager, Smi)
+
+#define CACHED_DEOPTIMIZE_UNLESS_LIST(V) \
+ V(Eager, LostPrecision) \
+ V(Eager, LostPrecisionOrNaN) \
+ V(Eager, NotAHeapNumber) \
+ V(Eager, NotANumberOrOddball) \
+ V(Eager, NotASmi) \
+ V(Eager, OutOfBounds) \
+ V(Eager, WrongInstanceType) \
+ V(Eager, WrongMap)
#define CACHED_DYNAMIC_CHECK_MAPS_LIST(V) \
V(DynamicCheckMaps) \
@@ -668,18 +605,17 @@ struct CommonOperatorGlobalCache final {
CACHED_RETURN_LIST(CACHED_RETURN)
#undef CACHED_RETURN
- template <BranchHint hint, IsSafetyCheck is_safety_check>
- struct BranchOperator final : public Operator1<BranchOperatorInfo> {
+ template <BranchHint hint>
+ struct BranchOperator final : public Operator1<BranchHint> {
BranchOperator()
- : Operator1<BranchOperatorInfo>( // --
- IrOpcode::kBranch, Operator::kKontrol, // opcode
- "Branch", // name
- 1, 0, 1, 0, 0, 2, // counts
- BranchOperatorInfo{hint, is_safety_check}) {} // parameter
+ : Operator1<BranchHint>( // --
+ IrOpcode::kBranch, Operator::kKontrol, // opcode
+ "Branch", // name
+ 1, 0, 1, 0, 0, 2, // counts
+ hint) {} // parameter
};
-#define CACHED_BRANCH(Hint, IsCheck) \
- BranchOperator<BranchHint::k##Hint, IsSafetyCheck::k##IsCheck> \
- kBranch##Hint##IsCheck##Operator;
+#define CACHED_BRANCH(Hint) \
+ BranchOperator<BranchHint::k##Hint> kBranch##Hint##Operator;
CACHED_BRANCH_LIST(CACHED_BRANCH)
#undef CACHED_BRANCH
@@ -757,8 +693,7 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"Deoptimize", // name
1, 1, 1, 0, 0, 1, // counts
- DeoptimizeParameters(kKind, kReason, FeedbackSource(),
- IsSafetyCheck::kNoSafetyCheck)) {}
+ DeoptimizeParameters(kKind, kReason, FeedbackSource())) {}
};
#define CACHED_DEOPTIMIZE(Kind, Reason) \
DeoptimizeOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
@@ -766,8 +701,7 @@ struct CommonOperatorGlobalCache final {
CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
#undef CACHED_DEOPTIMIZE
- template <DeoptimizeKind kKind, DeoptimizeReason kReason,
- IsSafetyCheck is_safety_check>
+ template <DeoptimizeKind kKind, DeoptimizeReason kReason>
struct DeoptimizeIfOperator final : public Operator1<DeoptimizeParameters> {
DeoptimizeIfOperator()
: Operator1<DeoptimizeParameters>( // --
@@ -775,18 +709,15 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeIf", // name
2, 1, 1, 0, 1, 1, // counts
- DeoptimizeParameters(kKind, kReason, FeedbackSource(),
- is_safety_check)) {}
+ DeoptimizeParameters(kKind, kReason, FeedbackSource())) {}
};
-#define CACHED_DEOPTIMIZE_IF(Kind, Reason, IsCheck) \
- DeoptimizeIfOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason, \
- IsSafetyCheck::k##IsCheck> \
- kDeoptimizeIf##Kind##Reason##IsCheck##Operator;
+#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
+ DeoptimizeIfOperator<DeoptimizeKind::k##Kind, DeoptimizeReason::k##Reason> \
+ kDeoptimizeIf##Kind##Reason##Operator;
CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
#undef CACHED_DEOPTIMIZE_IF
- template <DeoptimizeKind kKind, DeoptimizeReason kReason,
- IsSafetyCheck is_safety_check>
+ template <DeoptimizeKind kKind, DeoptimizeReason kReason>
struct DeoptimizeUnlessOperator final
: public Operator1<DeoptimizeParameters> {
DeoptimizeUnlessOperator()
@@ -795,14 +726,12 @@ struct CommonOperatorGlobalCache final {
Operator::kFoldable | Operator::kNoThrow, // properties
"DeoptimizeUnless", // name
2, 1, 1, 0, 1, 1, // counts
- DeoptimizeParameters(kKind, kReason, FeedbackSource(),
- is_safety_check)) {}
+ DeoptimizeParameters(kKind, kReason, FeedbackSource())) {}
};
-#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason, IsCheck) \
+#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
DeoptimizeUnlessOperator<DeoptimizeKind::k##Kind, \
- DeoptimizeReason::k##Reason, \
- IsSafetyCheck::k##IsCheck> \
- kDeoptimizeUnless##Kind##Reason##IsCheck##Operator;
+ DeoptimizeReason::k##Reason> \
+ kDeoptimizeUnless##Kind##Reason##Operator;
CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
#undef CACHED_DEOPTIMIZE_UNLESS
@@ -815,8 +744,7 @@ struct CommonOperatorGlobalCache final {
"DynamicCheckMapsWithDeoptUnless", // name
6, 1, 1, 0, 1, 1, // counts
DeoptimizeParameters(DeoptimizeKind::kEagerWithResume, kReason,
- FeedbackSource(),
- IsSafetyCheck::kCriticalSafetyCheck)) {}
+ FeedbackSource())) {}
};
#define CACHED_DYNAMIC_CHECK_MAPS(Reason) \
DynamicMapCheckOperator<DeoptimizeReason::k##Reason> k##Reason##Operator;
@@ -985,12 +913,10 @@ const Operator* CommonOperatorBuilder::StaticAssert(const char* source) {
1, 0, source);
}
-const Operator* CommonOperatorBuilder::Branch(BranchHint hint,
- IsSafetyCheck is_safety_check) {
-#define CACHED_BRANCH(Hint, IsCheck) \
- if (hint == BranchHint::k##Hint && \
- is_safety_check == IsSafetyCheck::k##IsCheck) { \
- return &cache_.kBranch##Hint##IsCheck##Operator; \
+const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
+#define CACHED_BRANCH(Hint) \
+ if (hint == BranchHint::k##Hint) { \
+ return &cache_.kBranch##Hint##Operator; \
}
CACHED_BRANCH_LIST(CACHED_BRANCH)
#undef CACHED_BRANCH
@@ -1008,8 +934,7 @@ const Operator* CommonOperatorBuilder::Deoptimize(
CACHED_DEOPTIMIZE_LIST(CACHED_DEOPTIMIZE)
#undef CACHED_DEOPTIMIZE
// Uncached
- DeoptimizeParameters parameter(kind, reason, feedback,
- IsSafetyCheck::kNoSafetyCheck);
+ DeoptimizeParameters parameter(kind, reason, feedback);
return zone()->New<Operator1<DeoptimizeParameters>>( // --
IrOpcode::kDeoptimize, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -1020,17 +945,16 @@ const Operator* CommonOperatorBuilder::Deoptimize(
const Operator* CommonOperatorBuilder::DeoptimizeIf(
DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback, IsSafetyCheck is_safety_check) {
-#define CACHED_DEOPTIMIZE_IF(Kind, Reason, IsCheck) \
- if (kind == DeoptimizeKind::k##Kind && \
- reason == DeoptimizeReason::k##Reason && \
- is_safety_check == IsSafetyCheck::k##IsCheck && !feedback.IsValid()) { \
- return &cache_.kDeoptimizeIf##Kind##Reason##IsCheck##Operator; \
+ FeedbackSource const& feedback) {
+#define CACHED_DEOPTIMIZE_IF(Kind, Reason) \
+ if (kind == DeoptimizeKind::k##Kind && \
+ reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
+ return &cache_.kDeoptimizeIf##Kind##Reason##Operator; \
}
CACHED_DEOPTIMIZE_IF_LIST(CACHED_DEOPTIMIZE_IF)
#undef CACHED_DEOPTIMIZE_IF
// Uncached
- DeoptimizeParameters parameter(kind, reason, feedback, is_safety_check);
+ DeoptimizeParameters parameter(kind, reason, feedback);
return zone()->New<Operator1<DeoptimizeParameters>>( // --
IrOpcode::kDeoptimizeIf, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -1041,17 +965,16 @@ const Operator* CommonOperatorBuilder::DeoptimizeIf(
const Operator* CommonOperatorBuilder::DeoptimizeUnless(
DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback, IsSafetyCheck is_safety_check) {
-#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason, IsCheck) \
- if (kind == DeoptimizeKind::k##Kind && \
- reason == DeoptimizeReason::k##Reason && \
- is_safety_check == IsSafetyCheck::k##IsCheck && !feedback.IsValid()) { \
- return &cache_.kDeoptimizeUnless##Kind##Reason##IsCheck##Operator; \
+ FeedbackSource const& feedback) {
+#define CACHED_DEOPTIMIZE_UNLESS(Kind, Reason) \
+ if (kind == DeoptimizeKind::k##Kind && \
+ reason == DeoptimizeReason::k##Reason && !feedback.IsValid()) { \
+ return &cache_.kDeoptimizeUnless##Kind##Reason##Operator; \
}
CACHED_DEOPTIMIZE_UNLESS_LIST(CACHED_DEOPTIMIZE_UNLESS)
#undef CACHED_DEOPTIMIZE_UNLESS
// Uncached
- DeoptimizeParameters parameter(kind, reason, feedback, is_safety_check);
+ DeoptimizeParameters parameter(kind, reason, feedback);
return zone()->New<Operator1<DeoptimizeParameters>>( // --
IrOpcode::kDeoptimizeUnless, // opcode
Operator::kFoldable | Operator::kNoThrow, // properties
@@ -1664,17 +1587,6 @@ const FrameStateInfo& FrameStateInfoOf(const Operator* op) {
return OpParameter<FrameStateInfo>(op);
}
-IsSafetyCheck CombineSafetyChecks(IsSafetyCheck a, IsSafetyCheck b) {
- if (a == IsSafetyCheck::kCriticalSafetyCheck ||
- b == IsSafetyCheck::kCriticalSafetyCheck) {
- return IsSafetyCheck::kCriticalSafetyCheck;
- }
- if (a == IsSafetyCheck::kSafetyCheck || b == IsSafetyCheck::kSafetyCheck) {
- return IsSafetyCheck::kSafetyCheck;
- }
- return IsSafetyCheck::kNoSafetyCheck;
-}
-
#undef COMMON_CACHED_OP_LIST
#undef CACHED_BRANCH_LIST
#undef CACHED_RETURN_LIST
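With IsSafetyCheck removed, CACHED_BRANCH_LIST above shrinks from nine cached Branch operators (hint times safety-check level) to three, keyed on BranchHint alone. A reduced, compilable model of that lookup, with stand-in types in place of V8's Operator machinery:

#include <cassert>

enum class BranchHint { kNone, kTrue, kFalse };

// Stand-in for Operator1<BranchHint>.
struct BranchOperator {
  BranchHint hint;
};

// One statically cached operator per hint, as in CommonOperatorGlobalCache.
struct OperatorCache {
  BranchOperator kBranchNoneOperator{BranchHint::kNone};
  BranchOperator kBranchTrueOperator{BranchHint::kTrue};
  BranchOperator kBranchFalseOperator{BranchHint::kFalse};
};

const BranchOperator* Branch(const OperatorCache& cache, BranchHint hint) {
  switch (hint) {
    case BranchHint::kNone:  return &cache.kBranchNoneOperator;
    case BranchHint::kTrue:  return &cache.kBranchTrueOperator;
    case BranchHint::kFalse: return &cache.kBranchFalseOperator;
  }
  return nullptr;
}

int main() {
  OperatorCache cache;
  assert(Branch(cache, BranchHint::kTrue)->hint == BranchHint::kTrue);
}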
diff --git a/deps/v8/src/compiler/common-operator.h b/deps/v8/src/compiler/common-operator.h
index fa49d3b992..f691c1fbf4 100644
--- a/deps/v8/src/compiler/common-operator.h
+++ b/deps/v8/src/compiler/common-operator.h
@@ -51,20 +51,6 @@ inline size_t hash_value(BranchHint hint) { return static_cast<size_t>(hint); }
V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BranchHint);
-enum class IsSafetyCheck : uint8_t {
- kCriticalSafetyCheck,
- kSafetyCheck,
- kNoSafetyCheck
-};
-
-// Get the more critical safety check of the two arguments.
-IsSafetyCheck CombineSafetyChecks(IsSafetyCheck, IsSafetyCheck);
-
-V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, IsSafetyCheck);
-inline size_t hash_value(IsSafetyCheck is_safety_check) {
- return static_cast<size_t>(is_safety_check);
-}
-
enum class TrapId : uint32_t {
#define DEF_ENUM(Name, ...) k##Name,
FOREACH_WASM_TRAPREASON(DEF_ENUM)
@@ -78,24 +64,6 @@ std::ostream& operator<<(std::ostream&, TrapId trap_id);
TrapId TrapIdOf(const Operator* const op);
-struct BranchOperatorInfo {
- BranchHint hint;
- IsSafetyCheck is_safety_check;
-};
-
-inline size_t hash_value(const BranchOperatorInfo& info) {
- return base::hash_combine(info.hint, info.is_safety_check);
-}
-
-V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, BranchOperatorInfo);
-
-inline bool operator==(const BranchOperatorInfo& a,
- const BranchOperatorInfo& b) {
- return a.hint == b.hint && a.is_safety_check == b.is_safety_check;
-}
-
-V8_EXPORT_PRIVATE const BranchOperatorInfo& BranchOperatorInfoOf(
- const Operator* const) V8_WARN_UNUSED_RESULT;
V8_EXPORT_PRIVATE BranchHint BranchHintOf(const Operator* const)
V8_WARN_UNUSED_RESULT;
@@ -106,23 +74,17 @@ int ValueInputCountOfReturn(Operator const* const op);
class DeoptimizeParameters final {
public:
DeoptimizeParameters(DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback,
- IsSafetyCheck is_safety_check)
- : kind_(kind),
- reason_(reason),
- feedback_(feedback),
- is_safety_check_(is_safety_check) {}
+ FeedbackSource const& feedback)
+ : kind_(kind), reason_(reason), feedback_(feedback) {}
DeoptimizeKind kind() const { return kind_; }
DeoptimizeReason reason() const { return reason_; }
const FeedbackSource& feedback() const { return feedback_; }
- IsSafetyCheck is_safety_check() const { return is_safety_check_; }
private:
DeoptimizeKind const kind_;
DeoptimizeReason const reason_;
FeedbackSource const feedback_;
- IsSafetyCheck is_safety_check_;
};
bool operator==(DeoptimizeParameters, DeoptimizeParameters);
@@ -135,8 +97,6 @@ std::ostream& operator<<(std::ostream&, DeoptimizeParameters p);
DeoptimizeParameters const& DeoptimizeParametersOf(Operator const* const)
V8_WARN_UNUSED_RESULT;
-IsSafetyCheck IsSafetyCheckOf(const Operator* op) V8_WARN_UNUSED_RESULT;
-
class SelectParameters final {
public:
explicit SelectParameters(MachineRepresentation representation,
@@ -479,8 +439,7 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* Unreachable();
const Operator* StaticAssert(const char* source);
const Operator* End(size_t control_input_count);
- const Operator* Branch(BranchHint = BranchHint::kNone,
- IsSafetyCheck = IsSafetyCheck::kSafetyCheck);
+ const Operator* Branch(BranchHint = BranchHint::kNone);
const Operator* IfTrue();
const Operator* IfFalse();
const Operator* IfSuccess();
@@ -492,14 +451,10 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const Operator* Throw();
const Operator* Deoptimize(DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback);
- const Operator* DeoptimizeIf(
- DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
- const Operator* DeoptimizeUnless(
- DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
+ const Operator* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason,
+ FeedbackSource const& feedback);
+ const Operator* DeoptimizeUnless(DeoptimizeKind kind, DeoptimizeReason reason,
+ FeedbackSource const& feedback);
// DynamicCheckMapsWithDeoptUnless will call the dynamic map check builtin if
// the condition is false, which may then either deoptimize or resume
// execution.
@@ -577,9 +532,6 @@ class V8_EXPORT_PRIVATE CommonOperatorBuilder final
const wasm::FunctionSig* signature);
#endif // V8_ENABLE_WEBASSEMBLY
- const Operator* MarkAsSafetyCheck(const Operator* op,
- IsSafetyCheck safety_check);
-
const Operator* DelayedStringConstant(const StringConstantBase* str);
private:
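DeoptimizeParameters is now a plain value object over kind, reason and feedback, so the equality and hash shown earlier in common-operator.cc each drop a component. A sketch of the same shape, using stand-in enums and an int in place of FeedbackSource:

#include <cassert>
#include <cstddef>
#include <functional>

enum class Kind { kEager, kSoft };
enum class Reason { kHole, kWrongMap };

struct Params {
  Kind kind;
  Reason reason;
  int feedback;  // stand-in for FeedbackSource
};

bool operator==(const Params& a, const Params& b) {
  return a.kind == b.kind && a.reason == b.reason && a.feedback == b.feedback;
}

std::size_t hash_value(const Params& p) {
  // Combine the three remaining components; is_safety_check is gone.
  std::size_t h = std::hash<int>{}(static_cast<int>(p.kind));
  h = h * 31 + std::hash<int>{}(static_cast<int>(p.reason));
  return h * 31 + std::hash<int>{}(p.feedback);
}

int main() {
  Params a{Kind::kEager, Reason::kHole, 0};
  Params b{Kind::kEager, Reason::kHole, 0};
  assert(a == b && hash_value(a) == hash_value(b));
}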
diff --git a/deps/v8/src/compiler/compilation-dependencies.cc b/deps/v8/src/compiler/compilation-dependencies.cc
index dc2db32753..27720c80ed 100644
--- a/deps/v8/src/compiler/compilation-dependencies.cc
+++ b/deps/v8/src/compiler/compilation-dependencies.cc
@@ -5,7 +5,6 @@
#include "src/compiler/compilation-dependencies.h"
#include "src/base/optional.h"
-#include "src/compiler/compilation-dependency.h"
#include "src/execution/protectors.h"
#include "src/handles/handles-inl.h"
#include "src/objects/allocation-site-inl.h"
@@ -19,18 +18,84 @@ namespace v8 {
namespace internal {
namespace compiler {
+#define DEPENDENCY_LIST(V) \
+ V(ConsistentJSFunctionView) \
+ V(ConstantInDictionaryPrototypeChain) \
+ V(ElementsKind) \
+ V(FieldConstness) \
+ V(FieldRepresentation) \
+ V(FieldType) \
+ V(GlobalProperty) \
+ V(InitialMap) \
+ V(InitialMapInstanceSizePrediction) \
+ V(OwnConstantDataProperty) \
+ V(OwnConstantDictionaryProperty) \
+ V(OwnConstantElement) \
+ V(PretenureMode) \
+ V(Protector) \
+ V(PrototypeProperty) \
+ V(StableMap) \
+ V(Transition)
+
CompilationDependencies::CompilationDependencies(JSHeapBroker* broker,
Zone* zone)
: zone_(zone), broker_(broker), dependencies_(zone) {
broker->set_dependencies(this);
}
+namespace {
+
+enum CompilationDependencyKind {
+#define V(Name) k##Name,
+ DEPENDENCY_LIST(V)
+#undef V
+};
+
+#define V(Name) class Name##Dependency;
+DEPENDENCY_LIST(V)
+#undef V
+
+const char* CompilationDependencyKindToString(CompilationDependencyKind kind) {
+#define V(Name) #Name "Dependency",
+ static const char* const names[] = {DEPENDENCY_LIST(V)};
+#undef V
+ return names[kind];
+}
+
+} // namespace
+
+class CompilationDependency : public ZoneObject {
+ public:
+ explicit CompilationDependency(CompilationDependencyKind kind) : kind(kind) {}
+
+ virtual bool IsValid() const = 0;
+ virtual void PrepareInstall() const {}
+ virtual void Install(Handle<Code> code) const = 0;
+
+#ifdef DEBUG
+#define V(Name) \
+ bool Is##Name() const { return kind == k##Name; } \
+ V8_ALLOW_UNUSED const Name##Dependency* As##Name() const;
+ DEPENDENCY_LIST(V)
+#undef V
+#endif
+
+ const char* ToString() const {
+ return CompilationDependencyKindToString(kind);
+ }
+
+ const CompilationDependencyKind kind;
+};
+
+namespace {
+
class InitialMapDependency final : public CompilationDependency {
public:
InitialMapDependency(JSHeapBroker* broker, const JSFunctionRef& function,
const MapRef& initial_map)
- : function_(function), initial_map_(initial_map) {
- }
+ : CompilationDependency(kInitialMap),
+ function_(function),
+ initial_map_(initial_map) {}
bool IsValid() const override {
Handle<JSFunction> function = function_.object();
@@ -55,7 +120,9 @@ class PrototypePropertyDependency final : public CompilationDependency {
PrototypePropertyDependency(JSHeapBroker* broker,
const JSFunctionRef& function,
const ObjectRef& prototype)
- : function_(function), prototype_(prototype) {
+ : CompilationDependency(kPrototypeProperty),
+ function_(function),
+ prototype_(prototype) {
DCHECK(function_.has_instance_prototype(broker->dependencies()));
DCHECK(!function_.PrototypeRequiresRuntimeLookup(broker->dependencies()));
DCHECK(function_.instance_prototype(broker->dependencies())
@@ -92,7 +159,8 @@ class PrototypePropertyDependency final : public CompilationDependency {
class StableMapDependency final : public CompilationDependency {
public:
- explicit StableMapDependency(const MapRef& map) : map_(map) {}
+ explicit StableMapDependency(const MapRef& map)
+ : CompilationDependency(kStableMap), map_(map) {}
bool IsValid() const override {
// TODO(v8:11670): Consider turning this back into a CHECK inside the
@@ -117,7 +185,8 @@ class ConstantInDictionaryPrototypeChainDependency final
explicit ConstantInDictionaryPrototypeChainDependency(
const MapRef receiver_map, const NameRef property_name,
const ObjectRef constant, PropertyKind kind)
- : receiver_map_(receiver_map),
+ : CompilationDependency(kConstantInDictionaryPrototypeChain),
+ receiver_map_(receiver_map),
property_name_{property_name},
constant_{constant},
kind_{kind} {
@@ -240,7 +309,8 @@ class OwnConstantDataPropertyDependency final : public CompilationDependency {
const MapRef& map,
Representation representation,
FieldIndex index, const ObjectRef& value)
- : broker_(broker),
+ : CompilationDependency(kOwnConstantDataProperty),
+ broker_(broker),
holder_(holder),
map_(map),
representation_(representation),
@@ -294,7 +364,8 @@ class OwnConstantDictionaryPropertyDependency final
const JSObjectRef& holder,
InternalIndex index,
const ObjectRef& value)
- : broker_(broker),
+ : CompilationDependency(kOwnConstantDictionaryProperty),
+ broker_(broker),
holder_(holder),
map_(holder.map()),
index_(index),
@@ -345,7 +416,7 @@ class OwnConstantDictionaryPropertyDependency final
class ConsistentJSFunctionViewDependency final : public CompilationDependency {
public:
explicit ConsistentJSFunctionViewDependency(const JSFunctionRef& function)
- : function_(function) {}
+ : CompilationDependency(kConsistentJSFunctionView), function_(function) {}
bool IsValid() const override {
return function_.IsConsistentWithHeapState();
@@ -353,17 +424,14 @@ class ConsistentJSFunctionViewDependency final : public CompilationDependency {
void Install(Handle<Code> code) const override {}
-#ifdef DEBUG
- bool IsConsistentJSFunctionViewDependency() const override { return true; }
-#endif
-
private:
const JSFunctionRef function_;
};
class TransitionDependency final : public CompilationDependency {
public:
- explicit TransitionDependency(const MapRef& map) : map_(map) {
+ explicit TransitionDependency(const MapRef& map)
+ : CompilationDependency(kTransition), map_(map) {
DCHECK(map_.CanBeDeprecated());
}
@@ -383,7 +451,9 @@ class PretenureModeDependency final : public CompilationDependency {
public:
PretenureModeDependency(const AllocationSiteRef& site,
AllocationType allocation)
- : site_(site), allocation_(allocation) {}
+ : CompilationDependency(kPretenureMode),
+ site_(site),
+ allocation_(allocation) {}
bool IsValid() const override {
return allocation_ == site_.object()->GetAllocationType();
@@ -396,10 +466,6 @@ class PretenureModeDependency final : public CompilationDependency {
DependentCode::kAllocationSiteTenuringChangedGroup);
}
-#ifdef DEBUG
- bool IsPretenureModeDependency() const override { return true; }
-#endif
-
private:
AllocationSiteRef site_;
AllocationType allocation_;
@@ -409,7 +475,10 @@ class FieldRepresentationDependency final : public CompilationDependency {
public:
FieldRepresentationDependency(const MapRef& map, InternalIndex descriptor,
Representation representation)
- : map_(map), descriptor_(descriptor), representation_(representation) {}
+ : CompilationDependency(kFieldRepresentation),
+ map_(map),
+ descriptor_(descriptor),
+ representation_(representation) {}
bool IsValid() const override {
DisallowGarbageCollection no_heap_allocation;
@@ -433,12 +502,9 @@ class FieldRepresentationDependency final : public CompilationDependency {
DependentCode::kFieldRepresentationGroup);
}
-#ifdef DEBUG
- bool IsFieldRepresentationDependencyOnMap(
- Handle<Map> const& receiver_map) const override {
+ bool DependsOn(const Handle<Map>& receiver_map) const {
return map_.object().equals(receiver_map);
}
-#endif
private:
MapRef map_;
@@ -450,7 +516,10 @@ class FieldTypeDependency final : public CompilationDependency {
public:
FieldTypeDependency(const MapRef& map, InternalIndex descriptor,
const ObjectRef& type)
- : map_(map), descriptor_(descriptor), type_(type) {}
+ : CompilationDependency(kFieldType),
+ map_(map),
+ descriptor_(descriptor),
+ type_(type) {}
bool IsValid() const override {
DisallowGarbageCollection no_heap_allocation;
@@ -481,7 +550,9 @@ class FieldTypeDependency final : public CompilationDependency {
class FieldConstnessDependency final : public CompilationDependency {
public:
FieldConstnessDependency(const MapRef& map, InternalIndex descriptor)
- : map_(map), descriptor_(descriptor) {}
+ : CompilationDependency(kFieldConstness),
+ map_(map),
+ descriptor_(descriptor) {}
bool IsValid() const override {
DisallowGarbageCollection no_heap_allocation;
@@ -515,7 +586,10 @@ class GlobalPropertyDependency final : public CompilationDependency {
public:
GlobalPropertyDependency(const PropertyCellRef& cell, PropertyCellType type,
bool read_only)
- : cell_(cell), type_(type), read_only_(read_only) {
+ : CompilationDependency(kGlobalProperty),
+ cell_(cell),
+ type_(type),
+ read_only_(read_only) {
DCHECK_EQ(type_, cell_.property_details().cell_type());
DCHECK_EQ(read_only_, cell_.property_details().IsReadOnly());
}
@@ -545,7 +619,8 @@ class GlobalPropertyDependency final : public CompilationDependency {
class ProtectorDependency final : public CompilationDependency {
public:
- explicit ProtectorDependency(const PropertyCellRef& cell) : cell_(cell) {}
+ explicit ProtectorDependency(const PropertyCellRef& cell)
+ : CompilationDependency(kProtector), cell_(cell) {}
bool IsValid() const override {
Handle<PropertyCell> cell = cell_.object();
@@ -565,7 +640,7 @@ class ProtectorDependency final : public CompilationDependency {
class ElementsKindDependency final : public CompilationDependency {
public:
ElementsKindDependency(const AllocationSiteRef& site, ElementsKind kind)
- : site_(site), kind_(kind) {
+ : CompilationDependency(kElementsKind), site_(site), kind_(kind) {
DCHECK(AllocationSite::ShouldTrack(kind_));
}
@@ -596,7 +671,10 @@ class OwnConstantElementDependency final : public CompilationDependency {
public:
OwnConstantElementDependency(const JSObjectRef& holder, uint32_t index,
const ObjectRef& element)
- : holder_(holder), index_(index), element_(element) {}
+ : CompilationDependency(kOwnConstantElement),
+ holder_(holder),
+ index_(index),
+ element_(element) {}
bool IsValid() const override {
DisallowGarbageCollection no_gc;
@@ -624,7 +702,9 @@ class InitialMapInstanceSizePredictionDependency final
public:
InitialMapInstanceSizePredictionDependency(const JSFunctionRef& function,
int instance_size)
- : function_(function), instance_size_(instance_size) {}
+ : CompilationDependency(kInitialMapInstanceSizePrediction),
+ function_(function),
+ instance_size_(instance_size) {}
bool IsValid() const override {
// The dependency is valid if the prediction is the same as the current
@@ -651,6 +731,8 @@ class InitialMapInstanceSizePredictionDependency final
int instance_size_;
};
+} // namespace
+
void CompilationDependencies::RecordDependency(
CompilationDependency const* dependency) {
if (dependency != nullptr) dependencies_.push_front(dependency);
@@ -795,9 +877,19 @@ void CompilationDependencies::DependOnOwnConstantDictionaryProperty(
broker_, holder, index, value));
}
+V8_INLINE void TraceInvalidCompilationDependency(
+ const CompilationDependency* d) {
+ DCHECK(FLAG_trace_compilation_dependencies);
+ DCHECK(!d->IsValid());
+ PrintF("Compilation aborted due to invalid dependency: %s\n", d->ToString());
+}
+
bool CompilationDependencies::Commit(Handle<Code> code) {
for (auto dep : dependencies_) {
if (!dep->IsValid()) {
+ if (FLAG_trace_compilation_dependencies) {
+ TraceInvalidCompilationDependency(dep);
+ }
dependencies_.clear();
return false;
}
@@ -812,6 +904,9 @@ bool CompilationDependencies::Commit(Handle<Code> code) {
// can call EnsureHasInitialMap, which can invalidate a StableMapDependency
// on the prototype object's map.
if (!dep->IsValid()) {
+ if (FLAG_trace_compilation_dependencies) {
+ TraceInvalidCompilationDependency(dep);
+ }
dependencies_.clear();
return false;
}
@@ -838,8 +933,7 @@ bool CompilationDependencies::Commit(Handle<Code> code) {
#ifdef DEBUG
for (auto dep : dependencies_) {
CHECK_IMPLIES(!dep->IsValid(),
- dep->IsPretenureModeDependency() ||
- dep->IsConsistentJSFunctionViewDependency());
+ dep->IsPretenureMode() || dep->IsConsistentJSFunctionView());
}
#endif
@@ -848,6 +942,7 @@ bool CompilationDependencies::Commit(Handle<Code> code) {
}
namespace {
+
// This function expects to never see a JSProxy.
void DependOnStablePrototypeChain(CompilationDependencies* deps, MapRef map,
base::Optional<JSObjectRef> last_prototype) {
@@ -862,8 +957,19 @@ void DependOnStablePrototypeChain(CompilationDependencies* deps, MapRef map,
if (last_prototype.has_value() && proto.equals(*last_prototype)) break;
}
}
+
} // namespace
+#ifdef DEBUG
+#define V(Name) \
+ const Name##Dependency* CompilationDependency::As##Name() const { \
+ DCHECK(Is##Name()); \
+ return static_cast<const Name##Dependency*>(this); \
+ }
+DEPENDENCY_LIST(V)
+#undef V
+#endif // DEBUG
+
void CompilationDependencies::DependOnStablePrototypeChains(
ZoneVector<MapRef> const& receiver_maps, WhereToStart start,
base::Optional<JSObjectRef> last_prototype) {
@@ -944,6 +1050,17 @@ CompilationDependencies::FieldTypeDependencyOffTheRecord(
return zone_->New<FieldTypeDependency>(map, descriptor, type);
}
+#ifdef DEBUG
+// static
+bool CompilationDependencies::IsFieldRepresentationDependencyOnMap(
+ const CompilationDependency* dep, const Handle<Map>& receiver_map) {
+ return dep->IsFieldRepresentation() &&
+ dep->AsFieldRepresentation()->DependsOn(receiver_map);
+}
+#endif // DEBUG
+
+#undef DEPENDENCY_LIST
+
} // namespace compiler
} // namespace internal
} // namespace v8
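The new DEPENDENCY_LIST X-macro drives everything added to this file: the kind enum, the printable names behind ToString, and hence the tracing output seen in Commit(). A self-contained model of the pattern with a three-entry list (the real list has seventeen entries):

#include <cstdio>

#define DEPENDENCY_LIST(V) \
  V(InitialMap)            \
  V(StableMap)             \
  V(Protector)

// One list expansion produces the enum...
enum Kind {
#define V(Name) k##Name,
  DEPENDENCY_LIST(V)
#undef V
};

// ...and another produces the matching name table.
const char* KindToString(Kind kind) {
#define V(Name) #Name "Dependency",
  static const char* const names[] = {DEPENDENCY_LIST(V)};
#undef V
  return names[kind];
}

void TraceInvalid(Kind kind) {
  std::printf("Compilation aborted due to invalid dependency: %s\n",
              KindToString(kind));
}

int main() { TraceInvalid(kStableMap); }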
diff --git a/deps/v8/src/compiler/compilation-dependencies.h b/deps/v8/src/compiler/compilation-dependencies.h
index be507c6843..f4b49878c8 100644
--- a/deps/v8/src/compiler/compilation-dependencies.h
+++ b/deps/v8/src/compiler/compilation-dependencies.h
@@ -154,6 +154,11 @@ class V8_EXPORT_PRIVATE CompilationDependencies : public ZoneObject {
const MapRef& map, InternalIndex descriptor,
const ObjectRef& /* Contains a FieldType underneath. */ type) const;
+#ifdef DEBUG
+ static bool IsFieldRepresentationDependencyOnMap(
+ const CompilationDependency* dep, const Handle<Map>& receiver_map);
+#endif // DEBUG
+
private:
Zone* const zone_;
JSHeapBroker* const broker_;
diff --git a/deps/v8/src/compiler/compilation-dependency.h b/deps/v8/src/compiler/compilation-dependency.h
deleted file mode 100644
index 852c7b7640..0000000000
--- a/deps/v8/src/compiler/compilation-dependency.h
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_COMPILATION_DEPENDENCY_H_
-#define V8_COMPILER_COMPILATION_DEPENDENCY_H_
-
-#include "src/zone/zone.h"
-
-namespace v8 {
-namespace internal {
-
-class MaybeObjectHandle;
-
-namespace compiler {
-
-class CompilationDependency : public ZoneObject {
- public:
- virtual bool IsValid() const = 0;
- virtual void PrepareInstall() const {}
- virtual void Install(Handle<Code> code) const = 0;
-
-#ifdef DEBUG
- virtual bool IsPretenureModeDependency() const { return false; }
- virtual bool IsFieldRepresentationDependencyOnMap(
- Handle<Map> const& receiver_map) const {
- return false;
- }
- virtual bool IsConsistentJSFunctionViewDependency() const { return false; }
-#endif
-};
-
-} // namespace compiler
-} // namespace internal
-} // namespace v8
-
-#endif // V8_COMPILER_COMPILATION_DEPENDENCY_H_
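The deleted header's virtual debug hooks (IsPretenureModeDependency and friends) are replaced by a kind tag stored in the base class, from which checked downcasts follow. A sketch of that tag-based RTTI pattern, under illustrative names:

#include <cassert>

enum Kind { kFieldRepresentation, kPretenureMode };

class Dependency {
 public:
  explicit Dependency(Kind kind) : kind(kind) {}
  bool IsFieldRepresentation() const { return kind == kFieldRepresentation; }
  const Kind kind;
};

class FieldRepresentationDependency : public Dependency {
 public:
  explicit FieldRepresentationDependency(int map_id)
      : Dependency(kFieldRepresentation), map_id_(map_id) {}
  bool DependsOn(int map_id) const { return map_id_ == map_id; }

 private:
  int map_id_;
};

// Checked downcast: the kind tag guards the static_cast.
const FieldRepresentationDependency* AsFieldRepresentation(
    const Dependency* dep) {
  assert(dep->IsFieldRepresentation());
  return static_cast<const FieldRepresentationDependency*>(dep);
}

int main() {
  FieldRepresentationDependency dep(42);
  const Dependency* base = &dep;
  assert(base->IsFieldRepresentation() &&
         AsFieldRepresentation(base)->DependsOn(42));
}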
diff --git a/deps/v8/src/compiler/decompression-optimizer.cc b/deps/v8/src/compiler/decompression-optimizer.cc
index 79e77fcee6..c0068489f7 100644
--- a/deps/v8/src/compiler/decompression-optimizer.cc
+++ b/deps/v8/src/compiler/decompression-optimizer.cc
@@ -15,8 +15,7 @@ namespace {
bool IsMachineLoad(Node* const node) {
const IrOpcode::Value opcode = node->opcode();
- return opcode == IrOpcode::kLoad || opcode == IrOpcode::kPoisonedLoad ||
- opcode == IrOpcode::kProtectedLoad ||
+ return opcode == IrOpcode::kLoad || opcode == IrOpcode::kProtectedLoad ||
opcode == IrOpcode::kUnalignedLoad ||
opcode == IrOpcode::kLoadImmutable;
}
@@ -212,10 +211,6 @@ void DecompressionOptimizer::ChangeLoad(Node* const node) {
NodeProperties::ChangeOp(node,
machine()->LoadImmutable(compressed_load_rep));
break;
- case IrOpcode::kPoisonedLoad:
- NodeProperties::ChangeOp(node,
- machine()->PoisonedLoad(compressed_load_rep));
- break;
case IrOpcode::kProtectedLoad:
NodeProperties::ChangeOp(node,
machine()->ProtectedLoad(compressed_load_rep));
diff --git a/deps/v8/src/compiler/effect-control-linearizer.cc b/deps/v8/src/compiler/effect-control-linearizer.cc
index d7a0ca62dd..83eb6c215c 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.cc
+++ b/deps/v8/src/compiler/effect-control-linearizer.cc
@@ -36,7 +36,6 @@ namespace internal {
namespace compiler {
enum class MaintainSchedule { kMaintain, kDiscard };
-enum class MaskArrayIndexEnable { kDoNotMaskArrayIndex, kMaskArrayIndex };
class EffectControlLinearizer {
public:
@@ -44,13 +43,11 @@ class EffectControlLinearizer {
JSGraphAssembler* graph_assembler, Zone* temp_zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
- MaskArrayIndexEnable mask_array_index,
MaintainSchedule maintain_schedule,
JSHeapBroker* broker)
: js_graph_(js_graph),
schedule_(schedule),
temp_zone_(temp_zone),
- mask_array_index_(mask_array_index),
maintain_schedule_(maintain_schedule),
source_positions_(source_positions),
node_origins_(node_origins),
@@ -80,7 +77,6 @@ class EffectControlLinearizer {
Node* LowerChangeTaggedToUint32(Node* node);
Node* LowerChangeTaggedToInt64(Node* node);
Node* LowerChangeTaggedToTaggedSigned(Node* node);
- Node* LowerPoisonIndex(Node* node);
Node* LowerCheckInternalizedString(Node* node, Node* frame_state);
void LowerCheckMaps(Node* node, Node* frame_state);
void LowerDynamicCheckMaps(Node* node, Node* frame_state);
@@ -338,7 +334,6 @@ class EffectControlLinearizer {
JSGraph* js_graph_;
Schedule* schedule_;
Zone* temp_zone_;
- MaskArrayIndexEnable mask_array_index_;
MaintainSchedule maintain_schedule_;
RegionObservability region_observability_ = RegionObservability::kObservable;
SourcePositionTable* source_positions_;
@@ -966,9 +961,6 @@ bool EffectControlLinearizer::TryWireInStateEffect(Node* node,
case IrOpcode::kTruncateTaggedToFloat64:
result = LowerTruncateTaggedToFloat64(node);
break;
- case IrOpcode::kPoisonIndex:
- result = LowerPoisonIndex(node);
- break;
case IrOpcode::kCheckClosure:
result = LowerCheckClosure(node, frame_state);
break;
@@ -1788,14 +1780,6 @@ Node* EffectControlLinearizer::LowerTruncateTaggedToFloat64(Node* node) {
return done.PhiAt(0);
}
-Node* EffectControlLinearizer::LowerPoisonIndex(Node* node) {
- Node* index = node->InputAt(0);
- if (mask_array_index_ == MaskArrayIndexEnable::kMaskArrayIndex) {
- index = __ Word32PoisonOnSpeculation(index);
- }
- return index;
-}
-
Node* EffectControlLinearizer::LowerCheckClosure(Node* node,
Node* frame_state) {
Handle<FeedbackCell> feedback_cell = FeedbackCellOf(node->op());
@@ -1831,8 +1815,7 @@ void EffectControlLinearizer::MigrateInstanceOrDeopt(
__ Word32And(bitfield3,
__ Int32Constant(Map::Bits3::IsDeprecatedBit::kMask)),
__ Int32Constant(0));
- __ DeoptimizeIf(reason, feedback_source, is_not_deprecated, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
+ __ DeoptimizeIf(reason, feedback_source, is_not_deprecated, frame_state);
Operator::Properties properties = Operator::kNoDeopt | Operator::kNoThrow;
Runtime::FunctionId id = Runtime::kTryMigrateInstance;
auto call_descriptor = Linkage::GetRuntimeCallDescriptor(
@@ -1842,7 +1825,7 @@ void EffectControlLinearizer::MigrateInstanceOrDeopt(
__ Int32Constant(1), __ NoContextConstant());
Node* check = ObjectIsSmi(result);
__ DeoptimizeIf(DeoptimizeReason::kInstanceMigrationFailed, feedback_source,
- check, frame_state, IsSafetyCheck::kCriticalSafetyCheck);
+ check, frame_state);
}
void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
@@ -1886,7 +1869,7 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
Node* check = __ TaggedEqual(value_map, map);
if (i == map_count - 1) {
__ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check,
- frame_state, IsSafetyCheck::kCriticalSafetyCheck);
+ frame_state);
} else {
auto next_map = __ MakeLabel();
__ BranchWithCriticalSafetyCheck(check, &done, &next_map);
@@ -1908,7 +1891,7 @@ void EffectControlLinearizer::LowerCheckMaps(Node* node, Node* frame_state) {
if (i == map_count - 1) {
__ DeoptimizeIfNot(DeoptimizeReason::kWrongMap, p.feedback(), check,
- frame_state, IsSafetyCheck::kCriticalSafetyCheck);
+ frame_state);
} else {
auto next_map = __ MakeLabel();
__ BranchWithCriticalSafetyCheck(check, &done, &next_map);
@@ -2528,8 +2511,8 @@ Node* EffectControlLinearizer::LowerCheckedUint32Bounds(Node* node,
Node* check = __ Uint32LessThan(index, limit);
if (!(params.flags() & CheckBoundsFlag::kAbortOnOutOfBounds)) {
__ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds,
- params.check_parameters().feedback(), check, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
+ params.check_parameters().feedback(), check,
+ frame_state);
} else {
auto if_abort = __ MakeDeferredLabel();
auto done = __ MakeLabel();
@@ -2574,8 +2557,8 @@ Node* EffectControlLinearizer::LowerCheckedUint64Bounds(Node* node,
Node* check = __ Uint64LessThan(index, limit);
if (!(params.flags() & CheckBoundsFlag::kAbortOnOutOfBounds)) {
__ DeoptimizeIfNot(DeoptimizeReason::kOutOfBounds,
- params.check_parameters().feedback(), check, frame_state,
- IsSafetyCheck::kCriticalSafetyCheck);
+ params.check_parameters().feedback(), check,
+ frame_state);
} else {
auto if_abort = __ MakeDeferredLabel();
auto done = __ MakeLabel();
@@ -3696,9 +3679,14 @@ Node* EffectControlLinearizer::LowerToBoolean(Node* node) {
}
Node* EffectControlLinearizer::LowerArgumentsLength(Node* node) {
- return ChangeIntPtrToSmi(
+ Node* arguments_length = ChangeIntPtrToSmi(
__ Load(MachineType::Pointer(), __ LoadFramePointer(),
__ IntPtrConstant(StandardFrameConstants::kArgCOffset)));
+ if (kJSArgcIncludesReceiver) {
+ arguments_length =
+ __ SmiSub(arguments_length, __ SmiConstant(kJSArgcReceiverSlots));
+ }
+ return arguments_length;
}
Node* EffectControlLinearizer::LowerRestLength(Node* node) {
@@ -3711,6 +3699,10 @@ Node* EffectControlLinearizer::LowerRestLength(Node* node) {
Node* arguments_length = ChangeIntPtrToSmi(
__ Load(MachineType::Pointer(), frame,
__ IntPtrConstant(StandardFrameConstants::kArgCOffset)));
+ if (kJSArgcIncludesReceiver) {
+ arguments_length =
+ __ SmiSub(arguments_length, __ SmiConstant(kJSArgcReceiverSlots));
+ }
Node* rest_length =
__ SmiSub(arguments_length, __ SmiConstant(formal_parameter_count));
__ GotoIf(__ SmiLessThan(rest_length, __ SmiConstant(0)), &done,
@@ -4263,12 +4255,10 @@ Node* EffectControlLinearizer::LowerStringToUpperCaseIntl(Node* node) {
Node* EffectControlLinearizer::LowerStringToLowerCaseIntl(Node* node) {
UNREACHABLE();
- return nullptr;
}
Node* EffectControlLinearizer::LowerStringToUpperCaseIntl(Node* node) {
UNREACHABLE();
- return nullptr;
}
#endif // V8_INTL_SUPPORT
@@ -5776,8 +5766,7 @@ Node* EffectControlLinearizer::LowerLoadTypedElement(Node* node) {
Node* data_ptr = BuildTypedArrayDataPointer(base, external);
// Perform the actual typed element access.
- return __ LoadElement(AccessBuilder::ForTypedArrayElement(
- array_type, true, LoadSensitivity::kCritical),
+ return __ LoadElement(AccessBuilder::ForTypedArrayElement(array_type, true),
data_ptr, index);
}
@@ -6796,26 +6785,13 @@ Node* EffectControlLinearizer::BuildIsClearedWeakReference(Node* maybe_object) {
#undef __
-namespace {
-
-MaskArrayIndexEnable MaskArrayForPoisonLevel(
- PoisoningMitigationLevel poison_level) {
- return (poison_level != PoisoningMitigationLevel::kDontPoison)
- ? MaskArrayIndexEnable::kMaskArrayIndex
- : MaskArrayIndexEnable::kDoNotMaskArrayIndex;
-}
-
-} // namespace
-
void LinearizeEffectControl(JSGraph* graph, Schedule* schedule, Zone* temp_zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
- PoisoningMitigationLevel poison_level,
JSHeapBroker* broker) {
JSGraphAssembler graph_assembler_(graph, temp_zone, base::nullopt, nullptr);
EffectControlLinearizer linearizer(graph, schedule, &graph_assembler_,
temp_zone, source_positions, node_origins,
- MaskArrayForPoisonLevel(poison_level),
MaintainSchedule::kDiscard, broker);
linearizer.Run();
}
@@ -6824,16 +6800,13 @@ void LowerToMachineSchedule(JSGraph* js_graph, Schedule* schedule,
Zone* temp_zone,
SourcePositionTable* source_positions,
NodeOriginTable* node_origins,
- PoisoningMitigationLevel poison_level,
JSHeapBroker* broker) {
JSGraphAssembler graph_assembler(js_graph, temp_zone, base::nullopt,
schedule);
EffectControlLinearizer linearizer(js_graph, schedule, &graph_assembler,
temp_zone, source_positions, node_origins,
- MaskArrayForPoisonLevel(poison_level),
MaintainSchedule::kMaintain, broker);
- MemoryLowering memory_lowering(js_graph, temp_zone, &graph_assembler,
- poison_level);
+ MemoryLowering memory_lowering(js_graph, temp_zone, &graph_assembler);
SelectLowering select_lowering(&graph_assembler, js_graph->graph());
graph_assembler.AddInlineReducer(&memory_lowering);
graph_assembler.AddInlineReducer(&select_lowering);
diff --git a/deps/v8/src/compiler/effect-control-linearizer.h b/deps/v8/src/compiler/effect-control-linearizer.h
index fca4899263..97467391e2 100644
--- a/deps/v8/src/compiler/effect-control-linearizer.h
+++ b/deps/v8/src/compiler/effect-control-linearizer.h
@@ -26,7 +26,7 @@ class JSHeapBroker;
V8_EXPORT_PRIVATE void LinearizeEffectControl(
JSGraph* graph, Schedule* schedule, Zone* temp_zone,
SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- PoisoningMitigationLevel poison_level, JSHeapBroker* broker);
+ JSHeapBroker* broker);
// Performs effect control linearization lowering in addition to machine
// lowering, producing a scheduled graph that is ready for instruction
@@ -34,7 +34,7 @@ V8_EXPORT_PRIVATE void LinearizeEffectControl(
V8_EXPORT_PRIVATE void LowerToMachineSchedule(
JSGraph* graph, Schedule* schedule, Zone* temp_zone,
SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- PoisoningMitigationLevel poison_level, JSHeapBroker* broker);
+ JSHeapBroker* broker);
} // namespace compiler
} // namespace internal
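LowerArgumentsLength and LowerRestLength above now compensate when the frame's argument count includes the receiver. A sketch of the arithmetic, assuming kJSArgcIncludesReceiver is true and one receiver slot, as in builds where the new convention is enabled:

#include <cassert>

constexpr bool kJSArgcIncludesReceiver = true;
constexpr int kJSArgcReceiverSlots = 1;

int ArgumentsLength(int argc_from_frame) {
  // When the frame's argc already counts the receiver, subtract it so that
  // the JS-visible arguments.length stays unchanged.
  return kJSArgcIncludesReceiver ? argc_from_frame - kJSArgcReceiverSlots
                                 : argc_from_frame;
}

int RestLength(int argc_from_frame, int formal_parameter_count) {
  int rest = ArgumentsLength(argc_from_frame) - formal_parameter_count;
  return rest < 0 ? 0 : rest;
}

int main() {
  // f(a, b, ...rest) called as f(1, 2, 3, 4): frame argc is 5 with receiver.
  assert(ArgumentsLength(5) == 4);
  assert(RestLength(5, 2) == 2);
}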
diff --git a/deps/v8/src/compiler/frame-states.cc b/deps/v8/src/compiler/frame-states.cc
index bbc2049ae5..c5199f1e64 100644
--- a/deps/v8/src/compiler/frame-states.cc
+++ b/deps/v8/src/compiler/frame-states.cc
@@ -214,8 +214,11 @@ FrameState CreateJavaScriptBuiltinContinuationFrameState(
ContinuationFrameStateMode mode) {
// Depending on {mode}, final parameters are added by the deoptimizer
// and aren't explicitly passed in the frame state.
- DCHECK_EQ(Builtins::GetStackParameterCount(name) + 1, // add receiver
- stack_parameter_count + DeoptimizerParameterCountFor(mode));
+ DCHECK_EQ(
+ Builtins::GetStackParameterCount(name) +
+ (kJSArgcIncludesReceiver ? 0
+ : 1), // Add receiver if it is not included.
+ stack_parameter_count + DeoptimizerParameterCountFor(mode));
Node* argc = jsgraph->Constant(Builtins::GetStackParameterCount(name));
diff --git a/deps/v8/src/compiler/graph-assembler.cc b/deps/v8/src/compiler/graph-assembler.cc
index 26ae88362d..6bfd6f8c22 100644
--- a/deps/v8/src/compiler/graph-assembler.cc
+++ b/deps/v8/src/compiler/graph-assembler.cc
@@ -829,46 +829,36 @@ Node* GraphAssembler::BitcastMaybeObjectToWord(Node* value) {
effect(), control()));
}
-Node* GraphAssembler::Word32PoisonOnSpeculation(Node* value) {
- return AddNode(graph()->NewNode(machine()->Word32PoisonOnSpeculation(), value,
- effect(), control()));
-}
-
Node* GraphAssembler::DeoptimizeIf(DeoptimizeReason reason,
FeedbackSource const& feedback,
- Node* condition, Node* frame_state,
- IsSafetyCheck is_safety_check) {
- return AddNode(
- graph()->NewNode(common()->DeoptimizeIf(DeoptimizeKind::kEager, reason,
- feedback, is_safety_check),
- condition, frame_state, effect(), control()));
+ Node* condition, Node* frame_state) {
+ return AddNode(graph()->NewNode(
+ common()->DeoptimizeIf(DeoptimizeKind::kEager, reason, feedback),
+ condition, frame_state, effect(), control()));
}
Node* GraphAssembler::DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason,
FeedbackSource const& feedback,
- Node* condition, Node* frame_state,
- IsSafetyCheck is_safety_check) {
- return AddNode(graph()->NewNode(
- common()->DeoptimizeIf(kind, reason, feedback, is_safety_check),
- condition, frame_state, effect(), control()));
+ Node* condition, Node* frame_state) {
+ return AddNode(
+ graph()->NewNode(common()->DeoptimizeIf(kind, reason, feedback),
+ condition, frame_state, effect(), control()));
}
Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeKind kind,
DeoptimizeReason reason,
FeedbackSource const& feedback,
- Node* condition, Node* frame_state,
- IsSafetyCheck is_safety_check) {
- return AddNode(graph()->NewNode(
- common()->DeoptimizeUnless(kind, reason, feedback, is_safety_check),
- condition, frame_state, effect(), control()));
+ Node* condition, Node* frame_state) {
+ return AddNode(
+ graph()->NewNode(common()->DeoptimizeUnless(kind, reason, feedback),
+ condition, frame_state, effect(), control()));
}
Node* GraphAssembler::DeoptimizeIfNot(DeoptimizeReason reason,
FeedbackSource const& feedback,
- Node* condition, Node* frame_state,
- IsSafetyCheck is_safety_check) {
+ Node* condition, Node* frame_state) {
return DeoptimizeIfNot(DeoptimizeKind::kEager, reason, feedback, condition,
- frame_state, is_safety_check);
+ frame_state);
}
Node* GraphAssembler::DynamicCheckMapsWithDeoptUnless(Node* condition,
@@ -924,8 +914,7 @@ void GraphAssembler::BranchWithCriticalSafetyCheck(
hint = if_false->IsDeferred() ? BranchHint::kTrue : BranchHint::kFalse;
}
- BranchImpl(condition, if_true, if_false, hint,
- IsSafetyCheck::kCriticalSafetyCheck);
+ BranchImpl(condition, if_true, if_false, hint);
}
void GraphAssembler::RecordBranchInBlockUpdater(Node* branch,
diff --git a/deps/v8/src/compiler/graph-assembler.h b/deps/v8/src/compiler/graph-assembler.h
index 5efe6dd9c3..c9ddd63e71 100644
--- a/deps/v8/src/compiler/graph-assembler.h
+++ b/deps/v8/src/compiler/graph-assembler.h
@@ -330,24 +330,16 @@ class V8_EXPORT_PRIVATE GraphAssembler {
Node* Retain(Node* buffer);
Node* UnsafePointerAdd(Node* base, Node* external);
- Node* Word32PoisonOnSpeculation(Node* value);
-
- Node* DeoptimizeIf(
- DeoptimizeReason reason, FeedbackSource const& feedback, Node* condition,
- Node* frame_state,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
- Node* DeoptimizeIf(
- DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback, Node* condition, Node* frame_state,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
- Node* DeoptimizeIfNot(
- DeoptimizeKind kind, DeoptimizeReason reason,
- FeedbackSource const& feedback, Node* condition, Node* frame_state,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
- Node* DeoptimizeIfNot(
- DeoptimizeReason reason, FeedbackSource const& feedback, Node* condition,
- Node* frame_state,
- IsSafetyCheck is_safety_check = IsSafetyCheck::kSafetyCheck);
+ Node* DeoptimizeIf(DeoptimizeReason reason, FeedbackSource const& feedback,
+ Node* condition, Node* frame_state);
+ Node* DeoptimizeIf(DeoptimizeKind kind, DeoptimizeReason reason,
+ FeedbackSource const& feedback, Node* condition,
+ Node* frame_state);
+ Node* DeoptimizeIfNot(DeoptimizeKind kind, DeoptimizeReason reason,
+ FeedbackSource const& feedback, Node* condition,
+ Node* frame_state);
+ Node* DeoptimizeIfNot(DeoptimizeReason reason, FeedbackSource const& feedback,
+ Node* condition, Node* frame_state);
Node* DynamicCheckMapsWithDeoptUnless(Node* condition, Node* slot_index,
Node* map, Node* handler,
Node* feedback_vector,
@@ -557,7 +549,7 @@ class V8_EXPORT_PRIVATE GraphAssembler {
void BranchImpl(Node* condition,
GraphAssemblerLabel<sizeof...(Vars)>* if_true,
GraphAssemblerLabel<sizeof...(Vars)>* if_false,
- BranchHint hint, IsSafetyCheck is_safety_check, Vars...);
+ BranchHint hint, Vars...);
void RecordBranchInBlockUpdater(Node* branch, Node* if_true_control,
Node* if_false_control,
BasicBlock* if_true_block,
@@ -742,8 +734,7 @@ void GraphAssembler::Branch(Node* condition,
hint = if_false->IsDeferred() ? BranchHint::kTrue : BranchHint::kFalse;
}
- BranchImpl(condition, if_true, if_false, hint, IsSafetyCheck::kNoSafetyCheck,
- vars...);
+ BranchImpl(condition, if_true, if_false, hint, vars...);
}
template <typename... Vars>
@@ -751,20 +742,17 @@ void GraphAssembler::BranchWithHint(
Node* condition, GraphAssemblerLabel<sizeof...(Vars)>* if_true,
GraphAssemblerLabel<sizeof...(Vars)>* if_false, BranchHint hint,
Vars... vars) {
- BranchImpl(condition, if_true, if_false, hint, IsSafetyCheck::kNoSafetyCheck,
- vars...);
+ BranchImpl(condition, if_true, if_false, hint, vars...);
}
template <typename... Vars>
void GraphAssembler::BranchImpl(Node* condition,
GraphAssemblerLabel<sizeof...(Vars)>* if_true,
GraphAssemblerLabel<sizeof...(Vars)>* if_false,
- BranchHint hint, IsSafetyCheck is_safety_check,
- Vars... vars) {
+ BranchHint hint, Vars... vars) {
DCHECK_NOT_NULL(control());
- Node* branch = graph()->NewNode(common()->Branch(hint, is_safety_check),
- condition, control());
+ Node* branch = graph()->NewNode(common()->Branch(hint), condition, control());
Node* if_true_control = control_ =
graph()->NewNode(common()->IfTrue(), branch);
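Branch and BranchWithCriticalSafetyCheck keep their existing hint inference: when no explicit hint is given, a deferred (out-of-line) target marks the opposite edge as the likely one. The same logic in isolation, with a stand-in Label type:

#include <cassert>

enum class BranchHint { kNone, kTrue, kFalse };

struct Label {
  bool deferred;
  bool IsDeferred() const { return deferred; }
};

BranchHint InferHint(const Label* if_true, const Label* if_false,
                     BranchHint hint) {
  if (hint == BranchHint::kNone &&
      if_true->IsDeferred() != if_false->IsDeferred()) {
    // Prefer the non-deferred edge, mirroring the code above.
    hint = if_false->IsDeferred() ? BranchHint::kTrue : BranchHint::kFalse;
  }
  return hint;
}

int main() {
  Label likely{false}, unlikely{true};
  assert(InferHint(&likely, &unlikely, BranchHint::kNone) == BranchHint::kTrue);
}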
diff --git a/deps/v8/src/compiler/heap-refs.cc b/deps/v8/src/compiler/heap-refs.cc
index 1688a14a04..c246430de2 100644
--- a/deps/v8/src/compiler/heap-refs.cc
+++ b/deps/v8/src/compiler/heap-refs.cc
@@ -14,7 +14,6 @@
#include "src/base/platform/platform.h"
#include "src/codegen/code-factory.h"
#include "src/compiler/compilation-dependencies.h"
-#include "src/compiler/graph-reducer.h"
#include "src/compiler/js-heap-broker.h"
#include "src/execution/protectors-inl.h"
#include "src/objects/allocation-site-inl.h"
@@ -41,7 +40,7 @@ namespace compiler {
//
// kBackgroundSerializedHeapObject: The underlying V8 object is a HeapObject
// and the data is an instance of the corresponding (most-specific) subclass,
-// e.g. JSFunctionData, which provides serialized information about the
+// e.g. JSFunctionData, which provides serialized information about the
// object. Allows serialization from the background thread.
//
// kUnserializedHeapObject: The underlying V8 object is a HeapObject and the
@@ -257,13 +256,9 @@ bool PropertyCellData::Cache(JSHeapBroker* broker) {
}
}
- if (property_details.cell_type() == PropertyCellType::kConstant) {
- Handle<Object> value_again =
- broker->CanonicalPersistentHandle(cell->value(kAcquireLoad));
- if (*value != *value_again) {
- DCHECK(!broker->IsMainThread());
- return false;
- }
+ if (property_details.cell_type() == PropertyCellType::kInTransition) {
+ DCHECK(!broker->IsMainThread());
+ return false;
}
ObjectData* value_data = broker->TryGetOrCreateData(value);
@@ -317,17 +312,6 @@ class JSObjectData : public JSReceiverData {
return object_create_map_;
}
- ObjectData* GetOwnConstantElement(
- JSHeapBroker* broker, uint32_t index,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
- ObjectData* GetOwnFastDataProperty(
- JSHeapBroker* broker, Representation representation,
- FieldIndex field_index,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
- ObjectData* GetOwnDictionaryProperty(JSHeapBroker* broker,
- InternalIndex dict_index,
- SerializationPolicy policy);
-
// This method is only used to assert our invariants.
bool cow_or_empty_elements_tenured() const;
@@ -349,21 +333,6 @@ class JSObjectData : public JSReceiverData {
bool serialized_object_create_map_ = false;
ObjectData* object_create_map_ = nullptr;
-
- // Elements (indexed properties) that either
- // (1) are known to exist directly on the object as non-writable and
- // non-configurable, or (2) are known not to (possibly they don't exist at
- // all). In case (2), the second pair component is nullptr.
- ZoneVector<std::pair<uint32_t, ObjectData*>> own_constant_elements_;
- // Properties that either:
- // (1) are known to exist directly on the object, or
- // (2) are known not to (possibly they don't exist at all).
- // In case (2), the second pair component is nullptr.
- // For simplicity, this may in theory overlap with inobject_fields_.
- // For fast mode objects, the keys of the map are the property_index() values
- // of the respective property FieldIndex'es. For slow mode objects, the keys
- // are the dictionary indicies.
- ZoneUnorderedMap<int, ObjectData*> own_properties_;
};
void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker,
@@ -390,18 +359,6 @@ void JSObjectData::SerializeObjectCreateMap(JSHeapBroker* broker,
namespace {
-base::Optional<ObjectRef> GetOwnElementFromHeap(JSHeapBroker* broker,
- Handle<Object> receiver,
- uint32_t index,
- bool constant_only) {
- LookupIterator it(broker->isolate(), receiver, index, LookupIterator::OWN);
- if (it.state() == LookupIterator::DATA &&
- (!constant_only || (it.IsReadOnly() && !it.IsConfigurable()))) {
- return MakeRef(broker, it.GetDataValue());
- }
- return base::nullopt;
-}
-
base::Optional<ObjectRef> GetOwnFastDataPropertyFromHeap(
JSHeapBroker* broker, JSObjectRef holder, Representation representation,
FieldIndex field_index) {
@@ -496,70 +453,6 @@ base::Optional<ObjectRef> GetOwnDictionaryPropertyFromHeap(
} // namespace
-ObjectData* JSObjectData::GetOwnConstantElement(JSHeapBroker* broker,
- uint32_t index,
- SerializationPolicy policy) {
- for (auto const& p : own_constant_elements_) {
- if (p.first == index) return p.second;
- }
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about index " << index << " on " << this);
- return nullptr;
- }
-
- base::Optional<ObjectRef> element =
- GetOwnElementFromHeap(broker, object(), index, true);
- ObjectData* result = element.has_value() ? element->data() : nullptr;
- own_constant_elements_.push_back({index, result});
- return result;
-}
-
-ObjectData* JSObjectData::GetOwnFastDataProperty(JSHeapBroker* broker,
- Representation representation,
- FieldIndex field_index,
- SerializationPolicy policy) {
- auto p = own_properties_.find(field_index.property_index());
- if (p != own_properties_.end()) return p->second;
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about fast property with index "
- << field_index.property_index() << " on "
- << this);
- return nullptr;
- }
-
- // This call will always succeed on the main thread.
- CHECK(broker->IsMainThread());
- JSObjectRef object_ref = MakeRef(broker, Handle<JSObject>::cast(object()));
- ObjectRef property = GetOwnFastDataPropertyFromHeap(
- broker, object_ref, representation, field_index)
- .value();
- ObjectData* result(property.data());
- own_properties_.insert(std::make_pair(field_index.property_index(), result));
- return result;
-}
-
-ObjectData* JSObjectData::GetOwnDictionaryProperty(JSHeapBroker* broker,
- InternalIndex dict_index,
- SerializationPolicy policy) {
- auto p = own_properties_.find(dict_index.as_int());
- if (p != own_properties_.end()) return p->second;
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about dictionary property with index "
- << dict_index.as_int() << " on " << this);
- return nullptr;
- }
-
- ObjectRef property = GetOwnDictionaryPropertyFromHeap(
- broker, Handle<JSObject>::cast(object()), dict_index)
- .value();
- ObjectData* result(property.data());
- own_properties_.insert(std::make_pair(dict_index.as_int(), result));
- return result;
-}
-
class JSTypedArrayData : public JSObjectData {
public:
JSTypedArrayData(JSHeapBroker* broker, ObjectData** storage,
@@ -625,28 +518,6 @@ class JSBoundFunctionData : public JSObjectData {
JSBoundFunctionData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSBoundFunction> object, ObjectDataKind kind)
: JSObjectData(broker, storage, object, kind) {}
-
- bool Serialize(JSHeapBroker* broker, NotConcurrentInliningTag tag);
-
- ObjectData* bound_target_function() const {
- DCHECK(!broker()->is_concurrent_inlining());
- return bound_target_function_;
- }
- ObjectData* bound_this() const {
- DCHECK(!broker()->is_concurrent_inlining());
- return bound_this_;
- }
- ObjectData* bound_arguments() const {
- DCHECK(!broker()->is_concurrent_inlining());
- return bound_arguments_;
- }
-
- private:
- bool serialized_ = false;
-
- ObjectData* bound_target_function_ = nullptr;
- ObjectData* bound_this_ = nullptr;
- ObjectData* bound_arguments_ = nullptr;
};
class JSFunctionData : public JSObjectData {
@@ -659,10 +530,6 @@ class JSFunctionData : public JSObjectData {
bool IsConsistentWithHeapState(JSHeapBroker* broker) const;
- bool has_feedback_vector() const {
- DCHECK(serialized_);
- return has_feedback_vector_;
- }
bool has_initial_map() const {
DCHECK(serialized_);
return has_initial_map_;
@@ -680,10 +547,6 @@ class JSFunctionData : public JSObjectData {
DCHECK(serialized_);
return context_;
}
- ObjectData* native_context() const {
- DCHECK(serialized_);
- return native_context_;
- }
MapData* initial_map() const {
DCHECK(serialized_);
return initial_map_;
@@ -700,10 +563,6 @@ class JSFunctionData : public JSObjectData {
DCHECK(serialized_);
return feedback_cell_;
}
- ObjectData* feedback_vector() const {
- DCHECK(serialized_);
- return feedback_vector_;
- }
int initial_map_instance_size_with_min_slack() const {
DCHECK(serialized_);
return initial_map_instance_size_with_min_slack_;
@@ -740,19 +599,16 @@ class JSFunctionData : public JSObjectData {
using UsedFields = base::Flags<UsedField>;
UsedFields used_fields_;
- bool has_feedback_vector_ = false;
ObjectData* prototype_or_initial_map_ = nullptr;
bool has_initial_map_ = false;
bool has_instance_prototype_ = false;
bool PrototypeRequiresRuntimeLookup_ = false;
ObjectData* context_ = nullptr;
- ObjectData* native_context_ = nullptr; // Derives from context_.
MapData* initial_map_ = nullptr; // Derives from prototype_or_initial_map_.
ObjectData* instance_prototype_ =
nullptr; // Derives from prototype_or_initial_map_.
ObjectData* shared_ = nullptr;
- ObjectData* feedback_vector_ = nullptr; // Derives from feedback_cell.
ObjectData* feedback_cell_ = nullptr;
int initial_map_instance_size_with_min_slack_; // Derives from
// prototype_or_initial_map_.
@@ -809,10 +665,6 @@ class MapData : public HeapObjectData {
return is_abandoned_prototype_map_;
}
- // Extra information.
- void SerializeRootMap(JSHeapBroker* broker, NotConcurrentInliningTag tag);
- ObjectData* FindRootMap() const;
-
void SerializeConstructor(JSHeapBroker* broker, NotConcurrentInliningTag tag);
ObjectData* GetConstructor() const {
CHECK(serialized_constructor_);
@@ -840,8 +692,7 @@ class MapData : public HeapObjectData {
bool has_extra_serialized_data() const {
return serialized_constructor_ || serialized_backpointer_ ||
- serialized_prototype_ || serialized_root_map_ ||
- serialized_for_element_store_;
+ serialized_prototype_ || serialized_for_element_store_;
}
private:
@@ -881,9 +732,6 @@ class MapData : public HeapObjectData {
bool serialized_prototype_ = false;
ObjectData* prototype_ = nullptr;
- bool serialized_root_map_ = false;
- ObjectData* root_map_ = nullptr;
-
bool serialized_for_element_store_ = false;
};
@@ -938,16 +786,13 @@ void JSFunctionData::Cache(JSHeapBroker* broker) {
// guaranteed to see an initialized JSFunction object, and after
// initialization fields remain in a valid state.
- Context context = function->context(kRelaxedLoad);
- context_ = broker->GetOrCreateData(context, kAssumeMemoryFence);
- CHECK(context_->IsContext());
+ ContextRef context =
+ MakeRefAssumeMemoryFence(broker, function->context(kRelaxedLoad));
+ context_ = context.data();
- native_context_ = broker->GetOrCreateData(context.map().native_context(),
- kAssumeMemoryFence);
- CHECK(native_context_->IsNativeContext());
-
- SharedFunctionInfo shared = function->shared(kRelaxedLoad);
- shared_ = broker->GetOrCreateData(shared, kAssumeMemoryFence);
+ SharedFunctionInfoRef shared =
+ MakeRefAssumeMemoryFence(broker, function->shared(kRelaxedLoad));
+ shared_ = shared.data();
if (function->has_prototype_slot()) {
prototype_or_initial_map_ = broker->GetOrCreateData(
@@ -981,9 +826,10 @@ void JSFunctionData::Cache(JSHeapBroker* broker) {
if (has_initial_map_) {
has_instance_prototype_ = true;
- instance_prototype_ = broker->GetOrCreateData(
- Handle<Map>::cast(initial_map_->object())->prototype(),
- kAssumeMemoryFence);
+ instance_prototype_ =
+ MakeRefAssumeMemoryFence(
+ broker, Handle<Map>::cast(initial_map_->object())->prototype())
+ .data();
} else if (prototype_or_initial_map_->IsHeapObject() &&
!Handle<HeapObject>::cast(prototype_or_initial_map_->object())
->IsTheHole()) {
@@ -994,15 +840,9 @@ void JSFunctionData::Cache(JSHeapBroker* broker) {
PrototypeRequiresRuntimeLookup_ = function->PrototypeRequiresRuntimeLookup();
- FeedbackCell feedback_cell = function->raw_feedback_cell(kAcquireLoad);
- feedback_cell_ = broker->GetOrCreateData(feedback_cell, kAssumeMemoryFence);
-
- ObjectData* maybe_feedback_vector = broker->GetOrCreateData(
- feedback_cell.value(kAcquireLoad), kAssumeMemoryFence);
- if (shared.is_compiled() && maybe_feedback_vector->IsFeedbackVector()) {
- has_feedback_vector_ = true;
- feedback_vector_ = maybe_feedback_vector;
- }
+ FeedbackCellRef feedback_cell = MakeRefAssumeMemoryFence(
+ broker, function->raw_feedback_cell(kAcquireLoad));
+ feedback_cell_ = feedback_cell.data();
#ifdef DEBUG
serialized_ = true;
@@ -1016,7 +856,6 @@ bool JSFunctionData::IsConsistentWithHeapState(JSHeapBroker* broker) const {
Handle<JSFunction> f = Handle<JSFunction>::cast(object());
CHECK_EQ(*context_->object(), f->context());
- CHECK_EQ(*native_context_->object(), f->native_context());
CHECK_EQ(*shared_->object(), f->shared());
if (f->has_prototype_slot()) {
@@ -1080,22 +919,6 @@ bool JSFunctionData::IsConsistentWithHeapState(JSHeapBroker* broker) const {
return false;
}
- if (has_used_field(kHasFeedbackVector) &&
- has_feedback_vector_ != f->has_feedback_vector()) {
- TRACE_BROKER_MISSING(broker, "JSFunction::has_feedback_vector");
- return false;
- }
-
- if (has_feedback_vector_) {
- if (has_used_field(kFeedbackVector) &&
- *feedback_vector_->object() != f->feedback_vector()) {
- TRACE_BROKER_MISSING(broker, "JSFunction::feedback_vector");
- return false;
- }
- } else {
- DCHECK_NULL(feedback_vector_);
- }
-
return true;
}
@@ -1269,61 +1092,16 @@ class ScriptContextTableData : public FixedArrayData {
: FixedArrayData(broker, storage, object, kind) {}
};
-bool JSBoundFunctionData::Serialize(JSHeapBroker* broker,
- NotConcurrentInliningTag tag) {
- DCHECK(!broker->is_concurrent_inlining());
-
- if (serialized_) return true;
- if (broker->StackHasOverflowed()) return false;
-
- TraceScope tracer(broker, this, "JSBoundFunctionData::Serialize");
- Handle<JSBoundFunction> function = Handle<JSBoundFunction>::cast(object());
-
- // We don't immediately set {serialized_} in order to correctly handle the
- // case where a recursive call to this method reaches the stack limit.
-
- DCHECK_NULL(bound_target_function_);
- bound_target_function_ =
- broker->GetOrCreateData(function->bound_target_function());
- bool serialized_nested = true;
- if (!bound_target_function_->should_access_heap()) {
- if (bound_target_function_->IsJSBoundFunction()) {
- serialized_nested =
- bound_target_function_->AsJSBoundFunction()->Serialize(broker, tag);
- }
- }
- if (!serialized_nested) {
- // We couldn't serialize all nested bound functions due to stack
- // overflow. Give up.
- DCHECK(!serialized_);
- bound_target_function_ = nullptr; // Reset to sync with serialized_.
- return false;
- }
-
- serialized_ = true;
-
- DCHECK_NULL(bound_arguments_);
- bound_arguments_ = broker->GetOrCreateData(function->bound_arguments());
-
- DCHECK_NULL(bound_this_);
- bound_this_ = broker->GetOrCreateData(function->bound_this());
-
- return true;
-}
-
JSObjectData::JSObjectData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSObject> object, ObjectDataKind kind)
: JSReceiverData(broker, storage, object, kind),
- inobject_fields_(broker->zone()),
- own_constant_elements_(broker->zone()),
- own_properties_(broker->zone()) {}
+ inobject_fields_(broker->zone()) {}
class JSArrayData : public JSObjectData {
public:
JSArrayData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSArray> object, ObjectDataKind kind)
- : JSObjectData(broker, storage, object, kind),
- own_elements_(broker->zone()) {}
+ : JSObjectData(broker, storage, object, kind) {}
void Serialize(JSHeapBroker* broker, NotConcurrentInliningTag tag);
ObjectData* length() const {
@@ -1331,19 +1109,9 @@ class JSArrayData : public JSObjectData {
return length_;
}
- ObjectData* GetOwnElement(
- JSHeapBroker* broker, uint32_t index,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
-
private:
bool serialized_ = false;
ObjectData* length_ = nullptr;
-
- // Elements (indexed properties) that either
- // (1) are known to exist directly on the object, or
- // (2) are known not to (possibly they don't exist at all).
- // In case (2), the second pair component is nullptr.
- ZoneVector<std::pair<uint32_t, ObjectData*>> own_elements_;
};
void JSArrayData::Serialize(JSHeapBroker* broker,
@@ -1358,52 +1126,11 @@ void JSArrayData::Serialize(JSHeapBroker* broker,
length_ = broker->GetOrCreateData(jsarray->length());
}
-ObjectData* JSArrayData::GetOwnElement(JSHeapBroker* broker, uint32_t index,
- SerializationPolicy policy) {
- for (auto const& p : own_elements_) {
- if (p.first == index) return p.second;
- }
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about index " << index << " on " << this);
- return nullptr;
- }
-
- base::Optional<ObjectRef> element =
- GetOwnElementFromHeap(broker, object(), index, false);
- ObjectData* result = element.has_value() ? element->data() : nullptr;
- own_elements_.push_back({index, result});
- return result;
-}
-
class JSGlobalObjectData : public JSObjectData {
public:
JSGlobalObjectData(JSHeapBroker* broker, ObjectData** storage,
Handle<JSGlobalObject> object, ObjectDataKind kind)
- : JSObjectData(broker, storage, object, kind),
- properties_(broker->zone()) {
- if (!broker->is_concurrent_inlining()) {
- is_detached_ = object->IsDetached();
- }
- }
-
- bool IsDetached() const {
- return is_detached_;
- }
-
- ObjectData* GetPropertyCell(
- JSHeapBroker* broker, ObjectData* name,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
-
- private:
- // Only valid if not concurrent inlining.
- bool is_detached_ = false;
-
- // Properties that either
- // (1) are known to exist as property cells on the global object, or
- // (2) are known not to (possibly they don't exist at all).
- // In case (2), the second pair component is nullptr.
- ZoneVector<std::pair<ObjectData*, ObjectData*>> properties_;
+ : JSObjectData(broker, storage, object, kind) {}
};
class JSGlobalProxyData : public JSObjectData {
@@ -1413,46 +1140,6 @@ class JSGlobalProxyData : public JSObjectData {
: JSObjectData(broker, storage, object, kind) {}
};
-namespace {
-
-base::Optional<PropertyCellRef> GetPropertyCellFromHeap(JSHeapBroker* broker,
- Handle<Name> name) {
- base::Optional<PropertyCell> maybe_cell =
- ConcurrentLookupIterator::TryGetPropertyCell(
- broker->isolate(), broker->local_isolate_or_isolate(),
- broker->target_native_context().global_object().object(), name);
- if (!maybe_cell.has_value()) return {};
- return TryMakeRef(broker, *maybe_cell);
-}
-
-} // namespace
-
-ObjectData* JSGlobalObjectData::GetPropertyCell(JSHeapBroker* broker,
- ObjectData* name,
- SerializationPolicy policy) {
- CHECK_NOT_NULL(name);
- for (auto const& p : properties_) {
- if (p.first == name) return p.second;
- }
-
- if (policy == SerializationPolicy::kAssumeSerialized) {
- TRACE_MISSING(broker, "knowledge about global property " << name);
- return nullptr;
- }
-
- ObjectData* result = nullptr;
- base::Optional<PropertyCellRef> cell =
- GetPropertyCellFromHeap(broker, Handle<Name>::cast(name->object()));
- if (cell.has_value()) {
- result = cell->data();
- if (!result->should_access_heap()) {
- result->AsPropertyCell()->Cache(broker);
- }
- }
- properties_.push_back({name, result});
- return result;
-}
-
#define DEFINE_IS(Name) \
bool ObjectData::Is##Name() const { \
if (should_access_heap()) { \
@@ -1540,19 +1227,6 @@ bool MapData::TrySerializePrototype(JSHeapBroker* broker,
return true;
}
-void MapData::SerializeRootMap(JSHeapBroker* broker,
- NotConcurrentInliningTag tag) {
- if (serialized_root_map_) return;
- serialized_root_map_ = true;
-
- TraceScope tracer(broker, this, "MapData::SerializeRootMap");
- Handle<Map> map = Handle<Map>::cast(object());
- DCHECK_NULL(root_map_);
- root_map_ = broker->GetOrCreateData(map->FindRootMap(broker->isolate()));
-}
-
-ObjectData* MapData::FindRootMap() const { return root_map_; }
-
bool JSObjectData::SerializeAsBoilerplateRecursive(JSHeapBroker* broker,
NotConcurrentInliningTag tag,
int max_depth) {
@@ -1693,8 +1367,6 @@ void JSHeapBroker::InitializeAndStartSerializing() {
SetTargetNativeContextRef(target_native_context().object());
if (!is_concurrent_inlining()) {
- target_native_context().Serialize(NotConcurrentInliningTag{this});
-
Factory* const f = isolate()->factory();
ObjectData* data;
data = GetOrCreateData(f->array_buffer_detaching_protector());
@@ -1838,6 +1510,19 @@ int ObjectRef::AsSmi() const {
INSTANCE_TYPE_CHECKERS(DEF_TESTER)
#undef DEF_TESTER
+bool MapRef::CanInlineElementAccess() const {
+ if (!IsJSObjectMap()) return false;
+ if (is_access_check_needed()) return false;
+ if (has_indexed_interceptor()) return false;
+ ElementsKind kind = elements_kind();
+ if (IsFastElementsKind(kind)) return true;
+ if (IsTypedArrayElementsKind(kind) && kind != BIGUINT64_ELEMENTS &&
+ kind != BIGINT64_ELEMENTS) {
+ return true;
+ }
+ return false;
+}
+
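The new predicate gates element-access inlining on map shape: only JSObject maps without access checks or indexed interceptors qualify, and only for fast or non-BigInt typed-array elements kinds. A minimal sketch of a call site (the surrounding reducer code is illustrative, not part of this patch; only the predicate itself is real):

  // Hypothetical: `map` obtained from feedback elsewhere in a reducer.
  if (map.CanInlineElementAccess()) {
    // Safe to emit a direct keyed load/store; BIGINT64/BIGUINT64 typed
    // arrays are excluded since their elements are not plain numbers.
  }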
base::Optional<MapRef> MapRef::AsElementsKind(ElementsKind kind) const {
const ElementsKind current_kind = elements_kind();
if (kind == current_kind) return *this;
@@ -1931,6 +1616,11 @@ void RecordConsistentJSFunctionViewDependencyIfNeeded(
} // namespace
+base::Optional<FeedbackVectorRef> JSFunctionRef::feedback_vector(
+ CompilationDependencies* dependencies) const {
+ return raw_feedback_cell(dependencies).feedback_vector();
+}
+
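feedback_vector() is now a thin wrapper over raw_feedback_cell(); the optional result reflects that the cell may not hold a vector yet. An illustrative caller, assuming a JSFunctionRef fn and a CompilationDependencies* deps:

  base::Optional<FeedbackVectorRef> vector = fn.feedback_vector(deps);
  if (!vector.has_value()) {
    // No feedback vector yet (e.g. the function is not compiled):
    // do less optimization rather than failing the compilation.
  }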
int JSFunctionRef::InitialMapInstanceSizeWithMinSlack(
CompilationDependencies* dependencies) const {
if (data_->should_access_heap()) {
@@ -2096,25 +1786,21 @@ ObjectRef MapRef::GetFieldType(InternalIndex descriptor_index) const {
}
base::Optional<ObjectRef> StringRef::GetCharAsStringOrUndefined(
- uint32_t index, SerializationPolicy policy) const {
- if (broker()->is_concurrent_inlining()) {
- String maybe_char;
- auto result = ConcurrentLookupIterator::TryGetOwnChar(
- &maybe_char, broker()->isolate(), broker()->local_isolate(), *object(),
- index);
-
- if (result == ConcurrentLookupIterator::kGaveUp) {
- TRACE_BROKER_MISSING(broker(), "StringRef::GetCharAsStringOrUndefined on "
- << *this << " at index " << index);
- return {};
- }
+ uint32_t index) const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ String maybe_char;
+ auto result = ConcurrentLookupIterator::TryGetOwnChar(
+ &maybe_char, broker()->isolate(), broker()->local_isolate(), *object(),
+ index);
- DCHECK_EQ(result, ConcurrentLookupIterator::kPresent);
- return TryMakeRef(broker(), maybe_char);
+ if (result == ConcurrentLookupIterator::kGaveUp) {
+ TRACE_BROKER_MISSING(broker(), "StringRef::GetCharAsStringOrUndefined on "
+ << *this << " at index " << index);
+ return {};
}
- CHECK_EQ(data_->kind(), ObjectDataKind::kUnserializedHeapObject);
- return GetOwnElementFromHeap(broker(), object(), index, true);
+ DCHECK_EQ(result, ConcurrentLookupIterator::kPresent);
+ return TryMakeRef(broker(), maybe_char);
}
bool StringRef::SupportedStringKind() const {
@@ -2165,8 +1851,6 @@ int ArrayBoilerplateDescriptionRef::constants_elements_length() const {
return object()->constant_elements().length();
}
-ObjectRef FixedArrayRef::get(int i) const { return TryGet(i).value(); }
-
base::Optional<ObjectRef> FixedArrayRef::TryGet(int i) const {
Handle<Object> value;
{
@@ -2234,26 +1918,17 @@ int BytecodeArrayRef::handler_table_size() const {
return BitField::decode(ObjectRef::data()->As##holder()->field()); \
}
-// Like IF_ACCESS_FROM_HEAP[_C] but we also allow direct heap access for
+// Like IF_ACCESS_FROM_HEAP but we also allow direct heap access for
// kBackgroundSerialized only for methods that we identified to be safe.
-#define IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name) \
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { \
- return MakeRef(broker(), result::cast(object()->name())); \
- }
#define IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name) \
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) { \
return object()->name(); \
}
-// Like BIMODAL_ACCESSOR[_C] except that we force a direct heap access if
+// Like BIMODAL_ACCESSOR except that we force a direct heap access if
// broker()->is_concurrent_inlining() is true (even for kBackgroundSerialized).
// This is because we identified the method to be safe to use direct heap
// access, but the holder##Data class still needs to be serialized.
-#define BIMODAL_ACCESSOR_WITH_FLAG(holder, result, name) \
- result##Ref holder##Ref::name() const { \
- IF_ACCESS_FROM_HEAP_WITH_FLAG(result, name); \
- return result##Ref(broker(), ObjectRef::data()->As##holder()->name()); \
- }
#define BIMODAL_ACCESSOR_WITH_FLAG_C(holder, result, name) \
result holder##Ref::name() const { \
IF_ACCESS_FROM_HEAP_WITH_FLAG_C(name); \
@@ -2298,31 +1973,22 @@ uint64_t HeapNumberRef::value_as_bits() const {
return object()->value_as_bits(kRelaxedLoad);
}
-base::Optional<JSReceiverRef> JSBoundFunctionRef::bound_target_function()
- const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Immutable after initialization.
- return TryMakeRef(broker(), object()->bound_target_function(),
- kAssumeMemoryFence);
- }
- return TryMakeRef<JSReceiver>(
- broker(), data()->AsJSBoundFunction()->bound_target_function());
+JSReceiverRef JSBoundFunctionRef::bound_target_function() const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(broker(), object()->bound_target_function());
}
-base::Optional<ObjectRef> JSBoundFunctionRef::bound_this() const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Immutable after initialization.
- return TryMakeRef(broker(), object()->bound_this(), kAssumeMemoryFence);
- }
- return TryMakeRef<Object>(broker(),
- data()->AsJSBoundFunction()->bound_this());
+
+ObjectRef JSBoundFunctionRef::bound_this() const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(broker(), object()->bound_this());
}
+
FixedArrayRef JSBoundFunctionRef::bound_arguments() const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Immutable after initialization.
- return MakeRefAssumeMemoryFence(broker(), object()->bound_arguments());
- }
- return FixedArrayRef(broker(),
- data()->AsJSBoundFunction()->bound_arguments());
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(broker(), object()->bound_arguments());
}
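With the JSBoundFunctionData serialization path gone, all three accessors read the heap directly and return non-optional refs; the fields are immutable once the bound function is initialized. Illustrative use, assuming a JSBoundFunctionRef bound:

  JSReceiverRef target = bound.bound_target_function();
  ObjectRef receiver = bound.bound_this();
  FixedArrayRef args = bound.bound_arguments();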
// Immutable after initialization.
@@ -2354,8 +2020,6 @@ BIMODAL_ACCESSOR_C(Map, int, instance_size)
BIMODAL_ACCESSOR_WITH_FLAG_C(Map, int, NextFreePropertyIndex)
BIMODAL_ACCESSOR_C(Map, int, UnusedPropertyFields)
BIMODAL_ACCESSOR_WITH_FLAG_C(Map, InstanceType, instance_type)
-BIMODAL_ACCESSOR_WITH_FLAG(Map, Object, GetConstructor)
-BIMODAL_ACCESSOR_WITH_FLAG(Map, HeapObject, GetBackPointer)
BIMODAL_ACCESSOR_C(Map, bool, is_abandoned_prototype_map)
int ObjectBoilerplateDescriptionRef::size() const { return object()->size(); }
@@ -2385,33 +2049,16 @@ bool FunctionTemplateInfoRef::is_signature_undefined() const {
return object()->signature().IsUndefined(broker()->isolate());
}
-bool FunctionTemplateInfoRef::has_call_code() const {
- HeapObject call_code = object()->call_code(kAcquireLoad);
- return !call_code.IsUndefined();
-}
-
HEAP_ACCESSOR_C(FunctionTemplateInfo, bool, accept_any_receiver)
HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
- MapRef receiver_map, SerializationPolicy policy) {
+ MapRef receiver_map) {
const HolderLookupResult not_found;
- // There are currently two ways we can see a FunctionTemplateInfo on the
- // background thread: 1.) As part of a SharedFunctionInfo and 2.) in an
- // AccessorPair. In both cases, the FTI is fully constructed on the main
- // thread before.
- // TODO(nicohartmann@, v8:7790): Once the above no longer holds, we might
- // have to use the GC predicate to check whether objects are fully
- // initialized and safe to read.
- if (!receiver_map.IsJSReceiverMap() ||
- (receiver_map.is_access_check_needed() &&
- !object()->accept_any_receiver())) {
+ if (!receiver_map.IsJSObjectMap() || (receiver_map.is_access_check_needed() &&
+ !object()->accept_any_receiver())) {
return not_found;
}
- if (!receiver_map.IsJSObjectMap()) return not_found;
-
- DCHECK(has_call_code());
-
Handle<FunctionTemplateInfo> expected_receiver_type;
{
DisallowGarbageCollection no_gc;
@@ -2424,17 +2071,11 @@ HolderLookupResult FunctionTemplateInfoRef::LookupHolderOfExpectedType(
if (expected_receiver_type->IsTemplateFor(*receiver_map.object())) {
return HolderLookupResult(CallOptimization::kHolderIsReceiver);
}
-
if (!receiver_map.IsJSGlobalProxyMap()) return not_found;
}
- if (policy == SerializationPolicy::kSerializeIfNeeded) {
- receiver_map.SerializePrototype(NotConcurrentInliningTag{broker()});
- }
base::Optional<HeapObjectRef> prototype = receiver_map.prototype();
- if (!prototype.has_value()) return not_found;
- if (prototype->IsNull()) return not_found;
-
+ if (!prototype.has_value() || prototype->IsNull()) return not_found;
if (!expected_receiver_type->IsTemplateFor(prototype->object()->map())) {
return not_found;
}
@@ -2457,6 +2098,7 @@ ScopeInfoRef ScopeInfoRef::OuterScopeInfo() const {
HEAP_ACCESSOR_C(SharedFunctionInfo, Builtin, builtin_id)
BytecodeArrayRef SharedFunctionInfoRef::GetBytecodeArray() const {
+ CHECK(HasBytecodeArray());
BytecodeArray bytecode_array;
if (!broker()->IsMainThread()) {
bytecode_array = object()->GetBytecodeArray(broker()->local_isolate());
@@ -2480,12 +2122,9 @@ SharedFunctionInfo::Inlineability SharedFunctionInfoRef::GetInlineability()
broker()->is_turboprop());
}
-base::Optional<FeedbackVectorRef> FeedbackCellRef::value() const {
- DisallowGarbageCollection no_gc;
+ObjectRef FeedbackCellRef::value() const {
DCHECK(data_->should_access_heap());
- Object value = object()->value(kAcquireLoad);
- if (!value.IsFeedbackVector()) return base::nullopt;
- return MakeRefAssumeMemoryFence(broker(), FeedbackVector::cast(value));
+ return MakeRefAssumeMemoryFence(broker(), object()->value(kAcquireLoad));
}
base::Optional<ObjectRef> MapRef::GetStrongValue(
@@ -2513,75 +2152,59 @@ base::Optional<HeapObjectRef> MapRef::prototype() const {
return HeapObjectRef(broker(), prototype_data);
}
-void MapRef::SerializeRootMap(NotConcurrentInliningTag tag) {
- if (data_->should_access_heap()) return;
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsMap()->SerializeRootMap(broker(), tag);
+MapRef MapRef::FindRootMap() const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ // TODO(solanes, v8:7790): Consider caching the result of the root map.
+ return MakeRefAssumeMemoryFence(broker(),
+ object()->FindRootMap(broker()->isolate()));
}
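Since FindRootMap() now returns a plain MapRef (see the matching header change below), callers can drop the optional unwrap:

  MapRef root = map.FindRootMap();  // previously base::Optional<MapRef>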
-// TODO(solanes, v8:7790): Remove base::Optional from the return type when
-// deleting serialization.
-base::Optional<MapRef> MapRef::FindRootMap() const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // TODO(solanes): Change TryMakeRef to MakeRef when Map is moved to
- // kNeverSerialized.
- // TODO(solanes, v8:7790): Consider caching the result of the root map.
- return TryMakeRef(broker(), object()->FindRootMap(broker()->isolate()));
+ObjectRef MapRef::GetConstructor() const {
+ if (data()->should_access_heap() || broker()->is_concurrent_inlining()) {
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(broker(), object()->GetConstructor());
}
- ObjectData* map_data = data()->AsMap()->FindRootMap();
- if (map_data != nullptr) {
- return MapRef(broker(), map_data);
+ return ObjectRef(broker(), data()->AsMap()->GetConstructor());
+}
+
+HeapObjectRef MapRef::GetBackPointer() const {
+ if (data()->should_access_heap() || broker()->is_concurrent_inlining()) {
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(
+ broker(), HeapObject::cast(object()->GetBackPointer()));
}
- TRACE_BROKER_MISSING(broker(), "root map for object " << *this);
- return base::nullopt;
+ return HeapObjectRef(broker(), ObjectRef::data()->AsMap()->GetBackPointer());
}
bool JSTypedArrayRef::is_on_heap() const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Safe to read concurrently because:
- // - host object seen by serializer.
- // - underlying field written 1. during initialization or 2. with
- // release-store.
- return object()->is_on_heap(kAcquireLoad);
- }
- return data()->AsJSTypedArray()->data_ptr();
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ // Underlying field written 1. during initialization or 2. with release-store.
+ return object()->is_on_heap(kAcquireLoad);
}
size_t JSTypedArrayRef::length() const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
CHECK(!is_on_heap());
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Safe to read concurrently because:
- // - immutable after initialization.
- // - host object seen by serializer.
- return object()->length();
- }
- return data()->AsJSTypedArray()->length();
+ // Immutable after initialization.
+ return object()->length();
}
HeapObjectRef JSTypedArrayRef::buffer() const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
CHECK(!is_on_heap());
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Safe to read concurrently because:
- // - immutable after initialization.
- // - host object seen by serializer.
- return MakeRef<HeapObject>(broker(), object()->buffer());
- }
- return HeapObjectRef{broker(), data()->AsJSTypedArray()->buffer()};
+ // Immutable after initialization.
+ return MakeRef<HeapObject>(broker(), object()->buffer());
}
void* JSTypedArrayRef::data_ptr() const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
CHECK(!is_on_heap());
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Safe to read concurrently because:
- // - host object seen by serializer.
- // - underlying field written 1. during initialization or 2. protected by
- // the is_on_heap release/acquire semantics (external_pointer store
- // happens-before base_pointer store, and this external_pointer load
- // happens-after base_pointer load).
- STATIC_ASSERT(JSTypedArray::kOffHeapDataPtrEqualsExternalPointer);
- return object()->DataPtr();
- }
- return data()->AsJSTypedArray()->data_ptr();
+ // Underlying field written 1. during initialization or 2. protected by the
+ // is_on_heap release/acquire semantics (external_pointer store happens-before
+ // base_pointer store, and this external_pointer load happens-after
+ // base_pointer load).
+ STATIC_ASSERT(JSTypedArray::kOffHeapDataPtrEqualsExternalPointer);
+ return object()->DataPtr();
}
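The comment above relies on standard release/acquire publication: plain stores sequenced before a release store become visible to any thread whose acquire load observes that store. A self-contained sketch of the same pattern in portable C++ (not V8 code):

  #include <atomic>

  struct TypedArrayLike {
    void* external_pointer = nullptr;  // plain field, published below
    std::atomic<bool> on_heap{true};   // the release/acquire flag
  };

  void Publish(TypedArrayLike& t, void* data) {
    t.external_pointer = data;                          // happens-before...
    t.on_heap.store(false, std::memory_order_release);  // ...this store
  }

  void* Consume(TypedArrayLike& t) {
    if (!t.on_heap.load(std::memory_order_acquire)) {   // pairs with release
      return t.external_pointer;  // guaranteed to see the published pointer
    }
    return nullptr;
  }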
bool MapRef::IsInobjectSlackTrackingInProgress() const {
@@ -2642,32 +2265,6 @@ ZoneVector<const CFunctionInfo*> FunctionTemplateInfoRef::c_signatures() const {
bool StringRef::IsSeqString() const { return object()->IsSeqString(); }
-void NativeContextRef::Serialize(NotConcurrentInliningTag tag) {
- // TODO(jgruber): Disable visitation if should_access_heap() once all
- // NativeContext element refs can be created on background threads. Until
- // then, we *must* iterate them and create refs at serialization-time (even
- // though NativeContextRef itself is never-serialized).
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
-#define SERIALIZE_MEMBER(type, name) \
- { \
- ObjectData* member_data = broker()->GetOrCreateData(object()->name()); \
- if (member_data->IsMap() && !InstanceTypeChecker::IsContext( \
- member_data->AsMap()->instance_type())) { \
- member_data->AsMap()->SerializeConstructor(broker(), tag); \
- } \
- }
- BROKER_NATIVE_CONTEXT_FIELDS(SERIALIZE_MEMBER)
-#undef SERIALIZE_MEMBER
-
- for (int i = Context::FIRST_FUNCTION_MAP_INDEX;
- i <= Context::LAST_FUNCTION_MAP_INDEX; i++) {
- MapData* member_data = broker()->GetOrCreateData(object()->get(i))->AsMap();
- if (!InstanceTypeChecker::IsContext(member_data->instance_type())) {
- member_data->SerializeConstructor(broker(), tag);
- }
- }
-}
-
ScopeInfoRef NativeContextRef::scope_info() const {
// The scope_info is immutable after initialization.
return MakeRefAssumeMemoryFence(broker(), object()->scope_info());
@@ -2777,25 +2374,18 @@ bool ObjectRef::should_access_heap() const {
base::Optional<ObjectRef> JSObjectRef::GetOwnConstantElement(
const FixedArrayBaseRef& elements_ref, uint32_t index,
- CompilationDependencies* dependencies, SerializationPolicy policy) const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- base::Optional<Object> maybe_element = GetOwnConstantElementFromHeap(
- *elements_ref.object(), map().elements_kind(), index);
-
- if (!maybe_element.has_value()) return {};
+ CompilationDependencies* dependencies) const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ base::Optional<Object> maybe_element = GetOwnConstantElementFromHeap(
+ *elements_ref.object(), map().elements_kind(), index);
+ if (!maybe_element.has_value()) return {};
- base::Optional<ObjectRef> result =
- TryMakeRef(broker(), maybe_element.value());
- if (policy == SerializationPolicy::kAssumeSerialized &&
- result.has_value()) {
- dependencies->DependOnOwnConstantElement(*this, index, *result);
- }
- return result;
- } else {
- ObjectData* element =
- data()->AsJSObject()->GetOwnConstantElement(broker(), index, policy);
- return TryMakeRef<Object>(broker(), element);
+ base::Optional<ObjectRef> result =
+ TryMakeRef(broker(), maybe_element.value());
+ if (result.has_value()) {
+ dependencies->DependOnOwnConstantElement(*this, index, *result);
}
+ return result;
}
base::Optional<Object> JSObjectRef::GetOwnConstantElementFromHeap(
@@ -2844,109 +2434,82 @@ base::Optional<Object> JSObjectRef::GetOwnConstantElementFromHeap(
base::Optional<ObjectRef> JSObjectRef::GetOwnFastDataProperty(
Representation field_representation, FieldIndex index,
- CompilationDependencies* dependencies, SerializationPolicy policy) const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- base::Optional<ObjectRef> result = GetOwnFastDataPropertyFromHeap(
- broker(), *this, field_representation, index);
- if (policy == SerializationPolicy::kAssumeSerialized &&
- result.has_value()) {
- dependencies->DependOnOwnConstantDataProperty(
- *this, map(), field_representation, index, *result);
- }
- return result;
+ CompilationDependencies* dependencies) const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ base::Optional<ObjectRef> result = GetOwnFastDataPropertyFromHeap(
+ broker(), *this, field_representation, index);
+ if (result.has_value()) {
+ dependencies->DependOnOwnConstantDataProperty(
+ *this, map(), field_representation, index, *result);
}
- ObjectData* property = data()->AsJSObject()->GetOwnFastDataProperty(
- broker(), field_representation, index, policy);
- return TryMakeRef<Object>(broker(), property);
+ return result;
}
base::Optional<ObjectRef> JSObjectRef::GetOwnDictionaryProperty(
- InternalIndex index, CompilationDependencies* dependencies,
- SerializationPolicy policy) const {
+ InternalIndex index, CompilationDependencies* dependencies) const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
CHECK(index.is_found());
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- base::Optional<ObjectRef> result =
- GetOwnDictionaryPropertyFromHeap(broker(), object(), index);
- if (policy == SerializationPolicy::kAssumeSerialized &&
- result.has_value()) {
- dependencies->DependOnOwnConstantDictionaryProperty(*this, index,
- *result);
- }
- return result;
+ base::Optional<ObjectRef> result =
+ GetOwnDictionaryPropertyFromHeap(broker(), object(), index);
+ if (result.has_value()) {
+ dependencies->DependOnOwnConstantDictionaryProperty(*this, index, *result);
}
- ObjectData* property =
- data()->AsJSObject()->GetOwnDictionaryProperty(broker(), index, policy);
- CHECK_NE(property, nullptr);
- return ObjectRef(broker(), property);
+ return result;
}
ObjectRef JSArrayRef::GetBoilerplateLength() const {
// Safe to read concurrently because:
// - boilerplates are immutable after initialization.
// - boilerplates are published into the feedback vector.
- return length_unsafe();
+ // These facts also mean we can expect a valid value.
+ return length_unsafe().value();
}
-ObjectRef JSArrayRef::length_unsafe() const {
+base::Optional<ObjectRef> JSArrayRef::length_unsafe() const {
if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- return MakeRef(broker(),
- object()->length(broker()->isolate(), kRelaxedLoad));
+ return TryMakeRef(broker(),
+ object()->length(broker()->isolate(), kRelaxedLoad));
} else {
return ObjectRef{broker(), data()->AsJSArray()->length()};
}
}
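Callers must now handle the nullopt case; GetOwnCowElement below shows the canonical pattern, condensed here:

  base::Optional<ObjectRef> length = array.length_unsafe();
  if (!length.has_value() || !length->IsSmi()) return {};  // bail out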
base::Optional<ObjectRef> JSArrayRef::GetOwnCowElement(
- FixedArrayBaseRef elements_ref, uint32_t index,
- SerializationPolicy policy) const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Note: we'd like to check `elements_ref == elements()` here, but due to
- // concurrency this may not hold. The code below must be able to deal with
- // concurrent `elements` modifications.
-
- // Due to concurrency, the kind read here may not be consistent with
- // `elements_ref`. The caller has to guarantee consistency at runtime by
- // other means (e.g. through a runtime equality check or a compilation
- // dependency).
- ElementsKind elements_kind = map().elements_kind();
-
- // We only inspect fixed COW arrays, which may only occur for fast
- // smi/objects elements kinds.
- if (!IsSmiOrObjectElementsKind(elements_kind)) return {};
- DCHECK(IsFastElementsKind(elements_kind));
- if (!elements_ref.map().IsFixedCowArrayMap()) return {};
-
- // As the name says, the `length` read here is unsafe and may not match
- // `elements`. We rely on the invariant that any `length` change will
- // also result in an `elements` change to make this safe. The `elements`
- // consistency check in the caller thus also guards the value of `length`.
- ObjectRef length_ref = length_unsafe();
-
- // Likewise we only deal with smi lengths.
- if (!length_ref.IsSmi()) return {};
-
- base::Optional<Object> result =
- ConcurrentLookupIterator::TryGetOwnCowElement(
- broker()->isolate(), *elements_ref.AsFixedArray().object(),
- elements_kind, length_ref.AsSmi(), index);
- if (!result.has_value()) return {};
-
- return TryMakeRef(broker(), result.value());
- } else {
- DCHECK(!data_->should_access_heap());
- DCHECK(!broker()->is_concurrent_inlining());
+ FixedArrayBaseRef elements_ref, uint32_t index) const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ // Note: we'd like to check `elements_ref == elements()` here, but due to
+ // concurrency this may not hold. The code below must be able to deal with
+ // concurrent `elements` modifications.
- // Just to clarify that `elements_ref` is not used on this path.
- // GetOwnElement accesses the serialized `elements` field on its own.
- USE(elements_ref);
+ // Due to concurrency, the kind read here may not be consistent with
+ // `elements_ref`. The caller has to guarantee consistency at runtime by
+ // other means (e.g. through a runtime equality check or a compilation
+ // dependency).
+ ElementsKind elements_kind = map().elements_kind();
- if (!elements(kRelaxedLoad).value().map().IsFixedCowArrayMap()) return {};
+ // We only inspect fixed COW arrays, which may only occur for fast
+ // smi/objects elements kinds.
+ if (!IsSmiOrObjectElementsKind(elements_kind)) return {};
+ DCHECK(IsFastElementsKind(elements_kind));
+ if (!elements_ref.map().IsFixedCowArrayMap()) return {};
- ObjectData* element =
- data()->AsJSArray()->GetOwnElement(broker(), index, policy);
- if (element == nullptr) return base::nullopt;
- return ObjectRef(broker(), element);
- }
+ // As the name says, the `length` read here is unsafe and may not match
+ // `elements`. We rely on the invariant that any `length` change will
+ // also result in an `elements` change to make this safe. The `elements`
+ // consistency check in the caller thus also guards the value of `length`.
+ base::Optional<ObjectRef> length_ref = length_unsafe();
+
+ if (!length_ref.has_value()) return {};
+
+ // Likewise we only deal with smi lengths.
+ if (!length_ref->IsSmi()) return {};
+
+ base::Optional<Object> result = ConcurrentLookupIterator::TryGetOwnCowElement(
+ broker()->isolate(), *elements_ref.AsFixedArray().object(), elements_kind,
+ length_ref->AsSmi(), index);
+ if (!result.has_value()) return {};
+
+ return TryMakeRef(broker(), result.value());
}
base::Optional<CellRef> SourceTextModuleRef::GetCell(int cell_index) const {
@@ -3062,15 +2625,22 @@ base::Optional<ObjectRef> DescriptorArrayRef::GetStrongValue(
return TryMakeRef(broker(), heap_object);
}
+base::Optional<FeedbackVectorRef> FeedbackCellRef::feedback_vector() const {
+ ObjectRef contents = value();
+ if (!contents.IsFeedbackVector()) return {};
+ return contents.AsFeedbackVector();
+}
+
base::Optional<SharedFunctionInfoRef> FeedbackCellRef::shared_function_info()
const {
- base::Optional<FeedbackVectorRef> feedback_vector = value();
- if (!feedback_vector.has_value()) return {};
- return feedback_vector->shared_function_info();
+ base::Optional<FeedbackVectorRef> vector = feedback_vector();
+ if (!vector.has_value()) return {};
+ return vector->shared_function_info();
}
SharedFunctionInfoRef FeedbackVectorRef::shared_function_info() const {
- return MakeRef(broker(), object()->shared_function_info());
+ // Immutable after initialization.
+ return MakeRefAssumeMemoryFence(broker(), object()->shared_function_info());
}
bool NameRef::IsUniqueName() const {
@@ -3143,20 +2713,6 @@ Handle<T> TinyRef<T>::object() const {
HEAP_BROKER_OBJECT_LIST(V)
#undef V
-Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
- const char* function, int line) {
- TRACE_MISSING(broker, "data in function " << function << " at line " << line);
- return AdvancedReducer::NoChange();
-}
-
-bool JSBoundFunctionRef::Serialize(NotConcurrentInliningTag tag) {
- if (data_->should_access_heap()) {
- return true;
- }
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- return data()->AsJSBoundFunction()->Serialize(broker(), tag);
-}
-
#define JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(Result, Name, UsedField) \
Result##Ref JSFunctionRef::Name(CompilationDependencies* dependencies) \
const { \
@@ -3174,26 +2730,40 @@ bool JSBoundFunctionRef::Serialize(NotConcurrentInliningTag tag) {
return data()->AsJSFunction()->Name(); \
}
-JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(bool, has_feedback_vector,
- JSFunctionData::kHasFeedbackVector)
-JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(bool, has_initial_map,
- JSFunctionData::kHasInitialMap)
-JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(bool, has_instance_prototype,
- JSFunctionData::kHasInstancePrototype)
-JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C(
+// Like JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_C but only depend on the
+// field in question if its recorded value is "relevant". This is in order to
+// tolerate certain state changes during compilation, e.g. from "has no feedback
+// vector" (in which case we would simply do less optimization) to "has feedback
+// vector".
+#define JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_RELEVANT_C( \
+ Result, Name, UsedField, RelevantValue) \
+ Result JSFunctionRef::Name(CompilationDependencies* dependencies) const { \
+ IF_ACCESS_FROM_HEAP_C(Name); \
+ Result const result = data()->AsJSFunction()->Name(); \
+ if (result == RelevantValue) { \
+ RecordConsistentJSFunctionViewDependencyIfNeeded( \
+ broker(), *this, data()->AsJSFunction(), UsedField); \
+ } \
+ return result; \
+ }
+
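For concreteness, the first invocation below expands to roughly the following (a hand expansion for illustration, not code from the patch):

  bool JSFunctionRef::has_initial_map(
      CompilationDependencies* dependencies) const {
    IF_ACCESS_FROM_HEAP_C(has_initial_map);
    bool const result = data()->AsJSFunction()->has_initial_map();
    if (result == true) {
      RecordConsistentJSFunctionViewDependencyIfNeeded(
          broker(), *this, data()->AsJSFunction(),
          JSFunctionData::kHasInitialMap);
    }
    return result;
  }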
+JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_RELEVANT_C(bool, has_initial_map,
+ JSFunctionData::kHasInitialMap,
+ true)
+JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_RELEVANT_C(
+ bool, has_instance_prototype, JSFunctionData::kHasInstancePrototype, true)
+JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP_RELEVANT_C(
bool, PrototypeRequiresRuntimeLookup,
- JSFunctionData::kPrototypeRequiresRuntimeLookup)
+ JSFunctionData::kPrototypeRequiresRuntimeLookup, false)
+
JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(Map, initial_map,
JSFunctionData::kInitialMap)
JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(Object, instance_prototype,
JSFunctionData::kInstancePrototype)
JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(FeedbackCell, raw_feedback_cell,
JSFunctionData::kFeedbackCell)
-JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP(FeedbackVector, feedback_vector,
- JSFunctionData::kFeedbackVector)
BIMODAL_ACCESSOR(JSFunction, Context, context)
-BIMODAL_ACCESSOR(JSFunction, NativeContext, native_context)
BIMODAL_ACCESSOR(JSFunction, SharedFunctionInfo, shared)
#undef JSFUNCTION_BIMODAL_ACCESSOR_WITH_DEP
@@ -3203,6 +2773,11 @@ CodeRef JSFunctionRef::code() const {
return MakeRefAssumeMemoryFence(broker(), object()->code(kAcquireLoad));
}
+NativeContextRef JSFunctionRef::native_context() const {
+ return MakeRefAssumeMemoryFence(broker(),
+ context().object()->native_context());
+}
+
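The cached native_context_ field (which derived from context_) is gone; the ref now computes the native context on demand from the function's context. Illustrative use, assuming a JSFunctionRef fn:

  NativeContextRef native = fn.native_context();  // derived, not cached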
base::Optional<FunctionTemplateInfoRef>
SharedFunctionInfoRef::function_template_info() const {
if (!object()->IsApiFunction()) return {};
@@ -3269,23 +2844,6 @@ void MapRef::SerializePrototype(NotConcurrentInliningTag tag) {
CHECK(TrySerializePrototype(tag));
}
-void JSTypedArrayRef::Serialize(NotConcurrentInliningTag tag) {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- // Nothing to do.
- } else {
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- data()->AsJSTypedArray()->Serialize(broker(), tag);
- }
-}
-
-bool JSTypedArrayRef::serialized() const {
- if (data_->should_access_heap()) return true;
- if (broker()->is_concurrent_inlining()) return true;
- if (data_->AsJSTypedArray()->serialized()) return true;
- TRACE_BROKER_MISSING(broker(), "data for JSTypedArray " << this);
- return false;
-}
-
bool PropertyCellRef::Cache() const {
if (data_->should_access_heap()) return true;
CHECK(broker()->mode() == JSHeapBroker::kSerializing ||
@@ -3293,18 +2851,6 @@ bool PropertyCellRef::Cache() const {
return data()->AsPropertyCell()->Cache(broker());
}
-void FunctionTemplateInfoRef::SerializeCallCode(NotConcurrentInliningTag tag) {
- CHECK_EQ(broker()->mode(), JSHeapBroker::kSerializing);
- // CallHandlerInfo::data may still hold a serialized heap object, so we
- // have to make the broker aware of it.
- // TODO(v8:7790): Remove this case once ObjectRef is never serialized.
- Handle<HeapObject> call_code(object()->call_code(kAcquireLoad),
- broker()->isolate());
- if (call_code->IsCallHandlerInfo()) {
- broker()->GetOrCreateData(Handle<CallHandlerInfo>::cast(call_code)->data());
- }
-}
-
bool NativeContextRef::GlobalIsDetached() const {
base::Optional<ObjectRef> proxy_proto =
global_proxy_object().map().prototype();
@@ -3312,14 +2858,15 @@ bool NativeContextRef::GlobalIsDetached() const {
}
base::Optional<PropertyCellRef> JSGlobalObjectRef::GetPropertyCell(
- NameRef const& name, SerializationPolicy policy) const {
- if (data_->should_access_heap() || broker()->is_concurrent_inlining()) {
- return GetPropertyCellFromHeap(broker(), name.object());
- }
-
- ObjectData* property_cell_data = data()->AsJSGlobalObject()->GetPropertyCell(
- broker(), name.data(), policy);
- return TryMakeRef<PropertyCell>(broker(), property_cell_data);
+ NameRef const& name) const {
+ DCHECK(data_->should_access_heap() || broker()->is_concurrent_inlining());
+ base::Optional<PropertyCell> maybe_cell =
+ ConcurrentLookupIterator::TryGetPropertyCell(
+ broker()->isolate(), broker()->local_isolate_or_isolate(),
+ broker()->target_native_context().global_object().object(),
+ name.object());
+ if (!maybe_cell.has_value()) return {};
+ return TryMakeRef(broker(), *maybe_cell);
}
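The lookup is now always the concurrent heap path; nullopt means no usable property cell was found (or a ref could not be created). A hypothetical reducer call site:

  base::Optional<PropertyCellRef> cell = global.GetPropertyCell(name);
  if (!cell.has_value()) return NoChange();  // illustrative bail-out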
std::ostream& operator<<(std::ostream& os, const ObjectRef& ref) {
@@ -3347,13 +2894,11 @@ unsigned CodeRef::GetInlinedBytecodeSize() const {
#undef BIMODAL_ACCESSOR
#undef BIMODAL_ACCESSOR_B
#undef BIMODAL_ACCESSOR_C
-#undef BIMODAL_ACCESSOR_WITH_FLAG
#undef BIMODAL_ACCESSOR_WITH_FLAG_B
#undef BIMODAL_ACCESSOR_WITH_FLAG_C
#undef HEAP_ACCESSOR_C
#undef IF_ACCESS_FROM_HEAP
#undef IF_ACCESS_FROM_HEAP_C
-#undef IF_ACCESS_FROM_HEAP_WITH_FLAG
#undef IF_ACCESS_FROM_HEAP_WITH_FLAG_C
#undef TRACE
#undef TRACE_MISSING
diff --git a/deps/v8/src/compiler/heap-refs.h b/deps/v8/src/compiler/heap-refs.h
index d580671f6d..4644071ea5 100644
--- a/deps/v8/src/compiler/heap-refs.h
+++ b/deps/v8/src/compiler/heap-refs.h
@@ -55,8 +55,6 @@ inline bool IsAnyStore(AccessMode mode) {
return mode == AccessMode::kStore || mode == AccessMode::kStoreInLiteral;
}
-enum class SerializationPolicy { kAssumeSerialized, kSerializeIfNeeded };
-
// Clarifies in function signatures that a method may only be called when
// concurrent inlining is disabled.
class NotConcurrentInliningTag final {
@@ -272,6 +270,7 @@ class V8_EXPORT_PRIVATE ObjectRef {
private:
friend class FunctionTemplateInfoRef;
friend class JSArrayData;
+ friend class JSFunctionData;
friend class JSGlobalObjectData;
friend class JSGlobalObjectRef;
friend class JSHeapBroker;
@@ -395,9 +394,7 @@ class JSObjectRef : public JSReceiverRef {
// against inconsistency due to weak memory concurrency.
base::Optional<ObjectRef> GetOwnConstantElement(
const FixedArrayBaseRef& elements_ref, uint32_t index,
- CompilationDependencies* dependencies,
- SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ CompilationDependencies* dependencies) const;
// The direct-read implementation of the above, extracted into a helper since
// it's also called from compilation-dependency validation. This helper is
// guaranteed to not create new Ref instances.
@@ -412,16 +409,12 @@ class JSObjectRef : public JSReceiverRef {
// property at code finalization time.
base::Optional<ObjectRef> GetOwnFastDataProperty(
Representation field_representation, FieldIndex index,
- CompilationDependencies* dependencies,
- SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ CompilationDependencies* dependencies) const;
// Return the value of the dictionary property at {index} in the dictionary
// if {index} is known to be an own data property of the object.
base::Optional<ObjectRef> GetOwnDictionaryProperty(
- InternalIndex index, CompilationDependencies* dependencies,
- SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ InternalIndex index, CompilationDependencies* dependencies) const;
// When concurrent inlining is enabled, reads the elements through a direct
// relaxed read. This is to ease the transition to unserialized (or
@@ -451,12 +444,8 @@ class JSBoundFunctionRef : public JSObjectRef {
Handle<JSBoundFunction> object() const;
- bool Serialize(NotConcurrentInliningTag tag);
-
- // TODO(neis): Make return types non-optional once JSFunction is no longer
- // fg-serialized.
- base::Optional<JSReceiverRef> bound_target_function() const;
- base::Optional<ObjectRef> bound_this() const;
+ JSReceiverRef bound_target_function() const;
+ ObjectRef bound_this() const;
FixedArrayRef bound_arguments() const;
};
@@ -474,8 +463,8 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
ContextRef context() const;
NativeContextRef native_context() const;
SharedFunctionInfoRef shared() const;
+ CodeRef code() const;
- bool has_feedback_vector(CompilationDependencies* dependencies) const;
bool has_initial_map(CompilationDependencies* dependencies) const;
bool PrototypeRequiresRuntimeLookup(
CompilationDependencies* dependencies) const;
@@ -484,12 +473,10 @@ class V8_EXPORT_PRIVATE JSFunctionRef : public JSObjectRef {
MapRef initial_map(CompilationDependencies* dependencies) const;
int InitialMapInstanceSizeWithMinSlack(
CompilationDependencies* dependencies) const;
- FeedbackVectorRef feedback_vector(
- CompilationDependencies* dependencies) const;
FeedbackCellRef raw_feedback_cell(
CompilationDependencies* dependencies) const;
-
- CodeRef code() const;
+ base::Optional<FeedbackVectorRef> feedback_vector(
+ CompilationDependencies* dependencies) const;
};
class RegExpBoilerplateDescriptionRef : public HeapObjectRef {
@@ -535,9 +522,6 @@ class ContextRef : public HeapObjectRef {
base::Optional<ObjectRef> get(int index) const;
};
-// TODO(jgruber): Don't serialize NativeContext fields once all refs can be
-// created concurrently.
-
#define BROKER_NATIVE_CONTEXT_FIELDS(V) \
V(JSFunction, array_function) \
V(JSFunction, bigint_function) \
@@ -629,13 +613,12 @@ class FeedbackCellRef : public HeapObjectRef {
DEFINE_REF_CONSTRUCTOR(FeedbackCell, HeapObjectRef)
Handle<FeedbackCell> object() const;
- base::Optional<SharedFunctionInfoRef> shared_function_info() const;
- // TODO(mvstanton): Once we allow inlining of functions we didn't see
- // during serialization, we do need to ensure that any feedback vector
- // we read here has been fully initialized (ie, store-ordered into the
- // cell).
- base::Optional<FeedbackVectorRef> value() const;
+ ObjectRef value() const;
+
+ // Convenience wrappers around {value()}:
+ base::Optional<FeedbackVectorRef> feedback_vector() const;
+ base::Optional<SharedFunctionInfoRef> shared_function_info() const;
};
class FeedbackVectorRef : public HeapObjectRef {
@@ -729,6 +712,8 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
OddballType oddball_type() const;
+ bool CanInlineElementAccess() const;
+
// Note: Only returns a value if the requested elements kind matches the
// current kind, or if the current map is an unmodified JSArray initial map.
base::Optional<MapRef> AsElementsKind(ElementsKind kind) const;
@@ -752,6 +737,7 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
ZoneVector<MapRef>* prototype_maps);
// Concerning the underlying instance_descriptors:
+ DescriptorArrayRef instance_descriptors() const;
MapRef FindFieldOwner(InternalIndex descriptor_index) const;
PropertyDetails GetPropertyDetails(InternalIndex descriptor_index) const;
NameRef GetPropertyKey(InternalIndex descriptor_index) const;
@@ -760,11 +746,7 @@ class V8_EXPORT_PRIVATE MapRef : public HeapObjectRef {
base::Optional<ObjectRef> GetStrongValue(
InternalIndex descriptor_number) const;
- DescriptorArrayRef instance_descriptors() const;
-
- void SerializeRootMap(NotConcurrentInliningTag tag);
- base::Optional<MapRef> FindRootMap() const;
-
+ MapRef FindRootMap() const;
ObjectRef GetConstructor() const;
};
@@ -785,17 +767,10 @@ class FunctionTemplateInfoRef : public HeapObjectRef {
bool is_signature_undefined() const;
bool accept_any_receiver() const;
- // The following returns true if the CallHandlerInfo is present.
- bool has_call_code() const;
-
- void SerializeCallCode(NotConcurrentInliningTag tag);
base::Optional<CallHandlerInfoRef> call_code() const;
ZoneVector<Address> c_functions() const;
ZoneVector<const CFunctionInfo*> c_signatures() const;
-
- HolderLookupResult LookupHolderOfExpectedType(
- MapRef receiver_map,
- SerializationPolicy policy = SerializationPolicy::kAssumeSerialized);
+ HolderLookupResult LookupHolderOfExpectedType(MapRef receiver_map);
};
class FixedArrayBaseRef : public HeapObjectRef {
@@ -821,12 +796,6 @@ class FixedArrayRef : public FixedArrayBaseRef {
Handle<FixedArray> object() const;
- ObjectRef get(int i) const;
-
- // As above but may fail if Ref construction is not possible (e.g. for
- // serialized types on the background thread).
- // TODO(jgruber): Remove once all Ref types are never-serialized or
- // background-serialized and can thus be created on background threads.
base::Optional<ObjectRef> TryGet(int i) const;
};
@@ -894,15 +863,14 @@ class JSArrayRef : public JSObjectRef {
// storage and {index} is known to be an own data property.
// Note the value returned by this function is only valid if we ensure at
// runtime that the backing store has not changed.
- base::Optional<ObjectRef> GetOwnCowElement(
- FixedArrayBaseRef elements_ref, uint32_t index,
- SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ base::Optional<ObjectRef> GetOwnCowElement(FixedArrayBaseRef elements_ref,
+ uint32_t index) const;
// The `JSArray::length` property; not safe to use in general, but can be
// used in some special cases that guarantee a valid `length` value despite
- // concurrent reads.
- ObjectRef length_unsafe() const;
+ // concurrent reads. The result is optional because the value read may
+ // have been created too recently to pass the gc predicate.
+ base::Optional<ObjectRef> length_unsafe() const;
};
class ScopeInfoRef : public HeapObjectRef {
@@ -918,22 +886,23 @@ class ScopeInfoRef : public HeapObjectRef {
ScopeInfoRef OuterScopeInfo() const;
};
-#define BROKER_SFI_FIELDS(V) \
- V(int, internal_formal_parameter_count) \
- V(bool, has_simple_parameters) \
- V(bool, has_duplicate_parameters) \
- V(int, function_map_index) \
- V(FunctionKind, kind) \
- V(LanguageMode, language_mode) \
- V(bool, native) \
- V(bool, HasBreakInfo) \
- V(bool, HasBuiltinId) \
- V(bool, construct_as_builtin) \
- V(bool, HasBytecodeArray) \
- V(int, StartPosition) \
- V(bool, is_compiled) \
- V(bool, IsUserJavaScript) \
- IF_WASM(V, const wasm::WasmModule*, wasm_module) \
+#define BROKER_SFI_FIELDS(V) \
+ V(int, internal_formal_parameter_count_without_receiver) \
+ V(bool, IsDontAdaptArguments) \
+ V(bool, has_simple_parameters) \
+ V(bool, has_duplicate_parameters) \
+ V(int, function_map_index) \
+ V(FunctionKind, kind) \
+ V(LanguageMode, language_mode) \
+ V(bool, native) \
+ V(bool, HasBreakInfo) \
+ V(bool, HasBuiltinId) \
+ V(bool, construct_as_builtin) \
+ V(bool, HasBytecodeArray) \
+ V(int, StartPosition) \
+ V(bool, is_compiled) \
+ V(bool, IsUserJavaScript) \
+ IF_WASM(V, const wasm::WasmModule*, wasm_module) \
IF_WASM(V, const wasm::FunctionSig*, wasm_function_signature)
class V8_EXPORT_PRIVATE SharedFunctionInfoRef : public HeapObjectRef {
@@ -966,9 +935,7 @@ class StringRef : public NameRef {
// With concurrent inlining on, we return base::nullopt due to not being able
// to use LookupIterator in a thread-safe way.
- base::Optional<ObjectRef> GetCharAsStringOrUndefined(
- uint32_t index, SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ base::Optional<ObjectRef> GetCharAsStringOrUndefined(uint32_t index) const;
// When concurrently accessing non-read-only non-supported strings, we return
// base::nullopt for these methods.
@@ -1002,10 +969,6 @@ class JSTypedArrayRef : public JSObjectRef {
bool is_on_heap() const;
size_t length() const;
void* data_ptr() const;
-
- void Serialize(NotConcurrentInliningTag tag);
- bool serialized() const;
-
HeapObjectRef buffer() const;
};
@@ -1042,9 +1005,7 @@ class JSGlobalObjectRef : public JSObjectRef {
bool IsDetachedFrom(JSGlobalProxyRef const& proxy) const;
// Can be called even when there is no property cell for the given name.
- base::Optional<PropertyCellRef> GetPropertyCell(
- NameRef const& name, SerializationPolicy policy =
- SerializationPolicy::kAssumeSerialized) const;
+ base::Optional<PropertyCellRef> GetPropertyCell(NameRef const& name) const;
};
class JSGlobalProxyRef : public JSObjectRef {
diff --git a/deps/v8/src/compiler/int64-lowering.cc b/deps/v8/src/compiler/int64-lowering.cc
index 28eb30969c..00930998dd 100644
--- a/deps/v8/src/compiler/int64-lowering.cc
+++ b/deps/v8/src/compiler/int64-lowering.cc
@@ -944,29 +944,31 @@ void Int64Lowering::LowerNode(Node* node) {
}
case IrOpcode::kWord64AtomicLoad: {
DCHECK_EQ(4, node->InputCount());
- MachineType type = AtomicOpType(node->op());
+ AtomicLoadParameters params = AtomicLoadParametersOf(node->op());
DefaultLowering(node, true);
- if (type == MachineType::Uint64()) {
- NodeProperties::ChangeOp(node, machine()->Word32AtomicPairLoad());
+ if (params.representation() == MachineType::Uint64()) {
+ NodeProperties::ChangeOp(
+ node, machine()->Word32AtomicPairLoad(params.order()));
ReplaceNodeWithProjections(node);
} else {
- NodeProperties::ChangeOp(node, machine()->Word32AtomicLoad(type));
+ NodeProperties::ChangeOp(node, machine()->Word32AtomicLoad(params));
ReplaceNode(node, node, graph()->NewNode(common()->Int32Constant(0)));
}
break;
}
case IrOpcode::kWord64AtomicStore: {
DCHECK_EQ(5, node->InputCount());
- MachineRepresentation rep = AtomicStoreRepresentationOf(node->op());
- if (rep == MachineRepresentation::kWord64) {
+ AtomicStoreParameters params = AtomicStoreParametersOf(node->op());
+ if (params.representation() == MachineRepresentation::kWord64) {
LowerMemoryBaseAndIndex(node);
Node* value = node->InputAt(2);
node->ReplaceInput(2, GetReplacementLow(value));
node->InsertInput(zone(), 3, GetReplacementHigh(value));
- NodeProperties::ChangeOp(node, machine()->Word32AtomicPairStore());
+ NodeProperties::ChangeOp(
+ node, machine()->Word32AtomicPairStore(params.order()));
} else {
DefaultLowering(node, true);
- NodeProperties::ChangeOp(node, machine()->Word32AtomicStore(rep));
+ NodeProperties::ChangeOp(node, machine()->Word32AtomicStore(params));
}
break;
}
diff --git a/deps/v8/src/compiler/js-call-reducer.cc b/deps/v8/src/compiler/js-call-reducer.cc
index 3dcdc6a33e..91197ead1e 100644
--- a/deps/v8/src/compiler/js-call-reducer.cc
+++ b/deps/v8/src/compiler/js-call-reducer.cc
@@ -728,8 +728,7 @@ class IteratingArrayBuiltinReducerAssembler : public JSCallReducerAssembler {
TNode<HeapObject> elements =
LoadField<HeapObject>(AccessBuilder::ForJSObjectElements(), o);
TNode<Object> value = LoadElement<Object>(
- AccessBuilder::ForFixedArrayElement(kind, LoadSensitivity::kCritical),
- elements, index);
+ AccessBuilder::ForFixedArrayElement(kind), elements, index);
return std::make_pair(index, value);
}
@@ -2099,7 +2098,8 @@ FrameState CreateArtificialFrameState(
FrameState PromiseConstructorFrameState(
const PromiseCtorFrameStateParams& params, CommonOperatorBuilder* common,
Graph* graph) {
- DCHECK_EQ(1, params.shared.internal_formal_parameter_count());
+ DCHECK_EQ(1,
+ params.shared.internal_formal_parameter_count_without_receiver());
return CreateArtificialFrameState(
params.node_ptr, params.outer_frame_state, 1,
BytecodeOffset::ConstructStubInvoke(), FrameStateType::kConstructStub,
@@ -3639,8 +3639,6 @@ Reduction JSCallReducer::ReduceCallApiFunction(
FunctionTemplateInfoRef function_template_info(
shared.function_template_info().value());
- if (!function_template_info.has_call_code()) return NoChange();
-
if (function_template_info.accept_any_receiver() &&
function_template_info.is_signature_undefined()) {
// We might be able to
@@ -3764,7 +3762,8 @@ Reduction JSCallReducer::ReduceCallApiFunction(
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
node->ReplaceInput(1, jsgraph()->Constant(function_template_info));
- node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(argc));
+ node->InsertInput(graph()->zone(), 2,
+ jsgraph()->Constant(JSParameterCount(argc)));
node->ReplaceInput(3, receiver); // Update receiver input.
node->ReplaceInput(6 + argc, effect); // Update effect input.
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
@@ -4039,7 +4038,8 @@ JSCallReducer::ReduceCallOrConstructWithArrayLikeOrSpreadOfCreateArguments(
return NoChange();
}
formal_parameter_count =
- MakeRef(broker(), shared).internal_formal_parameter_count();
+ MakeRef(broker(), shared)
+ .internal_formal_parameter_count_without_receiver();
}
if (type == CreateArgumentsType::kMappedArguments) {
@@ -4309,13 +4309,9 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceJSCall(node, function.shared());
} else if (target_ref.IsJSBoundFunction()) {
JSBoundFunctionRef function = target_ref.AsJSBoundFunction();
- base::Optional<JSReceiverRef> bound_target_function =
- function.bound_target_function();
- if (!bound_target_function.has_value()) return NoChange();
- base::Optional<ObjectRef> bound_this = function.bound_this();
- if (!bound_this.has_value()) return NoChange();
+ ObjectRef bound_this = function.bound_this();
ConvertReceiverMode const convert_mode =
- bound_this->IsNullOrUndefined()
+ bound_this.IsNullOrUndefined()
? ConvertReceiverMode::kNullOrUndefined
: ConvertReceiverMode::kNotNullOrUndefined;
@@ -4336,9 +4332,9 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
// Patch {node} to use [[BoundTargetFunction]] and [[BoundThis]].
NodeProperties::ReplaceValueInput(
- node, jsgraph()->Constant(*bound_target_function),
+ node, jsgraph()->Constant(function.bound_target_function()),
JSCallNode::TargetIndex());
- NodeProperties::ReplaceValueInput(node, jsgraph()->Constant(*bound_this),
+ NodeProperties::ReplaceValueInput(node, jsgraph()->Constant(bound_this),
JSCallNode::ReceiverIndex());
// Insert the [[BoundArguments]] for {node}.
@@ -4372,13 +4368,13 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
return ReduceJSCall(node, p.shared_info(broker()));
} else if (target->opcode() == IrOpcode::kCheckClosure) {
FeedbackCellRef cell = MakeRef(broker(), FeedbackCellOf(target->op()));
- if (cell.shared_function_info().has_value()) {
- return ReduceJSCall(node, *cell.shared_function_info());
- } else {
+ base::Optional<SharedFunctionInfoRef> shared = cell.shared_function_info();
+ if (!shared.has_value()) {
TRACE_BROKER_MISSING(broker(), "Unable to reduce JSCall. FeedbackCell "
<< cell << " has no FeedbackVector");
return NoChange();
}
+ return ReduceJSCall(node, *shared);
}
// If {target} is the result of a JSCreateBoundFunction operation,
@@ -4457,7 +4453,8 @@ Reduction JSCallReducer::ReduceJSCall(Node* node) {
} else if (feedback_target.has_value() && feedback_target->IsFeedbackCell()) {
FeedbackCellRef feedback_cell =
MakeRef(broker(), feedback_target.value().AsFeedbackCell().object());
- if (feedback_cell.value().has_value()) {
+ // TODO(neis): This check seems unnecessary.
+ if (feedback_cell.feedback_vector().has_value()) {
// Check that {target} is a closure with given {feedback_cell},
// which uniquely identifies a given function inside a native context.
Node* target_closure = effect =
@@ -5055,9 +5052,7 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
}
} else if (target_ref.IsJSBoundFunction()) {
JSBoundFunctionRef function = target_ref.AsJSBoundFunction();
- base::Optional<JSReceiverRef> bound_target_function =
- function.bound_target_function();
- if (!bound_target_function.has_value()) return NoChange();
+ JSReceiverRef bound_target_function = function.bound_target_function();
FixedArrayRef bound_arguments = function.bound_arguments();
const int bound_arguments_length = bound_arguments.length();
@@ -5076,20 +5071,20 @@ Reduction JSCallReducer::ReduceJSConstruct(Node* node) {
// Patch {node} to use [[BoundTargetFunction]].
node->ReplaceInput(n.TargetIndex(),
- jsgraph()->Constant(*bound_target_function));
+ jsgraph()->Constant(bound_target_function));
// Patch {node} to use [[BoundTargetFunction]]
// as new.target if {new_target} equals {target}.
if (target == new_target) {
node->ReplaceInput(n.NewTargetIndex(),
- jsgraph()->Constant(*bound_target_function));
+ jsgraph()->Constant(bound_target_function));
} else {
node->ReplaceInput(
n.NewTargetIndex(),
graph()->NewNode(common()->Select(MachineRepresentation::kTagged),
graph()->NewNode(simplified()->ReferenceEqual(),
target, new_target),
- jsgraph()->Constant(*bound_target_function),
+ jsgraph()->Constant(bound_target_function),
new_target));
}
@@ -6373,9 +6368,8 @@ Reduction JSCallReducer::ReduceStringPrototypeStringAt(
index, receiver_length, effect, control);
// Return the character from the {receiver} as a single character string.
- Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index);
Node* value = effect = graph()->NewNode(string_access_operator, receiver,
- masked_index, effect, control);
+ index, effect, control);
ReplaceWithValue(node, value, effect, control);
return Replace(value);
@@ -6433,11 +6427,9 @@ Reduction JSCallReducer::ReduceStringPrototypeStartsWith(Node* node) {
Node* etrue = effect;
Node* vtrue;
{
- Node* masked_position = graph()->NewNode(
- simplified()->PoisonIndex(), unsigned_position);
Node* string_first = etrue =
graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
- masked_position, etrue, if_true);
+ unsigned_position, etrue, if_true);
Node* search_first =
jsgraph()->Constant(str.GetFirstChar().value());
@@ -6488,10 +6480,8 @@ Reduction JSCallReducer::ReduceStringPrototypeCharAt(Node* node) {
index, receiver_length, effect, control);
// Return the character from the {receiver} as a single character string.
- Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index);
- Node* value = effect =
- graph()->NewNode(simplified()->StringCharCodeAt(), receiver, masked_index,
- effect, control);
+ Node* value = effect = graph()->NewNode(simplified()->StringCharCodeAt(),
+ receiver, index, effect, control);
value = graph()->NewNode(simplified()->StringFromSingleCharCode(), value);
ReplaceWithValue(node, value, effect, control);
diff --git a/deps/v8/src/compiler/js-context-specialization.cc b/deps/v8/src/compiler/js-context-specialization.cc
index 02e5cb1710..36217ca13b 100644
--- a/deps/v8/src/compiler/js-context-specialization.cc
+++ b/deps/v8/src/compiler/js-context-specialization.cc
@@ -103,7 +103,16 @@ base::Optional<ContextRef> GetSpecializationContext(
Maybe<OuterContext> maybe_outer) {
switch (node->opcode()) {
case IrOpcode::kHeapConstant: {
- HeapObjectRef object = MakeRef(broker, HeapConstantOf(node->op()));
+ // TODO(jgruber,chromium:1209798): Using kAssumeMemoryFence works around
+ // the fact that the graph stores handles (and not refs). The assumption
+ // is that any handle inserted into the graph is safe to read; but we
+ // don't preserve the reason why it is safe to read. Thus we must
+ // over-approximate here and assume the existence of a memory fence. In
+ // the future, we should consider having the graph store ObjectRefs or
+ // ObjectData pointers instead, which would make new ref construction here
+ // unnecessary.
+ HeapObjectRef object =
+ MakeRefAssumeMemoryFence(broker, HeapConstantOf(node->op()));
if (object.IsContext()) return object.AsContext();
break;
}
@@ -231,7 +240,16 @@ base::Optional<ContextRef> GetModuleContext(JSHeapBroker* broker, Node* node,
switch (context->opcode()) {
case IrOpcode::kHeapConstant: {
- HeapObjectRef object = MakeRef(broker, HeapConstantOf(context->op()));
+ // TODO(jgruber,chromium:1209798): Using kAssumeMemoryFence works around
+ // the fact that the graph stores handles (and not refs). The assumption
+ // is that any handle inserted into the graph is safe to read; but we
+ // don't preserve the reason why it is safe to read. Thus we must
+ // over-approximate here and assume the existence of a memory fence. In
+ // the future, we should consider having the graph store ObjectRefs or
+ // ObjectData pointers instead, which would make new ref construction here
+ // unnecessary.
+ HeapObjectRef object =
+ MakeRefAssumeMemoryFence(broker, HeapConstantOf(context->op()));
if (object.IsContext()) {
return find_context(object.AsContext());
}
diff --git a/deps/v8/src/compiler/js-create-lowering.cc b/deps/v8/src/compiler/js-create-lowering.cc
index 414977eb7d..60c9017fc2 100644
--- a/deps/v8/src/compiler/js-create-lowering.cc
+++ b/deps/v8/src/compiler/js-create-lowering.cc
@@ -197,11 +197,11 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
Node* const arguments_length =
graph()->NewNode(simplified()->ArgumentsLength());
// Allocate the elements backing store.
- Node* const elements = effect =
- graph()->NewNode(simplified()->NewArgumentsElements(
- CreateArgumentsType::kUnmappedArguments,
- shared.internal_formal_parameter_count()),
- arguments_length, effect);
+ Node* const elements = effect = graph()->NewNode(
+ simplified()->NewArgumentsElements(
+ CreateArgumentsType::kUnmappedArguments,
+ shared.internal_formal_parameter_count_without_receiver()),
+ arguments_length, effect);
// Load the arguments object map.
Node* const arguments_map =
jsgraph()->Constant(native_context().strict_arguments_map());
@@ -222,14 +222,14 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
Node* effect = NodeProperties::GetEffectInput(node);
Node* const arguments_length =
graph()->NewNode(simplified()->ArgumentsLength());
- Node* const rest_length = graph()->NewNode(
- simplified()->RestLength(shared.internal_formal_parameter_count()));
+ Node* const rest_length = graph()->NewNode(simplified()->RestLength(
+ shared.internal_formal_parameter_count_without_receiver()));
// Allocate the elements backing store.
- Node* const elements = effect =
- graph()->NewNode(simplified()->NewArgumentsElements(
- CreateArgumentsType::kRestParameter,
- shared.internal_formal_parameter_count()),
- arguments_length, effect);
+ Node* const elements = effect = graph()->NewNode(
+ simplified()->NewArgumentsElements(
+ CreateArgumentsType::kRestParameter,
+ shared.internal_formal_parameter_count_without_receiver()),
+ arguments_length, effect);
// Load the JSArray object map.
Node* const jsarray_map = jsgraph()->Constant(
native_context().js_array_packed_elements_map());
@@ -332,7 +332,8 @@ Reduction JSCreateLowering::ReduceJSCreateArguments(Node* node) {
return Changed(node);
}
case CreateArgumentsType::kRestParameter: {
- int start_index = shared.internal_formal_parameter_count();
+ int start_index =
+ shared.internal_formal_parameter_count_without_receiver();
// Use inline allocation for all unmapped arguments objects within inlined
// (i.e. non-outermost) frames, independent of the object size.
Node* effect = NodeProperties::GetEffectInput(node);
@@ -401,7 +402,8 @@ Reduction JSCreateLowering::ReduceJSCreateGeneratorObject(Node* node) {
// Allocate a register file.
SharedFunctionInfoRef shared = js_function.shared();
DCHECK(shared.HasBytecodeArray());
- int parameter_count_no_receiver = shared.internal_formal_parameter_count();
+ int parameter_count_no_receiver =
+ shared.internal_formal_parameter_count_without_receiver();
int length = parameter_count_no_receiver +
shared.GetBytecodeArray().register_count();
MapRef fixed_array_map = MakeRef(broker(), factory()->fixed_array_map());
@@ -466,9 +468,10 @@ Reduction JSCreateLowering::ReduceNewArray(
// Constructing an Array via new Array(N) where N is an unsigned
// integer, always creates a holey backing store.
- ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
- initial_map,
- initial_map.AsElementsKind(GetHoleyElementsKind(elements_kind)));
+ base::Optional<MapRef> maybe_initial_map =
+ initial_map.AsElementsKind(GetHoleyElementsKind(elements_kind));
+ if (!maybe_initial_map.has_value()) return NoChange();
+ initial_map = maybe_initial_map.value();
// Because CheckBounds performs implicit conversion from string to number, an
// additional CheckNumber is required to behave correctly for calls with a
@@ -525,8 +528,12 @@ Reduction JSCreateLowering::ReduceNewArray(
if (NodeProperties::GetType(length).Max() > 0.0) {
elements_kind = GetHoleyElementsKind(elements_kind);
}
- ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
- initial_map, initial_map.AsElementsKind(elements_kind));
+
+ base::Optional<MapRef> maybe_initial_map =
+ initial_map.AsElementsKind(elements_kind);
+ if (!maybe_initial_map.has_value()) return NoChange();
+ initial_map = maybe_initial_map.value();
+
DCHECK(IsFastElementsKind(elements_kind));
// Setup elements and properties.
@@ -566,8 +573,11 @@ Reduction JSCreateLowering::ReduceNewArray(
// Determine the appropriate elements kind.
DCHECK(IsFastElementsKind(elements_kind));
- ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(
- initial_map, initial_map.AsElementsKind(elements_kind));
+
+ base::Optional<MapRef> maybe_initial_map =
+ initial_map.AsElementsKind(elements_kind);
+ if (!maybe_initial_map.has_value()) return NoChange();
+ initial_map = maybe_initial_map.value();
// Check {values} based on the {elements_kind}. These checks are guarded
// by the {elements_kind} feedback on the {site}, so it's safe to just
@@ -1479,7 +1489,8 @@ Node* JSCreateLowering::TryAllocateAliasedArguments(
// If there is no aliasing, the arguments object elements are not special in
// any way, so we can just return an unmapped backing store instead.
- int parameter_count = shared.internal_formal_parameter_count();
+ int parameter_count =
+ shared.internal_formal_parameter_count_without_receiver();
if (parameter_count == 0) {
return TryAllocateArguments(effect, control, frame_state);
}
@@ -1545,7 +1556,8 @@ Node* JSCreateLowering::TryAllocateAliasedArguments(
const SharedFunctionInfoRef& shared, bool* has_aliased_arguments) {
// If there is no aliasing, the arguments object elements are not
// special in any way, so we can just return an unmapped backing store.
- int parameter_count = shared.internal_formal_parameter_count();
+ int parameter_count =
+ shared.internal_formal_parameter_count_without_receiver();
if (parameter_count == 0) {
return graph()->NewNode(
simplified()->NewArgumentsElements(
@@ -1713,7 +1725,6 @@ base::Optional<Node*> JSCreateLowering::TryAllocateFastLiteral(
Type::Any(),
MachineType::AnyTagged(),
kFullWriteBarrier,
- LoadSensitivity::kUnsafe,
const_field_info};
// Note: the use of RawInobjectPropertyAt (vs. the higher-level
diff --git a/deps/v8/src/compiler/js-generic-lowering.cc b/deps/v8/src/compiler/js-generic-lowering.cc
index bbc47e45ad..08896e3f11 100644
--- a/deps/v8/src/compiler/js-generic-lowering.cc
+++ b/deps/v8/src/compiler/js-generic-lowering.cc
@@ -586,7 +586,7 @@ void JSGenericLowering::LowerJSCreateArray(Node* node) {
// between top of stack and JS arguments.
DCHECK_EQ(interface_descriptor.GetStackParameterCount(), 0);
Node* stub_code = jsgraph()->ArrayConstructorStubConstant();
- Node* stub_arity = jsgraph()->Int32Constant(arity);
+ Node* stub_arity = jsgraph()->Int32Constant(JSParameterCount(arity));
base::Optional<AllocationSiteRef> const site = p.site(broker());
Node* type_info = site.has_value() ? jsgraph()->Constant(site.value())
: jsgraph()->UndefinedConstant();
@@ -820,7 +820,7 @@ void JSGenericLowering::LowerJSConstructForwardVarargs(Node* node) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* stub_arity = jsgraph()->Int32Constant(JSParameterCount(arg_count));
Node* start_index = jsgraph()->Uint32Constant(p.start_index());
Node* receiver = jsgraph()->UndefinedConstant();
node->InsertInput(zone(), 0, stub_code);
@@ -843,7 +843,7 @@ void JSGenericLowering::LowerJSConstruct(Node* node) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), stack_argument_count, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* stub_arity = jsgraph()->Int32Constant(JSParameterCount(arg_count));
Node* receiver = jsgraph()->UndefinedConstant();
node->RemoveInput(n.FeedbackVectorIndex());
node->InsertInput(zone(), 0, stub_code);
@@ -906,7 +906,8 @@ void JSGenericLowering::LowerJSConstructWithSpread(Node* node) {
Node* stub_code = jsgraph()->HeapConstant(callable.code());
// We pass the spread in a register, not on the stack.
- Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
+ Node* stub_arity =
+ jsgraph()->Int32Constant(JSParameterCount(arg_count - kTheSpread));
Node* receiver = jsgraph()->UndefinedConstant();
DCHECK(n.FeedbackVectorIndex() > n.LastArgumentIndex());
node->RemoveInput(n.FeedbackVectorIndex());
@@ -930,7 +931,7 @@ void JSGenericLowering::LowerJSCallForwardVarargs(Node* node) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* stub_arity = jsgraph()->Int32Constant(JSParameterCount(arg_count));
Node* start_index = jsgraph()->Uint32Constant(p.start_index());
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 2, stub_arity);
@@ -951,7 +952,7 @@ void JSGenericLowering::LowerJSCall(Node* node) {
auto call_descriptor = Linkage::GetStubCallDescriptor(
zone(), callable.descriptor(), arg_count + 1, flags);
Node* stub_code = jsgraph()->HeapConstant(callable.code());
- Node* stub_arity = jsgraph()->Int32Constant(arg_count);
+ Node* stub_arity = jsgraph()->Int32Constant(JSParameterCount(arg_count));
node->InsertInput(zone(), 0, stub_code);
node->InsertInput(zone(), 2, stub_arity);
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
@@ -1009,7 +1010,8 @@ void JSGenericLowering::LowerJSCallWithSpread(Node* node) {
Node* stub_code = jsgraph()->HeapConstant(callable.code());
// We pass the spread in a register, not on the stack.
- Node* stub_arity = jsgraph()->Int32Constant(arg_count - kTheSpread);
+ Node* stub_arity =
+ jsgraph()->Int32Constant(JSParameterCount(arg_count - kTheSpread));
// Shuffling inputs.
// Before: {target, receiver, ...args, spread, vector}.
diff --git a/deps/v8/src/compiler/js-heap-broker.cc b/deps/v8/src/compiler/js-heap-broker.cc
index dc34bcae6d..0007a582a0 100644
--- a/deps/v8/src/compiler/js-heap-broker.cc
+++ b/deps/v8/src/compiler/js-heap-broker.cc
@@ -50,12 +50,10 @@ JSHeapBroker::JSHeapBroker(Isolate* isolate, Zone* broker_zone,
array_and_object_prototypes_(zone()),
tracing_enabled_(tracing_enabled),
is_concurrent_inlining_(is_concurrent_inlining),
- is_isolate_bootstrapping_(isolate->bootstrapper()->IsActive()),
code_kind_(code_kind),
feedback_(zone()),
property_access_infos_(zone()),
- minimorphic_property_access_infos_(zone()),
- typed_array_string_tags_(zone()) {
+ minimorphic_property_access_infos_(zone()) {
// Note that this initialization of {refs_} with the minimal initial capacity
// is redundant in the normal use case (concurrent compilation enabled,
// standard objects to be serialized), as the map is going to be replaced
@@ -220,20 +218,6 @@ bool JSHeapBroker::ObjectMayBeUninitialized(HeapObject object) const {
return !IsMainThread() && isolate()->heap()->IsPendingAllocation(object);
}
-bool CanInlineElementAccess(MapRef const& map) {
- if (!map.IsJSObjectMap()) return false;
- if (map.is_access_check_needed()) return false;
- if (map.has_indexed_interceptor()) return false;
- ElementsKind const elements_kind = map.elements_kind();
- if (IsFastElementsKind(elements_kind)) return true;
- if (IsTypedArrayElementsKind(elements_kind) &&
- elements_kind != BIGUINT64_ELEMENTS &&
- elements_kind != BIGINT64_ELEMENTS) {
- return true;
- }
- return false;
-}
-
ProcessedFeedback::ProcessedFeedback(Kind kind, FeedbackSlotKind slot_kind)
: kind_(kind), slot_kind_(slot_kind) {}
@@ -423,7 +407,10 @@ ElementAccessFeedback::ElementAccessFeedback(Zone* zone,
bool ElementAccessFeedback::HasOnlyStringMaps(JSHeapBroker* broker) const {
for (auto const& group : transition_groups()) {
for (Handle<Map> map : group) {
- if (!MakeRef(broker, map).IsStringMap()) return false;
+ // We assume a memory fence because {map} was read earlier from
+ // the feedback vector and was store-ordered on insertion into the
+ // vector.
+ if (!MakeRefAssumeMemoryFence(broker, map).IsStringMap()) return false;
}
}
return true;
@@ -880,11 +867,7 @@ ElementAccessFeedback const& JSHeapBroker::ProcessFeedbackMapsForElementAccess(
MapHandles possible_transition_targets;
possible_transition_targets.reserve(maps.size());
for (MapRef& map : maps) {
- if (!is_concurrent_inlining()) {
- map.SerializeRootMap(NotConcurrentInliningTag{this});
- }
-
- if (CanInlineElementAccess(map) &&
+ if (map.CanInlineElementAccess() &&
IsFastElementsKind(map.elements_kind()) &&
GetInitialFastElementsKind() != map.elements_kind()) {
possible_transition_targets.push_back(map.object());
@@ -992,9 +975,13 @@ MinimorphicLoadPropertyAccessInfo JSHeapBroker::GetPropertyAccessInfo(
MinimorphicLoadPropertyAccessInfo access_info =
factory.ComputePropertyAccessInfo(feedback);
if (is_concurrent_inlining_) {
+ // We can assume a memory fence on {source.vector} because in production,
+ // the vector has already passed the gc predicate. Unit tests create
+ // FeedbackSource objects directly from handles, but they run on
+ // the main thread.
TRACE(this, "Storing MinimorphicLoadPropertyAccessInfo for "
<< source.index() << " "
- << MakeRef<Object>(this, source.vector));
+ << MakeRefAssumeMemoryFence<Object>(this, source.vector));
minimorphic_property_access_infos_.insert({source, access_info});
}
return access_info;
diff --git a/deps/v8/src/compiler/js-heap-broker.h b/deps/v8/src/compiler/js-heap-broker.h
index 91b94bebb5..bf9b9aaac0 100644
--- a/deps/v8/src/compiler/js-heap-broker.h
+++ b/deps/v8/src/compiler/js-heap-broker.h
@@ -117,7 +117,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
Zone* zone() const { return zone_; }
bool tracing_enabled() const { return tracing_enabled_; }
bool is_concurrent_inlining() const { return is_concurrent_inlining_; }
- bool is_isolate_bootstrapping() const { return is_isolate_bootstrapping_; }
bool is_turboprop() const { return code_kind_ == CodeKind::TURBOPROP; }
NexusConfig feedback_nexus_config() const {
@@ -173,7 +172,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
ProcessedFeedback const* feedback);
FeedbackSlotKind GetFeedbackSlotKind(FeedbackSource const& source) const;
- // TODO(neis): Move these into serializer when we're always in the background.
ElementAccessFeedback const& ProcessFeedbackMapsForElementAccess(
ZoneVector<MapRef>& maps, KeyedAccessMode const& keyed_mode,
FeedbackSlotKind slot_kind);
@@ -291,8 +289,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
void IncrementTracingIndentation();
void DecrementTracingIndentation();
- RootIndexMap const& root_index_map() { return root_index_map_; }
-
// Locks {mutex} through the duration of this scope iff it is the first
// occurrence. This is done to have a recursive shared lock on {mutex}.
class V8_NODISCARD RecursiveSharedMutexGuardIfNeeded {
@@ -389,8 +385,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
void CollectArrayAndObjectPrototypes();
- PerIsolateCompilerCache* compiler_cache() const { return compiler_cache_; }
-
void set_persistent_handles(
std::unique_ptr<PersistentHandles> persistent_handles) {
DCHECK_NULL(ph_);
@@ -419,7 +413,7 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
std::unique_ptr<CanonicalHandlesMap> canonical_handles);
Isolate* const isolate_;
- Zone* const zone_ = nullptr;
+ Zone* const zone_;
base::Optional<NativeContextRef> target_native_context_;
RefsMap* refs_;
RootIndexMap root_index_map_;
@@ -429,13 +423,11 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
BrokerMode mode_ = kDisabled;
bool const tracing_enabled_;
bool const is_concurrent_inlining_;
- bool const is_isolate_bootstrapping_;
CodeKind const code_kind_;
std::unique_ptr<PersistentHandles> ph_;
LocalIsolate* local_isolate_ = nullptr;
std::unique_ptr<CanonicalHandlesMap> canonical_handles_;
unsigned trace_indentation_ = 0;
- PerIsolateCompilerCache* compiler_cache_ = nullptr;
ZoneUnorderedMap<FeedbackSource, ProcessedFeedback const*,
FeedbackSource::Hash, FeedbackSource::Equal>
feedback_;
@@ -446,8 +438,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
FeedbackSource::Hash, FeedbackSource::Equal>
minimorphic_property_access_infos_;
- ZoneVector<ObjectData*> typed_array_string_tags_;
-
CompilationDependencies* dependencies_ = nullptr;
// The MapUpdater mutex is used in recursive patterns; for example,
@@ -460,7 +450,6 @@ class V8_EXPORT_PRIVATE JSHeapBroker {
// Likewise for boilerplate migrations.
int boilerplate_migration_mutex_depth_ = 0;
- static constexpr size_t kMaxSerializedFunctionsCacheSize = 200;
static constexpr uint32_t kMinimalRefsBucketCount = 8;
STATIC_ASSERT(base::bits::IsPowerOfTwo(kMinimalRefsBucketCount));
static constexpr uint32_t kInitialRefsBucketCount = 1024;
@@ -487,21 +476,6 @@ class V8_NODISCARD TraceScope {
JSHeapBroker* const broker_;
};
-#define ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(something_var, \
- optionally_something) \
- auto optionally_something_ = optionally_something; \
- if (!optionally_something_) \
- return NoChangeBecauseOfMissingData(broker(), __FUNCTION__, __LINE__); \
- something_var = *optionally_something_;
-
-class Reduction;
-Reduction NoChangeBecauseOfMissingData(JSHeapBroker* broker,
- const char* function, int line);
-
-// Miscellaneous definitions that should be moved elsewhere once concurrent
-// compilation is finished.
-bool CanInlineElementAccess(MapRef const& map);
-
// Scope that unparks the LocalHeap, if:
// a) We have a JSHeapBroker,
// b) Said JSHeapBroker has a LocalIsolate and thus a LocalHeap,
diff --git a/deps/v8/src/compiler/js-inlining-heuristic.cc b/deps/v8/src/compiler/js-inlining-heuristic.cc
index 177f35c7a0..c6a223b600 100644
--- a/deps/v8/src/compiler/js-inlining-heuristic.cc
+++ b/deps/v8/src/compiler/js-inlining-heuristic.cc
@@ -27,8 +27,40 @@ bool IsSmall(int const size) {
}
bool CanConsiderForInlining(JSHeapBroker* broker,
- SharedFunctionInfoRef const& shared,
- FeedbackVectorRef const& feedback_vector) {
+ FeedbackCellRef const& feedback_cell) {
+ base::Optional<FeedbackVectorRef> feedback_vector =
+ feedback_cell.feedback_vector();
+ if (!feedback_vector.has_value()) {
+ TRACE("Cannot consider " << feedback_cell
+ << " for inlining (no feedback vector)");
+ return false;
+ }
+ SharedFunctionInfoRef shared = feedback_vector->shared_function_info();
+
+ if (!shared.HasBytecodeArray()) {
+ TRACE("Cannot consider " << shared << " for inlining (no bytecode)");
+ return false;
+ }
+ // Ensure we have a persistent handle to the bytecode in order to avoid
+ // flushing it during the remaining compilation.
+ shared.GetBytecodeArray();
+
+ // Read feedback vector again in case it got flushed before we were able to
+ // prevent flushing above.
+ base::Optional<FeedbackVectorRef> feedback_vector_again =
+ feedback_cell.feedback_vector();
+ if (!feedback_vector_again.has_value()) {
+ TRACE("Cannot consider " << shared << " for inlining (no feedback vector)");
+ return false;
+ }
+ if (!feedback_vector_again->equals(*feedback_vector)) {
+ // The new feedback vector likely contains lots of uninitialized slots, so
+ // it doesn't make much sense to inline this function now.
+ TRACE("Not considering " << shared
+ << " for inlining (feedback vector changed)");
+ return false;
+ }
+
SharedFunctionInfo::Inlineability inlineability = shared.GetInlineability();
if (inlineability != SharedFunctionInfo::kIsInlineable) {
TRACE("Cannot consider "
@@ -36,22 +68,20 @@ bool CanConsiderForInlining(JSHeapBroker* broker,
return false;
}
- DCHECK(shared.HasBytecodeArray());
- TRACE("Considering " << shared << " for inlining with " << feedback_vector);
+ TRACE("Considering " << shared << " for inlining with " << *feedback_vector);
return true;
}
bool CanConsiderForInlining(JSHeapBroker* broker,
JSFunctionRef const& function) {
- if (!function.has_feedback_vector(broker->dependencies())) {
- TRACE("Cannot consider " << function
- << " for inlining (no feedback vector)");
- return false;
- }
-
- return CanConsiderForInlining(
- broker, function.shared(),
- function.feedback_vector(broker->dependencies()));
+ FeedbackCellRef feedback_cell =
+ function.raw_feedback_cell(broker->dependencies());
+ bool const result = CanConsiderForInlining(broker, feedback_cell);
+ if (result) {
+ CHECK(
+ function.shared().equals(feedback_cell.shared_function_info().value()));
+ }
+ return result;
}
} // namespace
@@ -65,8 +95,8 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
HeapObjectMatcher m(callee);
if (m.HasResolvedValue() && m.Ref(broker()).IsJSFunction()) {
- out.functions[0] = m.Ref(broker()).AsJSFunction();
- JSFunctionRef function = out.functions[0].value();
+ JSFunctionRef function = m.Ref(broker()).AsJSFunction();
+ out.functions[0] = function;
if (CanConsiderForInlining(broker(), function)) {
out.bytecode[0] = function.shared().GetBytecodeArray();
out.num_functions = 1;
@@ -98,10 +128,9 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
if (m.IsCheckClosure()) {
DCHECK(!out.functions[0].has_value());
FeedbackCellRef feedback_cell = MakeRef(broker(), FeedbackCellOf(m.op()));
- SharedFunctionInfoRef shared_info = *feedback_cell.shared_function_info();
- out.shared_info = shared_info;
- if (CanConsiderForInlining(broker(), shared_info, *feedback_cell.value())) {
- out.bytecode[0] = shared_info.GetBytecodeArray();
+ if (CanConsiderForInlining(broker(), feedback_cell)) {
+ out.shared_info = feedback_cell.shared_function_info().value();
+ out.bytecode[0] = out.shared_info->GetBytecodeArray();
}
out.num_functions = 1;
return out;
@@ -109,13 +138,11 @@ JSInliningHeuristic::Candidate JSInliningHeuristic::CollectFunctions(
if (m.IsJSCreateClosure()) {
DCHECK(!out.functions[0].has_value());
JSCreateClosureNode n(callee);
- CreateClosureParameters const& p = n.Parameters();
FeedbackCellRef feedback_cell = n.GetFeedbackCellRefChecked(broker());
- SharedFunctionInfoRef shared_info = p.shared_info(broker());
- out.shared_info = shared_info;
- if (feedback_cell.value().has_value() &&
- CanConsiderForInlining(broker(), shared_info, *feedback_cell.value())) {
- out.bytecode[0] = shared_info.GetBytecodeArray();
+ if (CanConsiderForInlining(broker(), feedback_cell)) {
+ out.shared_info = feedback_cell.shared_function_info().value();
+ out.bytecode[0] = out.shared_info->GetBytecodeArray();
+ CHECK(out.shared_info->equals(n.Parameters().shared_info(broker())));
}
out.num_functions = 1;
return out;
diff --git a/deps/v8/src/compiler/js-inlining.cc b/deps/v8/src/compiler/js-inlining.cc
index a17a43ecd2..deb8345bf7 100644
--- a/deps/v8/src/compiler/js-inlining.cc
+++ b/deps/v8/src/compiler/js-inlining.cc
@@ -305,7 +305,7 @@ base::Optional<SharedFunctionInfoRef> JSInliner::DetermineCallTarget(
JSFunctionRef function = match.Ref(broker()).AsJSFunction();
// The function might not have been called yet.
- if (!function.has_feedback_vector(broker()->dependencies())) {
+ if (!function.feedback_vector(broker()->dependencies()).has_value()) {
return base::nullopt;
}
@@ -355,7 +355,7 @@ FeedbackCellRef JSInliner::DetermineCallContext(Node* node,
if (match.HasResolvedValue() && match.Ref(broker()).IsJSFunction()) {
JSFunctionRef function = match.Ref(broker()).AsJSFunction();
// This was already ensured by DetermineCallTarget
- CHECK(function.has_feedback_vector(broker()->dependencies()));
+ CHECK(function.feedback_vector(broker()->dependencies()).has_value());
// The inlinee specializes to the context from the JSFunction object.
*context_out = jsgraph()->Constant(function.context());
@@ -709,7 +709,8 @@ Reduction JSInliner::ReduceJSCall(Node* node) {
// Insert argument adaptor frame if required. The callee's formal parameter
// count has to match the number of arguments passed
// to the call.
- int parameter_count = shared_info->internal_formal_parameter_count();
+ int parameter_count =
+ shared_info->internal_formal_parameter_count_without_receiver();
DCHECK_EQ(parameter_count, start.FormalParameterCountWithoutReceiver());
if (call.argument_count() != parameter_count) {
frame_state = CreateArtificialFrameState(
diff --git a/deps/v8/src/compiler/js-native-context-specialization.cc b/deps/v8/src/compiler/js-native-context-specialization.cc
index e03e0d41a3..cdbc4848cc 100644
--- a/deps/v8/src/compiler/js-native-context-specialization.cc
+++ b/deps/v8/src/compiler/js-native-context-specialization.cc
@@ -230,8 +230,9 @@ Reduction JSNativeContextSpecialization::ReduceJSAsyncFunctionEnter(
broker(),
FrameStateInfoOf(frame_state->op()).shared_info().ToHandleChecked());
DCHECK(shared.is_compiled());
- int register_count = shared.internal_formal_parameter_count() +
- shared.GetBytecodeArray().register_count();
+ int register_count =
+ shared.internal_formal_parameter_count_without_receiver() +
+ shared.GetBytecodeArray().register_count();
MapRef fixed_array_map = MakeRef(broker(), factory()->fixed_array_map());
AllocationBuilder ab(jsgraph(), effect, control);
if (!ab.CanAllocateArray(register_count, fixed_array_map)) {
@@ -617,15 +618,11 @@ Reduction JSNativeContextSpecialization::ReduceJSOrdinaryHasInstance(
// OrdinaryHasInstance on bound functions turns into a recursive invocation
// of the instanceof operator again.
JSBoundFunctionRef function = m.Ref(broker()).AsJSBoundFunction();
- base::Optional<JSReceiverRef> bound_target_function =
- function.bound_target_function();
- if (bound_target_function.has_value()) return NoChange();
-
Node* feedback = jsgraph()->UndefinedConstant();
NodeProperties::ReplaceValueInput(node, object,
JSInstanceOfNode::LeftIndex());
NodeProperties::ReplaceValueInput(
- node, jsgraph()->Constant(*bound_target_function),
+ node, jsgraph()->Constant(function.bound_target_function()),
JSInstanceOfNode::RightIndex());
node->InsertInput(zone(), JSInstanceOfNode::FeedbackVectorIndex(),
feedback);
@@ -970,6 +967,7 @@ Reduction JSNativeContextSpecialization::ReduceGlobalAccess(
break;
}
case PropertyCellType::kUndefined:
+ case PropertyCellType::kInTransition:
UNREACHABLE();
}
}
@@ -1635,8 +1633,7 @@ void JSNativeContextSpecialization::RemoveImpossibleMaps(
maps->erase(std::remove_if(maps->begin(), maps->end(),
[root_map](const MapRef& map) {
return map.is_abandoned_prototype_map() ||
- (map.FindRootMap().has_value() &&
- !map.FindRootMap()->equals(*root_map));
+ !map.FindRootMap().equals(*root_map);
}),
maps->end());
}
@@ -1747,14 +1744,8 @@ Reduction JSNativeContextSpecialization::ReduceElementAccess(
}
}
- // Check if we have the necessary data for building element accesses.
for (ElementAccessInfo const& access_info : access_infos) {
if (!IsTypedArrayElementsKind(access_info.elements_kind())) continue;
- base::Optional<JSTypedArrayRef> typed_array =
- GetTypedArrayConstant(broker(), receiver);
- if (typed_array.has_value() && !typed_array->serialized()) {
- return NoChange();
- }
}
// Check for the monomorphic case.
@@ -2256,10 +2247,6 @@ void JSNativeContextSpecialization::InlinePropertySetterCall(
Node* JSNativeContextSpecialization::InlineApiCall(
Node* receiver, Node* holder, Node* frame_state, Node* value, Node** effect,
Node** control, FunctionTemplateInfoRef const& function_template_info) {
- if (!function_template_info.has_call_code()) {
- return nullptr;
- }
-
if (!function_template_info.call_code().has_value()) {
TRACE_BROKER_MISSING(broker(), "call code for function template info "
<< function_template_info);
@@ -2449,7 +2436,6 @@ JSNativeContextSpecialization::BuildPropertyStore(
field_type,
MachineType::TypeForRepresentation(field_representation),
kFullWriteBarrier,
- LoadSensitivity::kUnsafe,
access_info.GetConstFieldInfo(),
access_mode == AccessMode::kStoreInLiteral};
@@ -2483,7 +2469,6 @@ JSNativeContextSpecialization::BuildPropertyStore(
Type::OtherInternal(),
MachineType::TaggedPointer(),
kPointerWriteBarrier,
- LoadSensitivity::kUnsafe,
access_info.GetConstFieldInfo(),
access_mode == AccessMode::kStoreInLiteral};
storage = effect =
@@ -2789,10 +2774,8 @@ JSNativeContextSpecialization::BuildElementAccess(
if (situation == kHandleOOB_SmiCheckDone) {
Node* check =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch = graph()->NewNode(
- common()->Branch(BranchHint::kTrue,
- IsSafetyCheck::kCriticalSafetyCheck),
- check, control);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* etrue = effect;
@@ -2980,10 +2963,9 @@ JSNativeContextSpecialization::BuildElementAccess(
element_type = Type::SignedSmall();
element_machine_type = MachineType::TaggedSigned();
}
- ElementAccess element_access = {
- kTaggedBase, FixedArray::kHeaderSize,
- element_type, element_machine_type,
- kFullWriteBarrier, LoadSensitivity::kCritical};
+ ElementAccess element_access = {kTaggedBase, FixedArray::kHeaderSize,
+ element_type, element_machine_type,
+ kFullWriteBarrier};
// Access the actual element.
if (keyed_mode.access_mode() == AccessMode::kLoad) {
@@ -3003,10 +2985,8 @@ JSNativeContextSpecialization::BuildElementAccess(
CanTreatHoleAsUndefined(receiver_maps)) {
Node* check =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
- Node* branch = graph()->NewNode(
- common()->Branch(BranchHint::kTrue,
- IsSafetyCheck::kCriticalSafetyCheck),
- check, control);
+ Node* branch = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+ check, control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
Node* etrue = effect;
@@ -3289,9 +3269,7 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
Node* check =
graph()->NewNode(simplified()->NumberLessThan(), index, length);
Node* branch =
- graph()->NewNode(common()->Branch(BranchHint::kTrue,
- IsSafetyCheck::kCriticalSafetyCheck),
- check, *control);
+ graph()->NewNode(common()->Branch(BranchHint::kTrue), check, *control);
Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
// Do a real bounds check against {length}. This is in order to protect
@@ -3302,10 +3280,8 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
CheckBoundsFlag::kConvertStringAndMinusZero |
CheckBoundsFlag::kAbortOnOutOfBounds),
index, length, *effect, if_true);
- Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index);
- Node* vtrue = etrue =
- graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
- masked_index, etrue, if_true);
+ Node* vtrue = etrue = graph()->NewNode(simplified()->StringCharCodeAt(),
+ receiver, index, etrue, if_true);
vtrue = graph()->NewNode(simplified()->StringFromSingleCharCode(), vtrue);
Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
@@ -3323,12 +3299,9 @@ Node* JSNativeContextSpecialization::BuildIndexedStringLoad(
CheckBoundsFlag::kConvertStringAndMinusZero),
index, length, *effect, *control);
- Node* masked_index = graph()->NewNode(simplified()->PoisonIndex(), index);
-
// Return the character from the {receiver} as a single character string.
- Node* value = *effect =
- graph()->NewNode(simplified()->StringCharCodeAt(), receiver,
- masked_index, *effect, *control);
+ Node* value = *effect = graph()->NewNode(
+ simplified()->StringCharCodeAt(), receiver, index, *effect, *control);
value = graph()->NewNode(simplified()->StringFromSingleCharCode(), value);
return value;
}
@@ -3465,10 +3438,7 @@ base::Optional<MapRef> JSNativeContextSpecialization::InferRootMap(
base::Optional<MapRef> initial_map =
NodeProperties::GetJSCreateMap(broker(), object);
if (initial_map.has_value()) {
- if (!initial_map->FindRootMap().has_value()) {
- return base::nullopt;
- }
- DCHECK(initial_map->equals(*initial_map->FindRootMap()));
+ DCHECK(initial_map->equals(initial_map->FindRootMap()));
return *initial_map;
}
}
diff --git a/deps/v8/src/compiler/js-typed-lowering.cc b/deps/v8/src/compiler/js-typed-lowering.cc
index e986ef1baf..8d67e41751 100644
--- a/deps/v8/src/compiler/js-typed-lowering.cc
+++ b/deps/v8/src/compiler/js-typed-lowering.cc
@@ -998,9 +998,9 @@ Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
HeapObjectMatcher m(input);
if (m.HasResolvedValue() && m.Ref(broker()).IsString()) {
StringRef input_value = m.Ref(broker()).AsString();
- double number;
- ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(number, input_value.ToNumber());
- return Replace(jsgraph()->Constant(number));
+ base::Optional<double> number = input_value.ToNumber();
+ if (!number.has_value()) return NoChange();
+ return Replace(jsgraph()->Constant(number.value()));
}
}
if (input_type.IsHeapConstant()) {
@@ -1595,7 +1595,8 @@ Reduction JSTypedLowering::ReduceJSConstructForwardVarargs(Node* node) {
Callable callable = CodeFactory::ConstructFunctionForwardVarargs(isolate());
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 3,
+ jsgraph()->Constant(JSParameterCount(arity)));
node->InsertInput(graph()->zone(), 4, jsgraph()->Constant(start_index));
node->InsertInput(graph()->zone(), 5, jsgraph()->UndefinedConstant());
NodeProperties::ChangeOp(
@@ -1633,7 +1634,8 @@ Reduction JSTypedLowering::ReduceJSConstruct(Node* node) {
STATIC_ASSERT(JSConstructNode::NewTargetIndex() == 1);
node->RemoveInput(n.FeedbackVectorIndex());
node->InsertInput(graph()->zone(), 0, jsgraph()->Constant(code));
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 3,
+ jsgraph()->Constant(JSParameterCount(arity)));
node->InsertInput(graph()->zone(), 4, jsgraph()->UndefinedConstant());
node->InsertInput(graph()->zone(), 5, jsgraph()->UndefinedConstant());
NodeProperties::ChangeOp(
@@ -1663,7 +1665,8 @@ Reduction JSTypedLowering::ReduceJSCallForwardVarargs(Node* node) {
Callable callable = CodeFactory::CallFunctionForwardVarargs(isolate());
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
- node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 2,
+ jsgraph()->Constant(JSParameterCount(arity)));
node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(start_index));
NodeProperties::ChangeOp(
node, common()->Call(Linkage::GetStubCallDescriptor(
@@ -1750,8 +1753,11 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
CallDescriptor::Flags flags = CallDescriptor::kNeedsFrameState;
Node* new_target = jsgraph()->UndefinedConstant();
- int formal_count = shared->internal_formal_parameter_count();
- if (formal_count != kDontAdaptArgumentsSentinel && formal_count > arity) {
+ int formal_count =
+ shared->internal_formal_parameter_count_without_receiver();
+ // TODO(v8:11112): Once the sentinel is always 0, the check against
+ // IsDontAdaptArguments() can be removed.
+ if (!shared->IsDontAdaptArguments() && formal_count > arity) {
node->RemoveInput(n.FeedbackVectorIndex());
// Underapplication. Massage the arguments to match the expected number of
// arguments.
@@ -1763,7 +1769,7 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
// Patch {node} to a direct call.
node->InsertInput(graph()->zone(), formal_count + 2, new_target);
node->InsertInput(graph()->zone(), formal_count + 3,
- jsgraph()->Constant(arity));
+ jsgraph()->Constant(JSParameterCount(arity)));
NodeProperties::ChangeOp(node,
common()->Call(Linkage::GetJSCallDescriptor(
graph()->zone(), false, 1 + formal_count,
@@ -1786,13 +1792,15 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
node->RemoveInput(n.FeedbackVectorIndex());
node->InsertInput(graph()->zone(), 0, stub_code); // Code object.
node->InsertInput(graph()->zone(), 2, new_target);
- node->InsertInput(graph()->zone(), 3, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 3,
+ jsgraph()->Constant(JSParameterCount(arity)));
NodeProperties::ChangeOp(node, common()->Call(call_descriptor));
} else {
// Patch {node} to a direct call.
node->RemoveInput(n.FeedbackVectorIndex());
node->InsertInput(graph()->zone(), arity + 2, new_target);
- node->InsertInput(graph()->zone(), arity + 3, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), arity + 3,
+ jsgraph()->Constant(JSParameterCount(arity)));
NodeProperties::ChangeOp(node,
common()->Call(Linkage::GetJSCallDescriptor(
graph()->zone(), false, 1 + arity,
@@ -1811,7 +1819,8 @@ Reduction JSTypedLowering::ReduceJSCall(Node* node) {
Callable callable = CodeFactory::CallFunction(isolate(), convert_mode);
node->InsertInput(graph()->zone(), 0,
jsgraph()->HeapConstant(callable.code()));
- node->InsertInput(graph()->zone(), 2, jsgraph()->Constant(arity));
+ node->InsertInput(graph()->zone(), 2,
+ jsgraph()->Constant(JSParameterCount(arity)));
NodeProperties::ChangeOp(
node, common()->Call(Linkage::GetStubCallDescriptor(
graph()->zone(), callable.descriptor(), 1 + arity, flags)));
diff --git a/deps/v8/src/compiler/linkage.cc b/deps/v8/src/compiler/linkage.cc
index fac24e802d..fec0040b61 100644
--- a/deps/v8/src/compiler/linkage.cc
+++ b/deps/v8/src/compiler/linkage.cc
@@ -219,9 +219,10 @@ CallDescriptor* Linkage::ComputeIncoming(Zone* zone,
// If we are compiling a JS function, use a JS call descriptor,
// plus the receiver.
SharedFunctionInfo shared = info->closure()->shared();
- return GetJSCallDescriptor(zone, info->is_osr(),
- 1 + shared.internal_formal_parameter_count(),
- CallDescriptor::kCanUseRoots);
+ return GetJSCallDescriptor(
+ zone, info->is_osr(),
+ shared.internal_formal_parameter_count_with_receiver(),
+ CallDescriptor::kCanUseRoots);
}
return nullptr; // TODO(titzer): ?
}
diff --git a/deps/v8/src/compiler/linkage.h b/deps/v8/src/compiler/linkage.h
index 8b33444b29..707c7d98ab 100644
--- a/deps/v8/src/compiler/linkage.h
+++ b/deps/v8/src/compiler/linkage.h
@@ -214,15 +214,13 @@ class V8_EXPORT_PRIVATE CallDescriptor final
kInitializeRootRegister = 1u << 3,
// Does not ever try to allocate space on our heap.
kNoAllocate = 1u << 4,
- // Use retpoline for this call if indirect.
- kRetpoline = 1u << 5,
// Use the kJavaScriptCallCodeStartRegister (fixed) register for the
// indirect target address when calling.
- kFixedTargetRegister = 1u << 6,
- kCallerSavedRegisters = 1u << 7,
+ kFixedTargetRegister = 1u << 5,
+ kCallerSavedRegisters = 1u << 6,
// The kCallerSavedFPRegisters only matters (and set) when the more general
// flag for kCallerSavedRegisters above is also set.
- kCallerSavedFPRegisters = 1u << 8,
+ kCallerSavedFPRegisters = 1u << 7,
// Tail calls for tier up are special (in fact they are different enough
// from normal tail calls to warrant a dedicated opcode; but they also have
// enough similar aspects that reusing the TailCall opcode is pragmatic).
@@ -238,15 +236,15 @@ class V8_EXPORT_PRIVATE CallDescriptor final
//
// In other words, behavior is identical to a jmp instruction prior to caller
// frame construction.
- kIsTailCallForTierUp = 1u << 9,
+ kIsTailCallForTierUp = 1u << 8,
+
+ // AIX has a function descriptor by default but it can be disabled for a
+ // certain CFunction call (only used for Kind::kCallAddress).
+ kNoFunctionDescriptor = 1u << 9,
// Flags past here are *not* encoded in InstructionCode and are thus not
// accessible from the code generator. See also
// kFlagsBitsEncodedInInstructionCode.
-
- // AIX has a function descriptor by default but it can be disabled for a
- // certain CFunction call (only used for Kind::kCallAddress).
- kNoFunctionDescriptor = 1u << 10,
};
using Flags = base::Flags<Flag>;
diff --git a/deps/v8/src/compiler/loop-analysis.cc b/deps/v8/src/compiler/loop-analysis.cc
index e184534ed7..7b660856b7 100644
--- a/deps/v8/src/compiler/loop-analysis.cc
+++ b/deps/v8/src/compiler/loop-analysis.cc
@@ -5,12 +5,17 @@
#include "src/compiler/loop-analysis.h"
#include "src/codegen/tick-counter.h"
+#include "src/compiler/common-operator.h"
#include "src/compiler/graph.h"
#include "src/compiler/node-marker.h"
#include "src/compiler/node-properties.h"
#include "src/compiler/node.h"
#include "src/zone/zone.h"
+#if V8_ENABLE_WEBASSEMBLY
+#include "src/wasm/wasm-code-manager.h"
+#endif
+
namespace v8 {
namespace internal {
@@ -581,12 +586,24 @@ ZoneUnorderedSet<Node*>* LoopFinder::FindSmallUnnestedLoopFromHeader(
loop_header);
// All uses are outside the loop, do nothing.
break;
- case IrOpcode::kCall:
case IrOpcode::kTailCall:
case IrOpcode::kJSWasmCall:
case IrOpcode::kJSCall:
// Call nodes are considered to have unbounded size, i.e. >max_size.
+ // An exception is the call to the stack guard builtin at the beginning
+ // of many loops.
return nullptr;
+ case IrOpcode::kCall: {
+ Node* callee = node->InputAt(0);
+ if (callee->opcode() == IrOpcode::kRelocatableInt32Constant ||
+ callee->opcode() == IrOpcode::kRelocatableInt64Constant) {
+ auto info = OpParameter<RelocatablePtrConstantInfo>(callee->op());
+ if (info.value() != v8::internal::wasm::WasmCode::kWasmStackGuard) {
+ return nullptr;
+ }
+ }
+ V8_FALLTHROUGH;
+ }
default:
for (Node* use : node->uses()) {
if (visited->count(use) == 0) queue.push_back(use);
diff --git a/deps/v8/src/compiler/machine-graph-verifier.cc b/deps/v8/src/compiler/machine-graph-verifier.cc
index 88679283d9..fedb208b5f 100644
--- a/deps/v8/src/compiler/machine-graph-verifier.cc
+++ b/deps/v8/src/compiler/machine-graph-verifier.cc
@@ -121,10 +121,14 @@ class MachineRepresentationInferrer {
break;
case IrOpcode::kWord32AtomicLoad:
case IrOpcode::kWord64AtomicLoad:
+ representation_vector_[node->id()] =
+ PromoteRepresentation(AtomicLoadParametersOf(node->op())
+ .representation()
+ .representation());
+ break;
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable:
case IrOpcode::kProtectedLoad:
- case IrOpcode::kPoisonedLoad:
representation_vector_[node->id()] = PromoteRepresentation(
LoadRepresentationOf(node->op()).representation());
break;
@@ -154,8 +158,8 @@ class MachineRepresentationInferrer {
}
case IrOpcode::kWord32AtomicStore:
case IrOpcode::kWord64AtomicStore:
- representation_vector_[node->id()] =
- PromoteRepresentation(AtomicStoreRepresentationOf(node->op()));
+ representation_vector_[node->id()] = PromoteRepresentation(
+ AtomicStoreParametersOf(node->op()).representation());
break;
case IrOpcode::kWord32AtomicPairLoad:
case IrOpcode::kWord32AtomicPairStore:
@@ -206,15 +210,8 @@ class MachineRepresentationInferrer {
case IrOpcode::kChangeInt32ToTagged:
case IrOpcode::kChangeUint32ToTagged:
case IrOpcode::kBitcastWordToTagged:
- case IrOpcode::kTaggedPoisonOnSpeculation:
representation_vector_[node->id()] = MachineRepresentation::kTagged;
break;
- case IrOpcode::kWord32PoisonOnSpeculation:
- representation_vector_[node->id()] = MachineRepresentation::kWord32;
- break;
- case IrOpcode::kWord64PoisonOnSpeculation:
- representation_vector_[node->id()] = MachineRepresentation::kWord64;
- break;
case IrOpcode::kCompressedHeapConstant:
representation_vector_[node->id()] =
MachineRepresentation::kCompressedPointer;
@@ -394,14 +391,6 @@ class MachineRepresentationChecker {
CheckValueInputRepresentationIs(
node, 0, MachineType::PointerRepresentation());
break;
- case IrOpcode::kWord32PoisonOnSpeculation:
- CheckValueInputRepresentationIs(node, 0,
- MachineRepresentation::kWord32);
- break;
- case IrOpcode::kWord64PoisonOnSpeculation:
- CheckValueInputRepresentationIs(node, 0,
- MachineRepresentation::kWord64);
- break;
case IrOpcode::kBitcastTaggedToWord:
case IrOpcode::kBitcastTaggedToWordForTagAndSmiBits:
if (COMPRESS_POINTERS_BOOL) {
@@ -410,9 +399,6 @@ class MachineRepresentationChecker {
CheckValueInputIsTagged(node, 0);
}
break;
- case IrOpcode::kTaggedPoisonOnSpeculation:
- CheckValueInputIsTagged(node, 0);
- break;
case IrOpcode::kTruncateFloat64ToWord32:
case IrOpcode::kTruncateFloat64ToUint32:
case IrOpcode::kTruncateFloat64ToFloat32:
@@ -566,7 +552,6 @@ class MachineRepresentationChecker {
case IrOpcode::kWord32AtomicLoad:
case IrOpcode::kWord32AtomicPairLoad:
case IrOpcode::kWord64AtomicLoad:
- case IrOpcode::kPoisonedLoad:
CheckValueInputIsTaggedOrPointer(node, 0);
CheckValueInputRepresentationIs(
node, 1, MachineType::PointerRepresentation());
@@ -605,9 +590,12 @@ class MachineRepresentationChecker {
case MachineRepresentation::kTaggedPointer:
case MachineRepresentation::kTaggedSigned:
if (COMPRESS_POINTERS_BOOL &&
- node->opcode() == IrOpcode::kStore &&
- IsAnyTagged(
- StoreRepresentationOf(node->op()).representation())) {
+ ((node->opcode() == IrOpcode::kStore &&
+ IsAnyTagged(StoreRepresentationOf(node->op())
+ .representation())) ||
+ (node->opcode() == IrOpcode::kWord32AtomicStore &&
+ IsAnyTagged(AtomicStoreParametersOf(node->op())
+ .representation())))) {
CheckValueInputIsCompressedOrTagged(node, 2);
} else {
CheckValueInputIsTagged(node, 2);
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index 33d58c854b..775e5ada81 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -947,6 +947,20 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
}
return ReduceWord64Comparisons(node);
}
+ case IrOpcode::kFloat32Select:
+ case IrOpcode::kFloat64Select:
+ case IrOpcode::kWord32Select:
+ case IrOpcode::kWord64Select: {
+ Int32Matcher match(node->InputAt(0));
+ if (match.HasResolvedValue()) {
+ if (match.Is(0)) {
+ return Replace(node->InputAt(2));
+ } else {
+ return Replace(node->InputAt(1));
+ }
+ }
+ break;
+ }
default:
break;
}
@@ -2061,7 +2075,6 @@ bool IsFloat64RepresentableAsFloat32(const Float64Matcher& m) {
} // namespace
-
Reduction MachineOperatorReducer::ReduceFloat64Compare(Node* node) {
DCHECK(IrOpcode::kFloat64Equal == node->opcode() ||
IrOpcode::kFloat64LessThan == node->opcode() ||
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 411c6d4cb3..d24030e1a7 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -32,6 +32,41 @@ std::ostream& operator<<(std::ostream& os, StoreRepresentation rep) {
return os << rep.representation() << ", " << rep.write_barrier_kind();
}
+bool operator==(AtomicStoreParameters lhs, AtomicStoreParameters rhs) {
+ return lhs.store_representation() == rhs.store_representation() &&
+ lhs.order() == rhs.order();
+}
+
+bool operator!=(AtomicStoreParameters lhs, AtomicStoreParameters rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(AtomicStoreParameters params) {
+ return base::hash_combine(hash_value(params.store_representation()),
+ params.order());
+}
+
+std::ostream& operator<<(std::ostream& os, AtomicStoreParameters params) {
+ return os << params.store_representation() << ", " << params.order();
+}
+
+bool operator==(AtomicLoadParameters lhs, AtomicLoadParameters rhs) {
+ return lhs.representation() == rhs.representation() &&
+ lhs.order() == rhs.order();
+}
+
+bool operator!=(AtomicLoadParameters lhs, AtomicLoadParameters rhs) {
+ return !(lhs == rhs);
+}
+
+size_t hash_value(AtomicLoadParameters params) {
+ return base::hash_combine(params.representation(), params.order());
+}
+
+std::ostream& operator<<(std::ostream& os, AtomicLoadParameters params) {
+ return os << params.representation() << ", " << params.order();
+}
+
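These free functions are the boilerplate every Operator1<T> parameter type needs: the operator cache and value numbering compare and hash T to decide whether two nodes carry the same operator, and operator<< is what --trace-turbo output prints. A hedged, self-contained sketch of that contract:

#include <cstddef>
#include <functional>
#include <ostream>

struct DemoParams {  // stand-in for AtomicLoadParameters/AtomicStoreParameters
  int representation;
  int order;
};

bool operator==(DemoParams a, DemoParams b) {
  return a.representation == b.representation && a.order == b.order;
}

size_t hash_value(DemoParams p) {  // base::hash_combine backs the real ones
  return std::hash<int>{}(p.representation) ^ (std::hash<int>{}(p.order) << 1);
}

std::ostream& operator<<(std::ostream& os, DemoParams p) {
  return os << p.representation << ", " << p.order;
}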
size_t hash_value(MemoryAccessKind kind) { return static_cast<size_t>(kind); }
std::ostream& operator<<(std::ostream& os, MemoryAccessKind kind) {
@@ -121,21 +156,29 @@ bool operator==(LoadLaneParameters lhs, LoadLaneParameters rhs) {
LoadRepresentation LoadRepresentationOf(Operator const* op) {
DCHECK(IrOpcode::kLoad == op->opcode() ||
IrOpcode::kProtectedLoad == op->opcode() ||
- IrOpcode::kWord32AtomicLoad == op->opcode() ||
- IrOpcode::kWord64AtomicLoad == op->opcode() ||
- IrOpcode::kWord32AtomicPairLoad == op->opcode() ||
- IrOpcode::kPoisonedLoad == op->opcode() ||
IrOpcode::kUnalignedLoad == op->opcode() ||
IrOpcode::kLoadImmutable == op->opcode());
return OpParameter<LoadRepresentation>(op);
}
+AtomicLoadParameters AtomicLoadParametersOf(Operator const* op) {
+ DCHECK(IrOpcode::kWord32AtomicLoad == op->opcode() ||
+ IrOpcode::kWord64AtomicLoad == op->opcode());
+ return OpParameter<AtomicLoadParameters>(op);
+}
+
StoreRepresentation const& StoreRepresentationOf(Operator const* op) {
DCHECK(IrOpcode::kStore == op->opcode() ||
IrOpcode::kProtectedStore == op->opcode());
return OpParameter<StoreRepresentation>(op);
}
+AtomicStoreParameters const& AtomicStoreParametersOf(Operator const* op) {
+ DCHECK(IrOpcode::kWord32AtomicStore == op->opcode() ||
+ IrOpcode::kWord64AtomicStore == op->opcode());
+ return OpParameter<AtomicStoreParameters>(op);
+}
+
UnalignedStoreRepresentation const& UnalignedStoreRepresentationOf(
Operator const* op) {
DCHECK_EQ(IrOpcode::kUnalignedStore, op->opcode());
@@ -182,12 +225,6 @@ StackSlotRepresentation const& StackSlotRepresentationOf(Operator const* op) {
return OpParameter<StackSlotRepresentation>(op);
}
-MachineRepresentation AtomicStoreRepresentationOf(Operator const* op) {
- DCHECK(IrOpcode::kWord32AtomicStore == op->opcode() ||
- IrOpcode::kWord64AtomicStore == op->opcode());
- return OpParameter<MachineRepresentation>(op);
-}
-
MachineType AtomicOpType(Operator const* op) {
return OpParameter<MachineType>(op);
}
@@ -650,6 +687,30 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
V(S128Load32Zero) \
V(S128Load64Zero)
+#if TAGGED_SIZE_8_BYTES
+
+#define ATOMIC_TAGGED_TYPE_LIST(V)
+
+#define ATOMIC64_TAGGED_TYPE_LIST(V) \
+ V(TaggedSigned) \
+ V(TaggedPointer) \
+ V(AnyTagged) \
+ V(CompressedPointer) \
+ V(AnyCompressed)
+
+#else
+
+#define ATOMIC_TAGGED_TYPE_LIST(V) \
+ V(TaggedSigned) \
+ V(TaggedPointer) \
+ V(AnyTagged) \
+ V(CompressedPointer) \
+ V(AnyCompressed)
+
+#define ATOMIC64_TAGGED_TYPE_LIST(V)
+
+#endif // TAGGED_SIZE_8_BYTES
+
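The #if TAGGED_SIZE_8_BYTES split sorts tagged representations by operation width: with 8-byte tagged slots (no pointer compression) a tagged atomic access is a 64-bit operation, so the entries live in the ATOMIC64_* list and the 32-bit list expands to nothing; with 4-byte tagged slots the lists swap roles. A tiny X-macro demo, with hypothetical names, of how such lists are consumed:

// Each list applies V to every entry, so one definition of V stamps out one
// declaration or case label per type.
#define DEMO_TYPE_LIST(V) \
  V(TaggedSigned)         \
  V(TaggedPointer)

#define DECLARE_COUNTER(Type) int counter_for_##Type = 0;
DEMO_TYPE_LIST(DECLARE_COUNTER)  // int counter_for_TaggedSigned = 0; ...
#undef DECLARE_COUNTER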
#define ATOMIC_U32_TYPE_LIST(V) \
V(Uint8) \
V(Uint16) \
@@ -665,6 +726,28 @@ std::ostream& operator<<(std::ostream& os, TruncateKind kind) {
ATOMIC_U32_TYPE_LIST(V) \
V(Uint64)
+#if TAGGED_SIZE_8_BYTES
+
+#define ATOMIC_TAGGED_REPRESENTATION_LIST(V)
+
+#define ATOMIC64_TAGGED_REPRESENTATION_LIST(V) \
+ V(kTaggedSigned) \
+ V(kTaggedPointer) \
+ V(kTagged)
+
+#else
+
+#define ATOMIC_TAGGED_REPRESENTATION_LIST(V) \
+ V(kTaggedSigned) \
+ V(kTaggedPointer) \
+ V(kTagged) \
+ V(kCompressedPointer) \
+ V(kCompressed)
+
+#define ATOMIC64_TAGGED_REPRESENTATION_LIST(V)
+
+#endif // TAGGED_SIZE_8_BYTES
+
#define ATOMIC_REPRESENTATION_LIST(V) \
V(kWord8) \
V(kWord16) \
@@ -831,13 +914,6 @@ struct MachineOperatorGlobalCache {
Operator::kEliminatable, "Load", 2, 1, \
1, 1, 1, 0, MachineType::Type()) {} \
}; \
- struct PoisonedLoad##Type##Operator final \
- : public Operator1<LoadRepresentation> { \
- PoisonedLoad##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kPoisonedLoad, Operator::kEliminatable, \
- "PoisonedLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
struct UnalignedLoad##Type##Operator final \
: public Operator1<LoadRepresentation> { \
UnalignedLoad##Type##Operator() \
@@ -861,7 +937,6 @@ struct MachineOperatorGlobalCache {
0, 0, 1, 0, 0, MachineType::Type()) {} \
}; \
Load##Type##Operator kLoad##Type; \
- PoisonedLoad##Type##Operator kPoisonedLoad##Type; \
UnalignedLoad##Type##Operator kUnalignedLoad##Type; \
ProtectedLoad##Type##Operator kProtectedLoad##Type; \
LoadImmutable##Type##Operator kLoadImmutable##Type;
@@ -976,55 +1051,63 @@ struct MachineOperatorGlobalCache {
MACHINE_REPRESENTATION_LIST(STORE)
#undef STORE
-#define ATOMIC_LOAD(Type) \
- struct Word32AtomicLoad##Type##Operator final \
- : public Operator1<LoadRepresentation> { \
- Word32AtomicLoad##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kWord32AtomicLoad, Operator::kEliminatable, \
- "Word32AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- Word32AtomicLoad##Type##Operator kWord32AtomicLoad##Type;
+#define ATOMIC_LOAD(Type) \
+ struct Word32SeqCstLoad##Type##Operator \
+ : public Operator1<AtomicLoadParameters> { \
+ Word32SeqCstLoad##Type##Operator() \
+ : Operator1<AtomicLoadParameters>( \
+ IrOpcode::kWord32AtomicLoad, Operator::kEliminatable, \
+ "Word32AtomicLoad", 2, 1, 1, 1, 1, 0, \
+ AtomicLoadParameters(MachineType::Type(), \
+ AtomicMemoryOrder::kSeqCst)) {} \
+ }; \
+ Word32SeqCstLoad##Type##Operator kWord32SeqCstLoad##Type;
ATOMIC_TYPE_LIST(ATOMIC_LOAD)
#undef ATOMIC_LOAD
-#define ATOMIC_LOAD(Type) \
- struct Word64AtomicLoad##Type##Operator final \
- : public Operator1<LoadRepresentation> { \
- Word64AtomicLoad##Type##Operator() \
- : Operator1<LoadRepresentation>( \
- IrOpcode::kWord64AtomicLoad, Operator::kEliminatable, \
- "Word64AtomicLoad", 2, 1, 1, 1, 1, 0, MachineType::Type()) {} \
- }; \
- Word64AtomicLoad##Type##Operator kWord64AtomicLoad##Type;
+#define ATOMIC_LOAD(Type) \
+ struct Word64SeqCstLoad##Type##Operator \
+ : public Operator1<AtomicLoadParameters> { \
+ Word64SeqCstLoad##Type##Operator() \
+ : Operator1<AtomicLoadParameters>( \
+ IrOpcode::kWord64AtomicLoad, Operator::kEliminatable, \
+ "Word64AtomicLoad", 2, 1, 1, 1, 1, 0, \
+ AtomicLoadParameters(MachineType::Type(), \
+ AtomicMemoryOrder::kSeqCst)) {} \
+ }; \
+ Word64SeqCstLoad##Type##Operator kWord64SeqCstLoad##Type;
ATOMIC_U64_TYPE_LIST(ATOMIC_LOAD)
#undef ATOMIC_LOAD
#define ATOMIC_STORE(Type) \
- struct Word32AtomicStore##Type##Operator \
- : public Operator1<MachineRepresentation> { \
- Word32AtomicStore##Type##Operator() \
- : Operator1<MachineRepresentation>( \
+ struct Word32SeqCstStore##Type##Operator \
+ : public Operator1<AtomicStoreParameters> { \
+ Word32SeqCstStore##Type##Operator() \
+ : Operator1<AtomicStoreParameters>( \
IrOpcode::kWord32AtomicStore, \
Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
"Word32AtomicStore", 3, 1, 1, 0, 1, 0, \
- MachineRepresentation::Type) {} \
+ AtomicStoreParameters(MachineRepresentation::Type, \
+ kNoWriteBarrier, \
+ AtomicMemoryOrder::kSeqCst)) {} \
}; \
- Word32AtomicStore##Type##Operator kWord32AtomicStore##Type;
+ Word32SeqCstStore##Type##Operator kWord32SeqCstStore##Type;
ATOMIC_REPRESENTATION_LIST(ATOMIC_STORE)
#undef ATOMIC_STORE
#define ATOMIC_STORE(Type) \
- struct Word64AtomicStore##Type##Operator \
- : public Operator1<MachineRepresentation> { \
- Word64AtomicStore##Type##Operator() \
- : Operator1<MachineRepresentation>( \
+ struct Word64SeqCstStore##Type##Operator \
+ : public Operator1<AtomicStoreParameters> { \
+ Word64SeqCstStore##Type##Operator() \
+ : Operator1<AtomicStoreParameters>( \
IrOpcode::kWord64AtomicStore, \
Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
"Word64AtomicStore", 3, 1, 1, 0, 1, 0, \
- MachineRepresentation::Type) {} \
+ AtomicStoreParameters(MachineRepresentation::Type, \
+ kNoWriteBarrier, \
+ AtomicMemoryOrder::kSeqCst)) {} \
}; \
- Word64AtomicStore##Type##Operator kWord64AtomicStore##Type;
+ Word64SeqCstStore##Type##Operator kWord64SeqCstStore##Type;
ATOMIC64_REPRESENTATION_LIST(ATOMIC_STORE)
#undef ATOMIC_STORE
@@ -1084,21 +1167,23 @@ struct MachineOperatorGlobalCache {
ATOMIC_U64_TYPE_LIST(ATOMIC_COMPARE_EXCHANGE)
#undef ATOMIC_COMPARE_EXCHANGE
- struct Word32AtomicPairLoadOperator : public Operator {
- Word32AtomicPairLoadOperator()
- : Operator(IrOpcode::kWord32AtomicPairLoad,
- Operator::kNoDeopt | Operator::kNoThrow,
- "Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0) {}
+ struct Word32SeqCstPairLoadOperator : public Operator1<AtomicMemoryOrder> {
+ Word32SeqCstPairLoadOperator()
+ : Operator1<AtomicMemoryOrder>(IrOpcode::kWord32AtomicPairLoad,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0,
+ AtomicMemoryOrder::kSeqCst) {}
};
- Word32AtomicPairLoadOperator kWord32AtomicPairLoad;
-
- struct Word32AtomicPairStoreOperator : public Operator {
- Word32AtomicPairStoreOperator()
- : Operator(IrOpcode::kWord32AtomicPairStore,
- Operator::kNoDeopt | Operator::kNoThrow,
- "Word32AtomicPairStore", 4, 1, 1, 0, 1, 0) {}
+ Word32SeqCstPairLoadOperator kWord32SeqCstPairLoad;
+
+ struct Word32SeqCstPairStoreOperator : public Operator1<AtomicMemoryOrder> {
+ Word32SeqCstPairStoreOperator()
+ : Operator1<AtomicMemoryOrder>(IrOpcode::kWord32AtomicPairStore,
+ Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairStore", 4, 1, 1, 0, 1,
+ 0, AtomicMemoryOrder::kSeqCst) {}
};
- Word32AtomicPairStoreOperator kWord32AtomicPairStore;
+ Word32SeqCstPairStoreOperator kWord32SeqCstPairStore;
#define ATOMIC_PAIR_OP(op) \
struct Word32AtomicPair##op##Operator : public Operator { \
@@ -1157,30 +1242,6 @@ struct MachineOperatorGlobalCache {
};
BitcastMaybeObjectToWordOperator kBitcastMaybeObjectToWord;
- struct TaggedPoisonOnSpeculation : public Operator {
- TaggedPoisonOnSpeculation()
- : Operator(IrOpcode::kTaggedPoisonOnSpeculation,
- Operator::kEliminatable | Operator::kNoWrite,
- "TaggedPoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
- };
- TaggedPoisonOnSpeculation kTaggedPoisonOnSpeculation;
-
- struct Word32PoisonOnSpeculation : public Operator {
- Word32PoisonOnSpeculation()
- : Operator(IrOpcode::kWord32PoisonOnSpeculation,
- Operator::kEliminatable | Operator::kNoWrite,
- "Word32PoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
- };
- Word32PoisonOnSpeculation kWord32PoisonOnSpeculation;
-
- struct Word64PoisonOnSpeculation : public Operator {
- Word64PoisonOnSpeculation()
- : Operator(IrOpcode::kWord64PoisonOnSpeculation,
- Operator::kEliminatable | Operator::kNoWrite,
- "Word64PoisonOnSpeculation", 1, 1, 1, 1, 1, 0) {}
- };
- Word64PoisonOnSpeculation kWord64PoisonOnSpeculation;
-
struct AbortCSAAssertOperator : public Operator {
AbortCSAAssertOperator()
: Operator(IrOpcode::kAbortCSAAssert, Operator::kNoThrow,
@@ -1366,16 +1427,6 @@ const Operator* MachineOperatorBuilder::LoadImmutable(LoadRepresentation rep) {
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::PoisonedLoad(LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return &cache_.kPoisonedLoad##Type; \
- }
- MACHINE_TYPE_LIST(LOAD)
-#undef LOAD
- UNREACHABLE();
-}
-
const Operator* MachineOperatorBuilder::ProtectedLoad(LoadRepresentation rep) {
#define LOAD(Type) \
if (rep == MachineType::Type()) { \
@@ -1592,23 +1643,47 @@ const Operator* MachineOperatorBuilder::MemBarrier() {
}
const Operator* MachineOperatorBuilder::Word32AtomicLoad(
- LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return &cache_.kWord32AtomicLoad##Type; \
+ AtomicLoadParameters params) {
+#define CACHED_LOAD(Type) \
+ if (params.representation() == MachineType::Type() && \
+ params.order() == AtomicMemoryOrder::kSeqCst) { \
+ return &cache_.kWord32SeqCstLoad##Type; \
+ }
+ ATOMIC_TYPE_LIST(CACHED_LOAD)
+#undef CACHED_LOAD
+
+#define LOAD(Type) \
+ if (params.representation() == MachineType::Type()) { \
+ return zone_->New<Operator1<AtomicLoadParameters>>( \
+ IrOpcode::kWord32AtomicLoad, Operator::kEliminatable, \
+ "Word32AtomicLoad", 2, 1, 1, 1, 1, 0, params); \
}
ATOMIC_TYPE_LIST(LOAD)
+ ATOMIC_TAGGED_TYPE_LIST(LOAD)
#undef LOAD
+
UNREACHABLE();
}
const Operator* MachineOperatorBuilder::Word32AtomicStore(
- MachineRepresentation rep) {
-#define STORE(kRep) \
- if (rep == MachineRepresentation::kRep) { \
- return &cache_.kWord32AtomicStore##kRep; \
+ AtomicStoreParameters params) {
+#define CACHED_STORE(kRep) \
+ if (params.representation() == MachineRepresentation::kRep && \
+ params.order() == AtomicMemoryOrder::kSeqCst) { \
+ return &cache_.kWord32SeqCstStore##kRep; \
+ }
+ ATOMIC_REPRESENTATION_LIST(CACHED_STORE)
+#undef CACHED_STORE
+
+#define STORE(kRep) \
+ if (params.representation() == MachineRepresentation::kRep) { \
+ return zone_->New<Operator1<AtomicStoreParameters>>( \
+ IrOpcode::kWord32AtomicStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "Word32AtomicStore", 3, 1, 1, 0, 1, 0, params); \
}
ATOMIC_REPRESENTATION_LIST(STORE)
+ ATOMIC_TAGGED_REPRESENTATION_LIST(STORE)
#undef STORE
UNREACHABLE();
}
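Both atomic builders now use the same two-tier lookup: the sequentially consistent operator, by far the common case and the only one the global cache pre-builds, is returned as a shared singleton, while any other memory order gets a one-off Operator1 allocated in the graph zone. A generic sketch of that shape, independent of V8's Zone type:

#include <memory>
#include <vector>

struct Op { int order; };  // 0 stands in for AtomicMemoryOrder::kSeqCst

const Op* GetAtomicOp(int order, std::vector<std::unique_ptr<Op>>& zone) {
  static const Op kSeqCstOp{0};
  if (order == 0) return &kSeqCstOp;                // cached singleton
  zone.push_back(std::make_unique<Op>(Op{order}));  // zone-allocated fallback
  return zone.back().get();
}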
@@ -1685,24 +1760,49 @@ const Operator* MachineOperatorBuilder::Word32AtomicXor(MachineType type) {
}
const Operator* MachineOperatorBuilder::Word64AtomicLoad(
- LoadRepresentation rep) {
-#define LOAD(Type) \
- if (rep == MachineType::Type()) { \
- return &cache_.kWord64AtomicLoad##Type; \
+ AtomicLoadParameters params) {
+#define CACHED_LOAD(Type) \
+ if (params.representation() == MachineType::Type() && \
+ params.order() == AtomicMemoryOrder::kSeqCst) { \
+ return &cache_.kWord64SeqCstLoad##Type; \
+ }
+ ATOMIC_U64_TYPE_LIST(CACHED_LOAD)
+#undef CACHED_LOAD
+
+#define LOAD(Type) \
+ if (params.representation() == MachineType::Type()) { \
+ return zone_->New<Operator1<AtomicLoadParameters>>( \
+ IrOpcode::kWord64AtomicLoad, Operator::kEliminatable, \
+ "Word64AtomicLoad", 2, 1, 1, 1, 1, 0, params); \
}
ATOMIC_U64_TYPE_LIST(LOAD)
+ ATOMIC64_TAGGED_TYPE_LIST(LOAD)
#undef LOAD
+
UNREACHABLE();
}
const Operator* MachineOperatorBuilder::Word64AtomicStore(
- MachineRepresentation rep) {
-#define STORE(kRep) \
- if (rep == MachineRepresentation::kRep) { \
- return &cache_.kWord64AtomicStore##kRep; \
+ AtomicStoreParameters params) {
+#define CACHED_STORE(kRep) \
+ if (params.representation() == MachineRepresentation::kRep && \
+ params.order() == AtomicMemoryOrder::kSeqCst) { \
+ return &cache_.kWord64SeqCstStore##kRep; \
+ }
+ ATOMIC64_REPRESENTATION_LIST(CACHED_STORE)
+#undef CACHED_STORE
+
+#define STORE(kRep) \
+ if (params.representation() == MachineRepresentation::kRep) { \
+ return zone_->New<Operator1<AtomicStoreParameters>>( \
+ IrOpcode::kWord64AtomicStore, \
+ Operator::kNoDeopt | Operator::kNoRead | Operator::kNoThrow, \
+ "Word64AtomicStore", 3, 1, 1, 0, 1, 0, params); \
}
ATOMIC64_REPRESENTATION_LIST(STORE)
+ ATOMIC64_TAGGED_REPRESENTATION_LIST(STORE)
#undef STORE
+
UNREACHABLE();
}
@@ -1777,12 +1877,24 @@ const Operator* MachineOperatorBuilder::Word64AtomicCompareExchange(
UNREACHABLE();
}
-const Operator* MachineOperatorBuilder::Word32AtomicPairLoad() {
- return &cache_.kWord32AtomicPairLoad;
+const Operator* MachineOperatorBuilder::Word32AtomicPairLoad(
+ AtomicMemoryOrder order) {
+ if (order == AtomicMemoryOrder::kSeqCst) {
+ return &cache_.kWord32SeqCstPairLoad;
+ }
+ return zone_->New<Operator1<AtomicMemoryOrder>>(
+ IrOpcode::kWord32AtomicPairLoad, Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairLoad", 2, 1, 1, 2, 1, 0, order);
}
-const Operator* MachineOperatorBuilder::Word32AtomicPairStore() {
- return &cache_.kWord32AtomicPairStore;
+const Operator* MachineOperatorBuilder::Word32AtomicPairStore(
+ AtomicMemoryOrder order) {
+ if (order == AtomicMemoryOrder::kSeqCst) {
+ return &cache_.kWord32SeqCstPairStore;
+ }
+ return zone_->New<Operator1<AtomicMemoryOrder>>(
+ IrOpcode::kWord32AtomicPairStore, Operator::kNoDeopt | Operator::kNoThrow,
+ "Word32AtomicPairStore", 4, 1, 1, 0, 1, 0, order);
}
const Operator* MachineOperatorBuilder::Word32AtomicPairAdd() {
@@ -1813,18 +1925,6 @@ const Operator* MachineOperatorBuilder::Word32AtomicPairCompareExchange() {
return &cache_.kWord32AtomicPairCompareExchange;
}
-const Operator* MachineOperatorBuilder::TaggedPoisonOnSpeculation() {
- return &cache_.kTaggedPoisonOnSpeculation;
-}
-
-const Operator* MachineOperatorBuilder::Word32PoisonOnSpeculation() {
- return &cache_.kWord32PoisonOnSpeculation;
-}
-
-const Operator* MachineOperatorBuilder::Word64PoisonOnSpeculation() {
- return &cache_.kWord64PoisonOnSpeculation;
-}
-
#define EXTRACT_LANE_OP(Type, Sign, lane_count) \
const Operator* MachineOperatorBuilder::Type##ExtractLane##Sign( \
int32_t lane_index) { \
@@ -1918,8 +2018,12 @@ StackCheckKind StackCheckKindOf(Operator const* op) {
#undef ATOMIC_TYPE_LIST
#undef ATOMIC_U64_TYPE_LIST
#undef ATOMIC_U32_TYPE_LIST
+#undef ATOMIC_TAGGED_TYPE_LIST
+#undef ATOMIC64_TAGGED_TYPE_LIST
#undef ATOMIC_REPRESENTATION_LIST
+#undef ATOMIC_TAGGED_REPRESENTATION_LIST
#undef ATOMIC64_REPRESENTATION_LIST
+#undef ATOMIC64_TAGGED_REPRESENTATION_LIST
#undef SIMD_LANE_OP_LIST
#undef STACK_SLOT_CACHED_SIZES_ALIGNMENTS_LIST
#undef LOAD_TRANSFORM_LIST
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 0ee3649ad0..7bd73663ab 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -8,6 +8,7 @@
#include "src/base/compiler-specific.h"
#include "src/base/enum-set.h"
#include "src/base/flags.h"
+#include "src/codegen/atomic-memory-order.h"
#include "src/codegen/machine-type.h"
#include "src/compiler/globals.h"
#include "src/compiler/write-barrier-kind.h"
@@ -50,6 +51,32 @@ using LoadRepresentation = MachineType;
V8_EXPORT_PRIVATE LoadRepresentation LoadRepresentationOf(Operator const*)
V8_WARN_UNUSED_RESULT;
+// A Word(32|64)AtomicLoad needs both a LoadRepresentation and a memory
+// order.
+class AtomicLoadParameters final {
+ public:
+ AtomicLoadParameters(LoadRepresentation representation,
+ AtomicMemoryOrder order)
+ : representation_(representation), order_(order) {}
+
+ LoadRepresentation representation() const { return representation_; }
+ AtomicMemoryOrder order() const { return order_; }
+
+ private:
+ LoadRepresentation representation_;
+ AtomicMemoryOrder order_;
+};
+
+V8_EXPORT_PRIVATE bool operator==(AtomicLoadParameters, AtomicLoadParameters);
+bool operator!=(AtomicLoadParameters, AtomicLoadParameters);
+
+size_t hash_value(AtomicLoadParameters);
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, AtomicLoadParameters);
+
+V8_EXPORT_PRIVATE AtomicLoadParameters AtomicLoadParametersOf(Operator const*)
+ V8_WARN_UNUSED_RESULT;
+
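Call sites that used to pass a bare LoadRepresentation now bundle it with a memory order. An illustrative call against the new signature; only the machine builder variable is hypothetical, the parameter types and values come from this patch:

// Hypothetical call site: a sequentially consistent 32-bit atomic load.
const Operator* op = machine->Word32AtomicLoad(
    AtomicLoadParameters(MachineType::Int32(), AtomicMemoryOrder::kSeqCst));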
enum class MemoryAccessKind {
kNormal,
kUnaligned,
@@ -131,6 +158,43 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, StoreRepresentation);
V8_EXPORT_PRIVATE StoreRepresentation const& StoreRepresentationOf(
Operator const*) V8_WARN_UNUSED_RESULT;
+// A Word(32|64)AtomicStore needs both a StoreRepresentation and a memory order.
+class AtomicStoreParameters final {
+ public:
+ AtomicStoreParameters(MachineRepresentation representation,
+ WriteBarrierKind write_barrier_kind,
+ AtomicMemoryOrder order)
+ : store_representation_(representation, write_barrier_kind),
+ order_(order) {}
+
+ MachineRepresentation representation() const {
+ return store_representation_.representation();
+ }
+ WriteBarrierKind write_barrier_kind() const {
+ return store_representation_.write_barrier_kind();
+ }
+ AtomicMemoryOrder order() const { return order_; }
+
+ StoreRepresentation store_representation() const {
+ return store_representation_;
+ }
+
+ private:
+ StoreRepresentation store_representation_;
+ AtomicMemoryOrder order_;
+};
+
+V8_EXPORT_PRIVATE bool operator==(AtomicStoreParameters, AtomicStoreParameters);
+bool operator!=(AtomicStoreParameters, AtomicStoreParameters);
+
+size_t hash_value(AtomicStoreParameters);
+
+V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
+ AtomicStoreParameters);
+
+V8_EXPORT_PRIVATE AtomicStoreParameters const& AtomicStoreParametersOf(
+ Operator const*) V8_WARN_UNUSED_RESULT;
+
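AtomicStoreParameters folds in a WriteBarrierKind as well, which is what lets the verifier change at the top of this diff treat tagged atomic stores like plain tagged stores; store_representation() exposes the familiar StoreRepresentation view for code that predates the memory-order split. An illustrative construction on the same hypothetical builder:

// Hypothetical call site: a seq_cst tagged atomic store with a full barrier.
const Operator* op = machine->Word32AtomicStore(AtomicStoreParameters(
    MachineRepresentation::kTagged, kFullWriteBarrier,
    AtomicMemoryOrder::kSeqCst));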
// An UnalignedStore needs a MachineType.
using UnalignedStoreRepresentation = MachineRepresentation;
@@ -173,9 +237,6 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&,
V8_EXPORT_PRIVATE StackSlotRepresentation const& StackSlotRepresentationOf(
Operator const* op) V8_WARN_UNUSED_RESULT;
-MachineRepresentation AtomicStoreRepresentationOf(Operator const* op)
- V8_WARN_UNUSED_RESULT;
-
MachineType AtomicOpType(Operator const* op) V8_WARN_UNUSED_RESULT;
class S128ImmediateParameter {
@@ -852,7 +913,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// load [base + index]
const Operator* Load(LoadRepresentation rep);
const Operator* LoadImmutable(LoadRepresentation rep);
- const Operator* PoisonedLoad(LoadRepresentation rep);
const Operator* ProtectedLoad(LoadRepresentation rep);
const Operator* LoadTransform(MemoryAccessKind kind,
@@ -879,11 +939,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* StackSlot(int size, int alignment = 0);
const Operator* StackSlot(MachineRepresentation rep, int alignment = 0);
- // Destroy value by masking when misspeculating.
- const Operator* TaggedPoisonOnSpeculation();
- const Operator* Word32PoisonOnSpeculation();
- const Operator* Word64PoisonOnSpeculation();
-
// Access to the machine stack.
const Operator* LoadFramePointer();
const Operator* LoadParentFramePointer();
@@ -901,13 +956,13 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
const Operator* MemBarrier();
// atomic-load [base + index]
- const Operator* Word32AtomicLoad(LoadRepresentation rep);
+ const Operator* Word32AtomicLoad(AtomicLoadParameters params);
// atomic-load [base + index]
- const Operator* Word64AtomicLoad(LoadRepresentation rep);
+ const Operator* Word64AtomicLoad(AtomicLoadParameters params);
// atomic-store [base + index], value
- const Operator* Word32AtomicStore(MachineRepresentation rep);
+ const Operator* Word32AtomicStore(AtomicStoreParameters params);
// atomic-store [base + index], value
- const Operator* Word64AtomicStore(MachineRepresentation rep);
+ const Operator* Word64AtomicStore(AtomicStoreParameters params);
// atomic-exchange [base + index], value
const Operator* Word32AtomicExchange(MachineType type);
// atomic-exchange [base + index], value
@@ -937,9 +992,9 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
// atomic-xor [base + index], value
const Operator* Word64AtomicXor(MachineType type);
// atomic-pair-load [base + index]
- const Operator* Word32AtomicPairLoad();
+ const Operator* Word32AtomicPairLoad(AtomicMemoryOrder order);
  // atomic-pair-store [base + index], value_high, value_low
- const Operator* Word32AtomicPairStore();
+ const Operator* Word32AtomicPairStore(AtomicMemoryOrder order);
// atomic-pair-add [base + index], value_high, value_low
const Operator* Word32AtomicPairAdd();
  // atomic-pair-sub [base + index], value_high, value_low
@@ -980,7 +1035,6 @@ class V8_EXPORT_PRIVATE MachineOperatorBuilder final
V(Word, Ror) \
V(Word, Clz) \
V(Word, Equal) \
- V(Word, PoisonOnSpeculation) \
V(Int, Add) \
V(Int, Sub) \
V(Int, Mul) \
diff --git a/deps/v8/src/compiler/memory-lowering.cc b/deps/v8/src/compiler/memory-lowering.cc
index 9673a51844..27ad71c07a 100644
--- a/deps/v8/src/compiler/memory-lowering.cc
+++ b/deps/v8/src/compiler/memory-lowering.cc
@@ -60,7 +60,6 @@ class MemoryLowering::AllocationGroup final : public ZoneObject {
MemoryLowering::MemoryLowering(JSGraph* jsgraph, Zone* zone,
JSGraphAssembler* graph_assembler,
- PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding,
WriteBarrierAssertFailedCallback callback,
const char* function_debug_name)
@@ -71,7 +70,6 @@ MemoryLowering::MemoryLowering(JSGraph* jsgraph, Zone* zone,
machine_(jsgraph->machine()),
graph_assembler_(graph_assembler),
allocation_folding_(allocation_folding),
- poisoning_level_(poisoning_level),
write_barrier_assert_failed_(callback),
function_debug_name_(function_debug_name) {}
@@ -401,11 +399,7 @@ Reduction MemoryLowering::ReduceLoadElement(Node* node) {
node->ReplaceInput(1, ComputeIndex(access, index));
MachineType type = access.machine_type;
DCHECK(!type.IsMapWord());
- if (NeedsPoisoning(access.load_sensitivity)) {
- NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
- } else {
- NodeProperties::ChangeOp(node, machine()->Load(type));
- }
+ NodeProperties::ChangeOp(node, machine()->Load(type));
return Changed(node);
}
@@ -413,8 +407,7 @@ Node* MemoryLowering::DecodeExternalPointer(
Node* node, ExternalPointerTag external_pointer_tag) {
#ifdef V8_HEAP_SANDBOX
DCHECK(V8_HEAP_SANDBOX_BOOL);
- DCHECK(node->opcode() == IrOpcode::kLoad ||
- node->opcode() == IrOpcode::kPoisonedLoad);
+ DCHECK(node->opcode() == IrOpcode::kLoad);
Node* effect = NodeProperties::GetEffectInput(node);
Node* control = NodeProperties::GetControlInput(node);
__ InitializeEffectControl(effect, control);
@@ -476,16 +469,11 @@ Reduction MemoryLowering::ReduceLoadField(Node* node) {
}
if (type.IsMapWord()) {
- DCHECK(!NeedsPoisoning(access.load_sensitivity));
DCHECK(!access.type.Is(Type::SandboxedExternalPointer()));
return ReduceLoadMap(node);
}
- if (NeedsPoisoning(access.load_sensitivity)) {
- NodeProperties::ChangeOp(node, machine()->PoisonedLoad(type));
- } else {
- NodeProperties::ChangeOp(node, machine()->Load(type));
- }
+ NodeProperties::ChangeOp(node, machine()->Load(type));
if (V8_HEAP_SANDBOX_BOOL &&
access.type.Is(Type::SandboxedExternalPointer())) {
@@ -655,21 +643,6 @@ WriteBarrierKind MemoryLowering::ComputeWriteBarrierKind(
return write_barrier_kind;
}
-bool MemoryLowering::NeedsPoisoning(LoadSensitivity load_sensitivity) const {
- // Safe loads do not need poisoning.
- if (load_sensitivity == LoadSensitivity::kSafe) return false;
-
- switch (poisoning_level_) {
- case PoisoningMitigationLevel::kDontPoison:
- return false;
- case PoisoningMitigationLevel::kPoisonAll:
- return true;
- case PoisoningMitigationLevel::kPoisonCriticalOnly:
- return load_sensitivity == LoadSensitivity::kCritical;
- }
- UNREACHABLE();
-}
-
MemoryLowering::AllocationGroup::AllocationGroup(Node* node,
AllocationType allocation,
Zone* zone)
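For context on the deletions in this file: load poisoning was the Spectre-style mitigation behind FLAG_untrusted_code_mitigations, masking values produced on a possibly mis-speculated path with a poison register that is all-ones architecturally and all-zeros under mis-speculation. A simplified sketch of the idea, not the removed implementation:

#include <cstdint>

// Masking with the speculation poison neutralizes a loaded index or pointer
// on a mis-predicted path, so dependent loads cannot leak secret data.
uintptr_t PoisonOnSpeculation(uintptr_t value, uintptr_t poison) {
  return value & poison;  // poison == ~uintptr_t{0} normally, 0 if mis-speculating
}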
diff --git a/deps/v8/src/compiler/memory-lowering.h b/deps/v8/src/compiler/memory-lowering.h
index 1fbe18abff..9edb880e6f 100644
--- a/deps/v8/src/compiler/memory-lowering.h
+++ b/deps/v8/src/compiler/memory-lowering.h
@@ -75,7 +75,6 @@ class MemoryLowering final : public Reducer {
MemoryLowering(
JSGraph* jsgraph, Zone* zone, JSGraphAssembler* graph_assembler,
- PoisoningMitigationLevel poisoning_level,
AllocationFolding allocation_folding =
AllocationFolding::kDontAllocationFolding,
WriteBarrierAssertFailedCallback callback = [](Node*, Node*, const char*,
@@ -112,7 +111,6 @@ class MemoryLowering final : public Reducer {
Node* DecodeExternalPointer(Node* encoded_pointer, ExternalPointerTag tag);
Reduction ReduceLoadMap(Node* encoded_pointer);
Node* ComputeIndex(ElementAccess const& access, Node* node);
- bool NeedsPoisoning(LoadSensitivity load_sensitivity) const;
void EnsureAllocateOperator();
Node* GetWasmInstanceNode();
@@ -133,7 +131,6 @@ class MemoryLowering final : public Reducer {
MachineOperatorBuilder* machine_;
JSGraphAssembler* graph_assembler_;
AllocationFolding allocation_folding_;
- PoisoningMitigationLevel poisoning_level_;
WriteBarrierAssertFailedCallback write_barrier_assert_failed_;
const char* function_debug_name_;
diff --git a/deps/v8/src/compiler/memory-optimizer.cc b/deps/v8/src/compiler/memory-optimizer.cc
index 860ea1fae1..ba4a5c1f67 100644
--- a/deps/v8/src/compiler/memory-optimizer.cc
+++ b/deps/v8/src/compiler/memory-optimizer.cc
@@ -40,7 +40,6 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kLoadLane:
case IrOpcode::kLoadTransform:
case IrOpcode::kMemoryBarrier:
- case IrOpcode::kPoisonedLoad:
case IrOpcode::kProtectedLoad:
case IrOpcode::kProtectedStore:
case IrOpcode::kRetain:
@@ -54,7 +53,6 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kStoreField:
case IrOpcode::kStoreLane:
case IrOpcode::kStoreToObject:
- case IrOpcode::kTaggedPoisonOnSpeculation:
case IrOpcode::kUnalignedLoad:
case IrOpcode::kUnalignedStore:
case IrOpcode::kUnreachable:
@@ -77,7 +75,6 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kWord32AtomicStore:
case IrOpcode::kWord32AtomicSub:
case IrOpcode::kWord32AtomicXor:
- case IrOpcode::kWord32PoisonOnSpeculation:
case IrOpcode::kWord64AtomicAdd:
case IrOpcode::kWord64AtomicAnd:
case IrOpcode::kWord64AtomicCompareExchange:
@@ -87,7 +84,6 @@ bool CanAllocate(const Node* node) {
case IrOpcode::kWord64AtomicStore:
case IrOpcode::kWord64AtomicSub:
case IrOpcode::kWord64AtomicXor:
- case IrOpcode::kWord64PoisonOnSpeculation:
return false;
case IrOpcode::kCall:
@@ -183,13 +179,12 @@ void WriteBarrierAssertFailed(Node* node, Node* object, const char* name,
} // namespace
MemoryOptimizer::MemoryOptimizer(
- JSGraph* jsgraph, Zone* zone, PoisoningMitigationLevel poisoning_level,
+ JSGraph* jsgraph, Zone* zone,
MemoryLowering::AllocationFolding allocation_folding,
const char* function_debug_name, TickCounter* tick_counter)
: graph_assembler_(jsgraph, zone),
- memory_lowering_(jsgraph, zone, &graph_assembler_, poisoning_level,
- allocation_folding, WriteBarrierAssertFailed,
- function_debug_name),
+ memory_lowering_(jsgraph, zone, &graph_assembler_, allocation_folding,
+ WriteBarrierAssertFailed, function_debug_name),
jsgraph_(jsgraph),
empty_state_(AllocationState::Empty(zone)),
pending_(zone),
diff --git a/deps/v8/src/compiler/memory-optimizer.h b/deps/v8/src/compiler/memory-optimizer.h
index 3845304fdd..7d8bca44d4 100644
--- a/deps/v8/src/compiler/memory-optimizer.h
+++ b/deps/v8/src/compiler/memory-optimizer.h
@@ -30,7 +30,6 @@ using NodeId = uint32_t;
class MemoryOptimizer final {
public:
MemoryOptimizer(JSGraph* jsgraph, Zone* zone,
- PoisoningMitigationLevel poisoning_level,
MemoryLowering::AllocationFolding allocation_folding,
const char* function_debug_name, TickCounter* tick_counter);
~MemoryOptimizer() = default;
diff --git a/deps/v8/src/compiler/node-matchers.h b/deps/v8/src/compiler/node-matchers.h
index 1ce4023424..52dc476dc4 100644
--- a/deps/v8/src/compiler/node-matchers.h
+++ b/deps/v8/src/compiler/node-matchers.h
@@ -743,7 +743,6 @@ struct BaseWithIndexAndDisplacementMatcher {
switch (from->opcode()) {
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable:
- case IrOpcode::kPoisonedLoad:
case IrOpcode::kProtectedLoad:
case IrOpcode::kInt32Add:
case IrOpcode::kInt64Add:
diff --git a/deps/v8/src/compiler/opcodes.h b/deps/v8/src/compiler/opcodes.h
index 912bd7b5ce..b956f148cc 100644
--- a/deps/v8/src/compiler/opcodes.h
+++ b/deps/v8/src/compiler/opcodes.h
@@ -463,7 +463,6 @@
V(PlainPrimitiveToFloat64) \
V(PlainPrimitiveToNumber) \
V(PlainPrimitiveToWord32) \
- V(PoisonIndex) \
V(RestLength) \
V(RuntimeAbort) \
V(StoreDataViewElement) \
@@ -686,7 +685,6 @@
V(DebugBreak) \
V(Comment) \
V(Load) \
- V(PoisonedLoad) \
V(LoadImmutable) \
V(Store) \
V(StackSlot) \
@@ -746,9 +744,6 @@
V(Word64Select) \
V(Float32Select) \
V(Float64Select) \
- V(TaggedPoisonOnSpeculation) \
- V(Word32PoisonOnSpeculation) \
- V(Word64PoisonOnSpeculation) \
V(LoadStackCheckOffset) \
V(LoadFramePointer) \
V(LoadParentFramePointer) \
diff --git a/deps/v8/src/compiler/pipeline-statistics.cc b/deps/v8/src/compiler/pipeline-statistics.cc
index 82a6e6bb3e..16366bf588 100644
--- a/deps/v8/src/compiler/pipeline-statistics.cc
+++ b/deps/v8/src/compiler/pipeline-statistics.cc
@@ -10,21 +10,12 @@
#include "src/compiler/zone-stats.h"
#include "src/objects/shared-function-info.h"
#include "src/objects/string.h"
-#include "src/tracing/trace-event.h"
namespace v8 {
namespace internal {
namespace compiler {
-namespace {
-
-// We log detailed phase information about the pipeline
-// in both the v8.turbofan and the v8.wasm.turbofan categories.
-constexpr const char kTraceCategory[] = // --
- TRACE_DISABLED_BY_DEFAULT("v8.turbofan") "," // --
- TRACE_DISABLED_BY_DEFAULT("v8.wasm.turbofan");
-
-} // namespace
+constexpr char PipelineStatistics::kTraceCategory[];
void PipelineStatistics::CommonStats::Begin(
PipelineStatistics* pipeline_stats) {
@@ -62,6 +53,7 @@ PipelineStatistics::PipelineStatistics(OptimizedCompilationInfo* info,
: outer_zone_(info->zone()),
zone_stats_(zone_stats),
compilation_stats_(compilation_stats),
+ code_kind_(info->code_kind()),
phase_kind_name_(nullptr),
phase_name_(nullptr) {
if (info->has_shared_info()) {
@@ -70,7 +62,6 @@ PipelineStatistics::PipelineStatistics(OptimizedCompilationInfo* info,
total_stats_.Begin(this);
}
-
PipelineStatistics::~PipelineStatistics() {
if (InPhaseKind()) EndPhaseKind();
CompilationStatistics::BasicStats diff;
@@ -82,7 +73,8 @@ PipelineStatistics::~PipelineStatistics() {
void PipelineStatistics::BeginPhaseKind(const char* phase_kind_name) {
DCHECK(!InPhase());
if (InPhaseKind()) EndPhaseKind();
- TRACE_EVENT_BEGIN0(kTraceCategory, phase_kind_name);
+ TRACE_EVENT_BEGIN1(kTraceCategory, phase_kind_name, "kind",
+ CodeKindToString(code_kind_));
phase_kind_name_ = phase_kind_name;
phase_kind_stats_.Begin(this);
}
@@ -92,11 +84,14 @@ void PipelineStatistics::EndPhaseKind() {
CompilationStatistics::BasicStats diff;
phase_kind_stats_.End(this, &diff);
compilation_stats_->RecordPhaseKindStats(phase_kind_name_, diff);
- TRACE_EVENT_END0(kTraceCategory, phase_kind_name_);
+ TRACE_EVENT_END2(kTraceCategory, phase_kind_name_, "kind",
+ CodeKindToString(code_kind_), "stats",
+ TRACE_STR_COPY(diff.AsJSON().c_str()));
}
void PipelineStatistics::BeginPhase(const char* phase_name) {
- TRACE_EVENT_BEGIN0(kTraceCategory, phase_name);
+ TRACE_EVENT_BEGIN1(kTraceCategory, phase_name, "kind",
+ CodeKindToString(code_kind_));
DCHECK(InPhaseKind());
phase_name_ = phase_name;
phase_stats_.Begin(this);
@@ -107,7 +102,9 @@ void PipelineStatistics::EndPhase() {
CompilationStatistics::BasicStats diff;
phase_stats_.End(this, &diff);
compilation_stats_->RecordPhaseStats(phase_kind_name_, phase_name_, diff);
- TRACE_EVENT_END0(kTraceCategory, phase_name_);
+ TRACE_EVENT_END2(kTraceCategory, phase_name_, "kind",
+ CodeKindToString(code_kind_), "stats",
+ TRACE_STR_COPY(diff.AsJSON().c_str()));
}
} // namespace compiler
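Two things happen in this file: the trace category moves into the header as a static constexpr member (defined out of line at the top of the .cc, as pre-C++17 odr-use still requires), and the phase events gain a "kind" argument plus end-of-phase stats in JSON. The definition pattern in miniature:

// header: declaration with an in-class initializer
struct Demo {
  static constexpr char kName[] = "demo";
};

// .cc file: out-of-line definition, required before C++17 once kName is
// odr-used (e.g. bound to a reference); redundant but harmless from C++17 on.
constexpr char Demo::kName[];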
diff --git a/deps/v8/src/compiler/pipeline-statistics.h b/deps/v8/src/compiler/pipeline-statistics.h
index 8a05d98011..19f7574e2a 100644
--- a/deps/v8/src/compiler/pipeline-statistics.h
+++ b/deps/v8/src/compiler/pipeline-statistics.h
@@ -11,6 +11,8 @@
#include "src/base/platform/elapsed-timer.h"
#include "src/compiler/zone-stats.h"
#include "src/diagnostics/compilation-statistics.h"
+#include "src/objects/code-kind.h"
+#include "src/tracing/trace-event.h"
namespace v8 {
namespace internal {
@@ -29,6 +31,12 @@ class PipelineStatistics : public Malloced {
void BeginPhaseKind(const char* phase_kind_name);
void EndPhaseKind();
+ // We log detailed phase information about the pipeline
+ // in both the v8.turbofan and the v8.wasm.turbofan categories.
+ static constexpr char kTraceCategory[] =
+ TRACE_DISABLED_BY_DEFAULT("v8.turbofan") "," // --
+ TRACE_DISABLED_BY_DEFAULT("v8.wasm.turbofan");
+
private:
size_t OuterZoneSize() {
return static_cast<size_t>(outer_zone_->allocation_size());
@@ -60,6 +68,7 @@ class PipelineStatistics : public Malloced {
Zone* outer_zone_;
ZoneStats* zone_stats_;
CompilationStatistics* compilation_stats_;
+ CodeKind code_kind_;
std::string function_name_;
// Stats for the entire compilation.
diff --git a/deps/v8/src/compiler/pipeline.cc b/deps/v8/src/compiler/pipeline.cc
index e802cd7268..8d3d93aa2a 100644
--- a/deps/v8/src/compiler/pipeline.cc
+++ b/deps/v8/src/compiler/pipeline.cc
@@ -84,6 +84,7 @@
#include "src/execution/isolate-inl.h"
#include "src/heap/local-heap.h"
#include "src/init/bootstrapper.h"
+#include "src/logging/code-events.h"
#include "src/logging/counters.h"
#include "src/logging/runtime-call-stats-scope.h"
#include "src/objects/shared-function-info.h"
@@ -95,6 +96,7 @@
#if V8_ENABLE_WEBASSEMBLY
#include "src/compiler/wasm-compiler.h"
+#include "src/compiler/wasm-inlining.h"
#include "src/wasm/function-body-decoder.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/wasm-engine.h"
@@ -547,8 +549,7 @@ class PipelineData {
code_generator_ = new CodeGenerator(
codegen_zone(), frame(), linkage, sequence(), info(), isolate(),
osr_helper_, start_source_position_, jump_optimization_info_,
- info()->GetPoisoningMitigationLevel(), assembler_options(),
- info_->builtin(), max_unoptimized_frame_height(),
+ assembler_options(), info_->builtin(), max_unoptimized_frame_height(),
max_pushed_argument_count(),
FLAG_trace_turbo_stack_accesses ? debug_name_.get() : nullptr);
}
@@ -947,13 +948,10 @@ void PrintCode(Isolate* isolate, Handle<Code> code,
void TraceScheduleAndVerify(OptimizedCompilationInfo* info, PipelineData* data,
Schedule* schedule, const char* phase_name) {
-#ifdef V8_RUNTIME_CALL_STATS
- PipelineRunScope scope(data, "V8.TraceScheduleAndVerify",
- RuntimeCallCounterId::kOptimizeTraceScheduleAndVerify,
- RuntimeCallStats::kThreadSpecific);
-#else
- PipelineRunScope scope(data, "V8.TraceScheduleAndVerify");
-#endif
+ RCS_SCOPE(data->runtime_call_stats(),
+ RuntimeCallCounterId::kOptimizeTraceScheduleAndVerify,
+ RuntimeCallStats::kThreadSpecific);
+ TRACE_EVENT0(PipelineStatistics::kTraceCategory, "V8.TraceScheduleAndVerify");
if (info->trace_turbo_json()) {
UnparkedScopeIfNeeded scope(data->broker());
AllowHandleDereference allow_deref;
@@ -1161,18 +1159,6 @@ PipelineCompilationJob::Status PipelineCompilationJob::PrepareJobImpl(
if (FLAG_turbo_inlining) {
compilation_info()->set_inlining();
}
-
- // This is the bottleneck for computing and setting poisoning level in the
- // optimizing compiler.
- PoisoningMitigationLevel load_poisoning =
- PoisoningMitigationLevel::kDontPoison;
- if (FLAG_untrusted_code_mitigations) {
- // For full mitigations, this can be changed to
- // PoisoningMitigationLevel::kPoisonAll.
- load_poisoning = PoisoningMitigationLevel::kPoisonCriticalOnly;
- }
- compilation_info()->SetPoisoningMitigationLevel(load_poisoning);
-
if (FLAG_turbo_allocation_folding) {
compilation_info()->set_allocation_folding();
}
@@ -1424,8 +1410,8 @@ struct InliningPhase {
};
#if V8_ENABLE_WEBASSEMBLY
-struct WasmInliningPhase {
- DECL_PIPELINE_PHASE_CONSTANTS(WasmInlining)
+struct JSWasmInliningPhase {
+ DECL_PIPELINE_PHASE_CONSTANTS(JSWasmInlining)
void Run(PipelineData* data, Zone* temp_zone) {
DCHECK(data->has_js_wasm_calls());
@@ -1629,10 +1615,10 @@ struct SimplifiedLoweringPhase {
DECL_PIPELINE_PHASE_CONSTANTS(SimplifiedLowering)
void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
- SimplifiedLowering lowering(
- data->jsgraph(), data->broker(), temp_zone, data->source_positions(),
- data->node_origins(), data->info()->GetPoisoningMitigationLevel(),
- &data->info()->tick_counter(), linkage, data->observe_node_manager());
+ SimplifiedLowering lowering(data->jsgraph(), data->broker(), temp_zone,
+ data->source_positions(), data->node_origins(),
+ &data->info()->tick_counter(), linkage,
+ data->observe_node_manager());
// RepresentationChanger accesses the heap.
UnparkedScopeIfNeeded scope(data->broker());
@@ -1699,6 +1685,25 @@ struct WasmLoopUnrollingPhase {
}
}
};
+
+struct WasmInliningPhase {
+ DECL_PIPELINE_PHASE_CONSTANTS(WasmInlining)
+
+ void Run(PipelineData* data, Zone* temp_zone, wasm::CompilationEnv* env,
+ const wasm::WireBytesStorage* wire_bytes) {
+ GraphReducer graph_reducer(
+ temp_zone, data->graph(), &data->info()->tick_counter(), data->broker(),
+ data->jsgraph()->Dead(), data->observe_node_manager());
+ DeadCodeElimination dead(&graph_reducer, data->graph(),
+ data->mcgraph()->common(), temp_zone);
+ WasmInliner inliner(&graph_reducer, env, data->source_positions(),
+ data->node_origins(), data->mcgraph(), wire_bytes, 0);
+ AddReducer(data, &graph_reducer, &dead);
+ AddReducer(data, &graph_reducer, &inliner);
+
+ graph_reducer.ReduceGraph();
+ }
+};
#endif // V8_ENABLE_WEBASSEMBLY
struct LoopExitEliminationPhase {
@@ -1797,7 +1802,6 @@ struct EffectControlLinearizationPhase {
// - introduce effect phis and rewire effects to get SSA again.
LinearizeEffectControl(data->jsgraph(), schedule, temp_zone,
data->source_positions(), data->node_origins(),
- data->info()->GetPoisoningMitigationLevel(),
data->broker());
}
{
@@ -1899,7 +1903,7 @@ struct MemoryOptimizationPhase {
// Optimize allocations and load/store operations.
MemoryOptimizer optimizer(
- data->jsgraph(), temp_zone, data->info()->GetPoisoningMitigationLevel(),
+ data->jsgraph(), temp_zone,
data->info()->allocation_folding()
? MemoryLowering::AllocationFolding::kDoAllocationFolding
: MemoryLowering::AllocationFolding::kDontAllocationFolding,
@@ -1989,7 +1993,6 @@ struct ScheduledEffectControlLinearizationPhase {
// - lower simplified memory and select nodes to machine level nodes.
LowerToMachineSchedule(data->jsgraph(), data->schedule(), temp_zone,
data->source_positions(), data->node_origins(),
- data->info()->GetPoisoningMitigationLevel(),
data->broker());
// TODO(rmcilroy) Avoid having to rebuild rpo_order on schedule each time.
@@ -2205,7 +2208,6 @@ struct InstructionSelectionPhase {
data->assembler_options().enable_root_relative_access
? InstructionSelector::kEnableRootsRelativeAddressing
: InstructionSelector::kDisableRootsRelativeAddressing,
- data->info()->GetPoisoningMitigationLevel(),
data->info()->trace_turbo_json()
? InstructionSelector::kEnableTraceTurboJson
: InstructionSelector::kDisableTraceTurboJson);
@@ -2607,6 +2609,9 @@ CompilationJob::Status WasmHeapStubCompilationJob::FinalizeJobImpl(
tracing_scope.stream(), isolate);
}
#endif
+ PROFILE(isolate, CodeCreateEvent(CodeEventListener::STUB_TAG,
+ Handle<AbstractCode>::cast(code),
+ compilation_info()->GetDebugName().get()));
return SUCCEEDED;
}
return FAILED;
@@ -2750,8 +2755,8 @@ bool PipelineImpl::OptimizeGraph(Linkage* linkage) {
#if V8_ENABLE_WEBASSEMBLY
if (data->has_js_wasm_calls()) {
DCHECK(data->info()->inline_js_wasm_calls());
- Run<WasmInliningPhase>();
- RunPrintAndVerify(WasmInliningPhase::phase_name(), true);
+ Run<JSWasmInliningPhase>();
+ RunPrintAndVerify(JSWasmInliningPhase::phase_name(), true);
}
#endif // V8_ENABLE_WEBASSEMBLY
@@ -2853,8 +2858,8 @@ bool PipelineImpl::OptimizeGraphForMidTier(Linkage* linkage) {
#if V8_ENABLE_WEBASSEMBLY
if (data->has_js_wasm_calls()) {
DCHECK(data->info()->inline_js_wasm_calls());
- Run<WasmInliningPhase>();
- RunPrintAndVerify(WasmInliningPhase::phase_name(), true);
+ Run<JSWasmInliningPhase>();
+ RunPrintAndVerify(JSWasmInliningPhase::phase_name(), true);
}
#endif // V8_ENABLE_WEBASSEMBLY
@@ -2969,17 +2974,12 @@ int HashGraphForPGO(Graph* graph) {
MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
JSGraph* jsgraph, SourcePositionTable* source_positions, CodeKind kind,
- const char* debug_name, Builtin builtin,
- PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
+ const char* debug_name, Builtin builtin, const AssemblerOptions& options,
const ProfileDataFromFile* profile_data) {
OptimizedCompilationInfo info(base::CStrVector(debug_name), graph->zone(),
kind);
info.set_builtin(builtin);
- if (poisoning_level != PoisoningMitigationLevel::kDontPoison) {
- info.SetPoisoningMitigationLevel(poisoning_level);
- }
-
// Construct a pipeline for scheduling and code generation.
ZoneStats zone_stats(isolate->allocator());
NodeOriginTable node_origins(graph);
@@ -3195,7 +3195,8 @@ wasm::WasmCompilationResult Pipeline::GenerateCodeForWasmNativeStub(
// static
void Pipeline::GenerateCodeForWasmFunction(
- OptimizedCompilationInfo* info, MachineGraph* mcgraph,
+ OptimizedCompilationInfo* info, wasm::CompilationEnv* env,
+ const wasm::WireBytesStorage* wire_bytes_storage, MachineGraph* mcgraph,
CallDescriptor* call_descriptor, SourcePositionTable* source_positions,
NodeOriginTable* node_origins, wasm::FunctionBody function_body,
const wasm::WasmModule* module, int function_index,
@@ -3225,6 +3226,10 @@ void Pipeline::GenerateCodeForWasmFunction(
pipeline.Run<WasmLoopUnrollingPhase>(loop_info);
pipeline.RunPrintAndVerify(WasmLoopUnrollingPhase::phase_name(), true);
}
+ if (FLAG_wasm_inlining) {
+ pipeline.Run<WasmInliningPhase>(env, wire_bytes_storage);
+ pipeline.RunPrintAndVerify(WasmInliningPhase::phase_name(), true);
+ }
const bool is_asm_js = is_asmjs_module(module);
if (FLAG_wasm_opt || is_asm_js) {
@@ -3546,18 +3551,7 @@ bool PipelineImpl::SelectInstructions(Linkage* linkage) {
config.reset(RegisterConfiguration::RestrictGeneralRegisters(registers));
AllocateRegistersForTopTier(config.get(), call_descriptor, run_verifier);
} else {
- const RegisterConfiguration* config;
- if (data->info()->GetPoisoningMitigationLevel() !=
- PoisoningMitigationLevel::kDontPoison) {
-#ifdef V8_TARGET_ARCH_IA32
- FATAL("Poisoning is not supported on ia32.");
-#else
- config = RegisterConfiguration::Poisoning();
-#endif // V8_TARGET_ARCH_IA32
- } else {
- config = RegisterConfiguration::Default();
- }
-
+ const RegisterConfiguration* config = RegisterConfiguration::Default();
if (data->info()->IsTurboprop() && FLAG_turboprop_mid_tier_reg_alloc) {
AllocateRegistersForMidTier(config, call_descriptor, run_verifier);
} else {
@@ -3643,7 +3637,6 @@ std::ostream& operator<<(std::ostream& out,
out << "\"codeStartRegisterCheck\": "
<< s.offsets_info->code_start_register_check << ", ";
out << "\"deoptCheck\": " << s.offsets_info->deopt_check << ", ";
- out << "\"initPoison\": " << s.offsets_info->init_poison << ", ";
out << "\"blocksStart\": " << s.offsets_info->blocks_start << ", ";
out << "\"outOfLineCode\": " << s.offsets_info->out_of_line_code << ", ";
out << "\"deoptimizationExits\": " << s.offsets_info->deoptimization_exits
diff --git a/deps/v8/src/compiler/pipeline.h b/deps/v8/src/compiler/pipeline.h
index ea67b31e06..19fd715885 100644
--- a/deps/v8/src/compiler/pipeline.h
+++ b/deps/v8/src/compiler/pipeline.h
@@ -23,11 +23,13 @@ class ProfileDataFromFile;
class RegisterConfiguration;
namespace wasm {
+struct CompilationEnv;
struct FunctionBody;
class NativeModule;
struct WasmCompilationResult;
class WasmEngine;
struct WasmModule;
+class WireBytesStorage;
} // namespace wasm
namespace compiler {
@@ -54,7 +56,8 @@ class Pipeline : public AllStatic {
// Run the pipeline for the WebAssembly compilation info.
static void GenerateCodeForWasmFunction(
- OptimizedCompilationInfo* info, MachineGraph* mcgraph,
+ OptimizedCompilationInfo* info, wasm::CompilationEnv* env,
+ const wasm::WireBytesStorage* wire_bytes_storage, MachineGraph* mcgraph,
CallDescriptor* call_descriptor, SourcePositionTable* source_positions,
NodeOriginTable* node_origins, wasm::FunctionBody function_body,
const wasm::WasmModule* module, int function_index,
@@ -78,8 +81,7 @@ class Pipeline : public AllStatic {
static MaybeHandle<Code> GenerateCodeForCodeStub(
Isolate* isolate, CallDescriptor* call_descriptor, Graph* graph,
JSGraph* jsgraph, SourcePositionTable* source_positions, CodeKind kind,
- const char* debug_name, Builtin builtin,
- PoisoningMitigationLevel poisoning_level, const AssemblerOptions& options,
+ const char* debug_name, Builtin builtin, const AssemblerOptions& options,
const ProfileDataFromFile* profile_data);
// ---------------------------------------------------------------------------
diff --git a/deps/v8/src/compiler/property-access-builder.cc b/deps/v8/src/compiler/property-access-builder.cc
index a64521d6f6..456512a867 100644
--- a/deps/v8/src/compiler/property-access-builder.cc
+++ b/deps/v8/src/compiler/property-access-builder.cc
@@ -168,7 +168,9 @@ base::Optional<Node*> PropertyAccessBuilder::FoldLoadDictPrototypeConstant(
Map::GetConstructorFunction(
*map_handle, *broker()->target_native_context().object())
.value();
- map = MakeRef(broker(), constructor.initial_map());
+ // {constructor.initial_map()} is loaded/stored with acquire-release
+ // semantics for constructors.
+ map = MakeRefAssumeMemoryFence(broker(), constructor.initial_map());
DCHECK(map.object()->IsJSObjectMap());
}
dependencies()->DependOnConstantInDictionaryPrototypeChain(
@@ -235,7 +237,6 @@ Node* PropertyAccessBuilder::BuildLoadDataField(NameRef const& name,
Type::Any(),
MachineType::AnyTagged(),
kPointerWriteBarrier,
- LoadSensitivity::kCritical,
field_access.const_field_info};
storage = *effect = graph()->NewNode(
simplified()->LoadField(storage_access), storage, *effect, *control);
@@ -263,7 +264,6 @@ Node* PropertyAccessBuilder::BuildLoadDataField(NameRef const& name,
Type::OtherInternal(),
MachineType::TaggedPointer(),
kPointerWriteBarrier,
- LoadSensitivity::kCritical,
field_access.const_field_info};
storage = *effect = graph()->NewNode(
simplified()->LoadField(storage_access), storage, *effect, *control);
@@ -291,7 +291,6 @@ Node* PropertyAccessBuilder::BuildMinimorphicLoadDataField(
access_info.field_type(),
MachineType::TypeForRepresentation(field_representation),
kFullWriteBarrier,
- LoadSensitivity::kCritical,
ConstFieldInfo::None()};
return BuildLoadDataField(name, lookup_start_object, field_access,
access_info.is_inobject(), effect, control);
@@ -319,7 +318,6 @@ Node* PropertyAccessBuilder::BuildLoadDataField(
access_info.field_type(),
MachineType::TypeForRepresentation(field_representation),
kFullWriteBarrier,
- LoadSensitivity::kCritical,
access_info.GetConstFieldInfo()};
if (field_representation == MachineRepresentation::kTaggedPointer ||
field_representation == MachineRepresentation::kCompressedPointer) {
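The MakeRefAssumeMemoryFence change is justified by the comment above it: a constructor's initial_map is published with release semantics and read with acquire semantics, so a background compiler thread that observes the pointer also observes the fully initialized map. The generic publication pattern, with a hypothetical stand-in type:

#include <atomic>

struct Map {};                          // hypothetical stand-in
std::atomic<Map*> initial_map{nullptr};

void Publish(Map* m) {                  // writer (main thread)
  initial_map.store(m, std::memory_order_release);
}

Map* ReadFromCompilerThread() {         // reader (background thread)
  return initial_map.load(std::memory_order_acquire);
}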
diff --git a/deps/v8/src/compiler/raw-machine-assembler.cc b/deps/v8/src/compiler/raw-machine-assembler.cc
index 7ed217d4e3..383d63dd69 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.cc
+++ b/deps/v8/src/compiler/raw-machine-assembler.cc
@@ -18,8 +18,7 @@ namespace compiler {
RawMachineAssembler::RawMachineAssembler(
Isolate* isolate, Graph* graph, CallDescriptor* call_descriptor,
MachineRepresentation word, MachineOperatorBuilder::Flags flags,
- MachineOperatorBuilder::AlignmentRequirements alignment_requirements,
- PoisoningMitigationLevel poisoning_level)
+ MachineOperatorBuilder::AlignmentRequirements alignment_requirements)
: isolate_(isolate),
graph_(graph),
schedule_(zone()->New<Schedule>(zone())),
@@ -30,8 +29,7 @@ RawMachineAssembler::RawMachineAssembler(
call_descriptor_(call_descriptor),
target_parameter_(nullptr),
parameters_(parameter_count(), zone()),
- current_block_(schedule()->start()),
- poisoning_level_(poisoning_level) {
+ current_block_(schedule()->start()) {
int param_count = static_cast<int>(parameter_count());
// Add an extra input for the JSFunction parameter to the start node.
graph->SetStart(graph->NewNode(common_.Start(param_count + 1)));
@@ -472,7 +470,7 @@ void RawMachineAssembler::MarkControlDeferred(Node* control_node) {
return;
case IrOpcode::kIfTrue: {
Node* branch = NodeProperties::GetControlInput(control_node);
- BranchHint hint = BranchOperatorInfoOf(branch->op()).hint;
+ BranchHint hint = BranchHintOf(branch->op());
if (hint == BranchHint::kTrue) {
// The other possibility is also deferred, so the responsible branch
// has to be before.
@@ -485,7 +483,7 @@ void RawMachineAssembler::MarkControlDeferred(Node* control_node) {
}
case IrOpcode::kIfFalse: {
Node* branch = NodeProperties::GetControlInput(control_node);
- BranchHint hint = BranchOperatorInfoOf(branch->op()).hint;
+ BranchHint hint = BranchHintOf(branch->op());
if (hint == BranchHint::kFalse) {
// The other possibility is also deferred, so the responsible branch
// has to be before.
@@ -516,11 +514,10 @@ void RawMachineAssembler::MarkControlDeferred(Node* control_node) {
}
}
- BranchOperatorInfo info = BranchOperatorInfoOf(responsible_branch->op());
- if (info.hint == new_branch_hint) return;
- NodeProperties::ChangeOp(
- responsible_branch,
- common()->Branch(new_branch_hint, info.is_safety_check));
+ BranchHint hint = BranchHintOf(responsible_branch->op());
+ if (hint == new_branch_hint) return;
+ NodeProperties::ChangeOp(responsible_branch,
+ common()->Branch(new_branch_hint));
}
Node* RawMachineAssembler::TargetParameter() {
@@ -544,9 +541,7 @@ void RawMachineAssembler::Goto(RawMachineLabel* label) {
void RawMachineAssembler::Branch(Node* condition, RawMachineLabel* true_val,
RawMachineLabel* false_val) {
DCHECK(current_block_ != schedule()->end());
- Node* branch = MakeNode(
- common()->Branch(BranchHint::kNone, IsSafetyCheck::kNoSafetyCheck), 1,
- &condition);
+ Node* branch = MakeNode(common()->Branch(BranchHint::kNone), 1, &condition);
BasicBlock* true_block = schedule()->NewBasicBlock();
BasicBlock* false_block = schedule()->NewBasicBlock();
schedule()->AddBranch(CurrentBlock(), branch, true_block, false_block);
diff --git a/deps/v8/src/compiler/raw-machine-assembler.h b/deps/v8/src/compiler/raw-machine-assembler.h
index a811fa7bf9..f0bb6e0425 100644
--- a/deps/v8/src/compiler/raw-machine-assembler.h
+++ b/deps/v8/src/compiler/raw-machine-assembler.h
@@ -52,9 +52,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
MachineOperatorBuilder::Flag::kNoFlags,
MachineOperatorBuilder::AlignmentRequirements alignment_requirements =
MachineOperatorBuilder::AlignmentRequirements::
- FullUnalignedAccessSupport(),
- PoisoningMitigationLevel poisoning_level =
- PoisoningMitigationLevel::kPoisonCriticalOnly);
+ FullUnalignedAccessSupport());
~RawMachineAssembler() = default;
RawMachineAssembler(const RawMachineAssembler&) = delete;
@@ -67,7 +65,6 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
CommonOperatorBuilder* common() { return &common_; }
SimplifiedOperatorBuilder* simplified() { return &simplified_; }
CallDescriptor* call_descriptor() const { return call_descriptor_; }
- PoisoningMitigationLevel poisoning_level() const { return poisoning_level_; }
// Only used for tests: Finalizes the schedule and exports it to be used for
// code generation. Note that this RawMachineAssembler becomes invalid after
@@ -132,19 +129,11 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
}
// Memory Operations.
- Node* Load(MachineType type, Node* base,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
- return Load(type, base, IntPtrConstant(0), needs_poisoning);
+ Node* Load(MachineType type, Node* base) {
+ return Load(type, base, IntPtrConstant(0));
}
- Node* Load(MachineType type, Node* base, Node* index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
+ Node* Load(MachineType type, Node* base, Node* index) {
const Operator* op = machine()->Load(type);
- CHECK_NE(PoisoningMitigationLevel::kPoisonAll, poisoning_level_);
- if (needs_poisoning == LoadSensitivity::kCritical &&
- poisoning_level_ == PoisoningMitigationLevel::kPoisonCriticalOnly) {
- op = machine()->PoisonedLoad(type);
- }
-
Node* load = AddNode(op, base, index);
return load;
}
@@ -174,10 +163,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
bool IsMapOffsetConstantMinusTag(int offset) {
return offset == HeapObject::kMapOffset - kHeapObjectTag;
}
- Node* LoadFromObject(
- MachineType type, Node* base, Node* offset,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe) {
- CHECK_EQ(needs_poisoning, LoadSensitivity::kSafe);
+ Node* LoadFromObject(MachineType type, Node* base, Node* offset) {
DCHECK_IMPLIES(V8_MAP_PACKING_BOOL && IsMapOffsetConstantMinusTag(offset),
type == MachineType::MapInHeader());
ObjectAccess access = {type, WriteBarrierKind::kNoWriteBarrier};
@@ -253,20 +239,20 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
}
// Atomic memory operations.
- Node* AtomicLoad(MachineType type, Node* base, Node* index) {
- DCHECK_NE(type.representation(), MachineRepresentation::kWord64);
- return AddNode(machine()->Word32AtomicLoad(type), base, index);
+ Node* AtomicLoad(AtomicLoadParameters rep, Node* base, Node* index) {
+ DCHECK_NE(rep.representation().representation(),
+ MachineRepresentation::kWord64);
+ return AddNode(machine()->Word32AtomicLoad(rep), base, index);
}
- Node* AtomicLoad64(Node* base, Node* index) {
+ Node* AtomicLoad64(AtomicLoadParameters rep, Node* base, Node* index) {
if (machine()->Is64()) {
// This uses Uint64() intentionally: AtomicLoad is not implemented for
// Int64(), which is fine because the machine instruction only cares
// about words.
- return AddNode(machine()->Word64AtomicLoad(MachineType::Uint64()), base,
- index);
+ return AddNode(machine()->Word64AtomicLoad(rep), base, index);
} else {
- return AddNode(machine()->Word32AtomicPairLoad(), base, index);
+ return AddNode(machine()->Word32AtomicPairLoad(rep.order()), base, index);
}
}
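Editor's note: the old API took a bare MachineType; AtomicLoadParameters also carries the memory order, so the 32-bit pair-load fallback above can forward `rep.order()`. A rough analogy in standard C++, where the order likewise travels with the load (illustrative only; AtomicLoadParameters itself is V8-internal):

    #include <atomic>
    #include <cstdint>

    struct LoadParams {
      std::memory_order order;  // bundled with the operation, not implied
    };

    uint64_t Load64(const std::atomic<uint64_t>& cell, LoadParams p) {
      return cell.load(p.order);
    }

    int main() {
      std::atomic<uint64_t> cell{42};
      return Load64(cell, {std::memory_order_seq_cst}) == 42 ? 0 : 1;
    }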
@@ -276,22 +262,24 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
#define VALUE_HALVES value, value_high
#endif
- Node* AtomicStore(MachineRepresentation rep, Node* base, Node* index,
+ Node* AtomicStore(AtomicStoreParameters params, Node* base, Node* index,
Node* value) {
DCHECK(!IsMapOffsetConstantMinusTag(index));
- DCHECK_NE(rep, MachineRepresentation::kWord64);
- return AddNode(machine()->Word32AtomicStore(rep), base, index, value);
+ DCHECK_NE(params.representation(), MachineRepresentation::kWord64);
+ return AddNode(machine()->Word32AtomicStore(params), base, index, value);
}
- Node* AtomicStore64(Node* base, Node* index, Node* value, Node* value_high) {
+ Node* AtomicStore64(AtomicStoreParameters params, Node* base, Node* index,
+ Node* value, Node* value_high) {
if (machine()->Is64()) {
DCHECK_NULL(value_high);
- return AddNode(
- machine()->Word64AtomicStore(MachineRepresentation::kWord64), base,
- index, value);
+ return AddNode(machine()->Word64AtomicStore(params), base, index, value);
} else {
- return AddNode(machine()->Word32AtomicPairStore(), base, index,
- VALUE_HALVES);
+ DCHECK(params.representation() != MachineRepresentation::kTaggedPointer &&
+ params.representation() != MachineRepresentation::kTaggedSigned &&
+ params.representation() != MachineRepresentation::kTagged);
+ return AddNode(machine()->Word32AtomicPairStore(params.order()), base,
+ index, VALUE_HALVES);
}
}
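Editor's note: on 32-bit targets the Word64 store above lowers to a pair store of two 32-bit halves (the VALUE_HALVES macro). The split itself is ordinary bit arithmetic; a V8-independent sketch:

    #include <cassert>
    #include <cstdint>

    // Split a 64-bit value into the low/high words a pair store consumes.
    void SplitHalves(uint64_t value, uint32_t* lo, uint32_t* hi) {
      *lo = static_cast<uint32_t>(value);
      *hi = static_cast<uint32_t>(value >> 32);
    }

    int main() {
      uint32_t lo, hi;
      SplitHalves(0x1122334455667788ull, &lo, &hi);
      assert(lo == 0x55667788u && hi == 0x11223344u);
    }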
@@ -959,20 +947,6 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
return HeapConstant(isolate()->factory()->InternalizeUtf8String(string));
}
- Node* TaggedPoisonOnSpeculation(Node* value) {
- if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
- return AddNode(machine()->TaggedPoisonOnSpeculation(), value);
- }
- return value;
- }
-
- Node* WordPoisonOnSpeculation(Node* value) {
- if (poisoning_level_ != PoisoningMitigationLevel::kDontPoison) {
- return AddNode(machine()->WordPoisonOnSpeculation(), value);
- }
- return value;
- }
-
// Call a given call descriptor and the given arguments.
// The call target is passed as part of the {inputs} array.
Node* CallN(CallDescriptor* call_descriptor, int input_count,
@@ -1136,6 +1110,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
CommonOperatorBuilder* common);
Isolate* isolate_;
+
Graph* graph_;
Schedule* schedule_;
SourcePositionTable* source_positions_;
@@ -1146,7 +1121,6 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
Node* target_parameter_;
NodeVector parameters_;
BasicBlock* current_block_;
- PoisoningMitigationLevel poisoning_level_;
};
class V8_EXPORT_PRIVATE RawMachineLabel final {
diff --git a/deps/v8/src/compiler/simplified-lowering.cc b/deps/v8/src/compiler/simplified-lowering.cc
index 1c07a23dde..6416eed376 100644
--- a/deps/v8/src/compiler/simplified-lowering.cc
+++ b/deps/v8/src/compiler/simplified-lowering.cc
@@ -1735,11 +1735,9 @@ class RepresentationSelector {
VisitBinop<T>(node, UseInfo::TruncatingWord32(),
MachineRepresentation::kWord32);
if (lower<T>()) {
- if (lowering->poisoning_level_ ==
- PoisoningMitigationLevel::kDontPoison &&
- (index_type.IsNone() || length_type.IsNone() ||
- (index_type.Min() >= 0.0 &&
- index_type.Max() < length_type.Min()))) {
+ if (index_type.IsNone() || length_type.IsNone() ||
+ (index_type.Min() >= 0.0 &&
+ index_type.Max() < length_type.Min())) {
// The bounds check is redundant if we already know that
// the index is within the bounds of [0.0, length[.
// TODO(neis): Move this into TypedOptimization?
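Editor's note: with the poisoning condition gone, the check is elided purely from type information: the index range must be provably non-negative and strictly below the smallest possible length. The predicate extracted into plain C++ (field names are illustrative, not the typer's):

    // True when a bounds check "index < length" is provably redundant.
    // A None range means the code is unreachable, so the check can go too.
    struct Range {
      bool is_none;
      double min, max;
    };

    bool BoundsCheckRedundant(Range index, Range length) {
      if (index.is_none || length.is_none) return true;  // dead code
      return index.min >= 0.0 && index.max < length.min;
    }

    int main() {
      Range idx{false, 0.0, 9.0}, len{false, 10.0, 16.0};
      return BoundsCheckRedundant(idx, len) ? 0 : 1;  // 0 <= i <= 9 < 10
    }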
@@ -3181,11 +3179,6 @@ class RepresentationSelector {
}
case IrOpcode::kCheckBounds:
return VisitCheckBounds<T>(node, lowering);
- case IrOpcode::kPoisonIndex: {
- VisitUnop<T>(node, UseInfo::TruncatingWord32(),
- MachineRepresentation::kWord32);
- return;
- }
case IrOpcode::kCheckHeapObject: {
if (InputCannotBe(node, Type::SignedSmall())) {
VisitUnop<T>(node, UseInfo::AnyTagged(),
@@ -3835,7 +3828,7 @@ class RepresentationSelector {
case IrOpcode::kDateNow:
VisitInputs<T>(node);
- return SetOutput<T>(node, MachineRepresentation::kTaggedPointer);
+ return SetOutput<T>(node, MachineRepresentation::kTagged);
case IrOpcode::kFrameState:
return VisitFrameState<T>(FrameState{node});
case IrOpcode::kStateValues:
@@ -4225,18 +4218,19 @@ void RepresentationSelector::InsertUnreachableIfNecessary<LOWER>(Node* node) {
}
}
-SimplifiedLowering::SimplifiedLowering(
- JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone,
- SourcePositionTable* source_positions, NodeOriginTable* node_origins,
- PoisoningMitigationLevel poisoning_level, TickCounter* tick_counter,
- Linkage* linkage, ObserveNodeManager* observe_node_manager)
+SimplifiedLowering::SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker,
+ Zone* zone,
+ SourcePositionTable* source_positions,
+ NodeOriginTable* node_origins,
+ TickCounter* tick_counter,
+ Linkage* linkage,
+ ObserveNodeManager* observe_node_manager)
: jsgraph_(jsgraph),
broker_(broker),
zone_(zone),
type_cache_(TypeCache::Get()),
source_positions_(source_positions),
node_origins_(node_origins),
- poisoning_level_(poisoning_level),
tick_counter_(tick_counter),
linkage_(linkage),
observe_node_manager_(observe_node_manager) {}
diff --git a/deps/v8/src/compiler/simplified-lowering.h b/deps/v8/src/compiler/simplified-lowering.h
index 54017b34f7..f60bc1a7e3 100644
--- a/deps/v8/src/compiler/simplified-lowering.h
+++ b/deps/v8/src/compiler/simplified-lowering.h
@@ -31,7 +31,6 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
SimplifiedLowering(JSGraph* jsgraph, JSHeapBroker* broker, Zone* zone,
SourcePositionTable* source_position,
NodeOriginTable* node_origins,
- PoisoningMitigationLevel poisoning_level,
TickCounter* tick_counter, Linkage* linkage,
ObserveNodeManager* observe_node_manager = nullptr);
~SimplifiedLowering() = default;
@@ -83,8 +82,6 @@ class V8_EXPORT_PRIVATE SimplifiedLowering final {
SourcePositionTable* source_positions_;
NodeOriginTable* node_origins_;
- PoisoningMitigationLevel poisoning_level_;
-
TickCounter* const tick_counter_;
Linkage* const linkage_;
diff --git a/deps/v8/src/compiler/simplified-operator.cc b/deps/v8/src/compiler/simplified-operator.cc
index 9c4f8f083a..9461194b55 100644
--- a/deps/v8/src/compiler/simplified-operator.cc
+++ b/deps/v8/src/compiler/simplified-operator.cc
@@ -73,22 +73,6 @@ size_t hash_value(FieldAccess const& access) {
access.is_store_in_literal);
}
-size_t hash_value(LoadSensitivity load_sensitivity) {
- return static_cast<size_t>(load_sensitivity);
-}
-
-std::ostream& operator<<(std::ostream& os, LoadSensitivity load_sensitivity) {
- switch (load_sensitivity) {
- case LoadSensitivity::kCritical:
- return os << "Critical";
- case LoadSensitivity::kSafe:
- return os << "Safe";
- case LoadSensitivity::kUnsafe:
- return os << "Unsafe";
- }
- UNREACHABLE();
-}
-
std::ostream& operator<<(std::ostream& os, FieldAccess const& access) {
os << "[" << access.base_is_tagged << ", " << access.offset << ", ";
#ifdef OBJECT_PRINT
@@ -107,9 +91,6 @@ std::ostream& operator<<(std::ostream& os, FieldAccess const& access) {
if (access.is_store_in_literal) {
os << " (store in literal)";
}
- if (FLAG_untrusted_code_mitigations) {
- os << ", " << access.load_sensitivity;
- }
os << "]";
return os;
}
@@ -145,9 +126,6 @@ std::ostream& operator<<(std::ostream& os, ElementAccess const& access) {
os << access.base_is_tagged << ", " << access.header_size << ", "
<< access.type << ", " << access.machine_type << ", "
<< access.write_barrier_kind;
- if (FLAG_untrusted_code_mitigations) {
- os << ", " << access.load_sensitivity;
- }
return os;
}
@@ -719,129 +697,128 @@ bool operator==(CheckMinusZeroParameters const& lhs,
return lhs.mode() == rhs.mode() && lhs.feedback() == rhs.feedback();
}
-#define PURE_OP_LIST(V) \
- V(BooleanNot, Operator::kNoProperties, 1, 0) \
- V(NumberEqual, Operator::kCommutative, 2, 0) \
- V(NumberLessThan, Operator::kNoProperties, 2, 0) \
- V(NumberLessThanOrEqual, Operator::kNoProperties, 2, 0) \
- V(NumberAdd, Operator::kCommutative, 2, 0) \
- V(NumberSubtract, Operator::kNoProperties, 2, 0) \
- V(NumberMultiply, Operator::kCommutative, 2, 0) \
- V(NumberDivide, Operator::kNoProperties, 2, 0) \
- V(NumberModulus, Operator::kNoProperties, 2, 0) \
- V(NumberBitwiseOr, Operator::kCommutative, 2, 0) \
- V(NumberBitwiseXor, Operator::kCommutative, 2, 0) \
- V(NumberBitwiseAnd, Operator::kCommutative, 2, 0) \
- V(NumberShiftLeft, Operator::kNoProperties, 2, 0) \
- V(NumberShiftRight, Operator::kNoProperties, 2, 0) \
- V(NumberShiftRightLogical, Operator::kNoProperties, 2, 0) \
- V(NumberImul, Operator::kCommutative, 2, 0) \
- V(NumberAbs, Operator::kNoProperties, 1, 0) \
- V(NumberClz32, Operator::kNoProperties, 1, 0) \
- V(NumberCeil, Operator::kNoProperties, 1, 0) \
- V(NumberFloor, Operator::kNoProperties, 1, 0) \
- V(NumberFround, Operator::kNoProperties, 1, 0) \
- V(NumberAcos, Operator::kNoProperties, 1, 0) \
- V(NumberAcosh, Operator::kNoProperties, 1, 0) \
- V(NumberAsin, Operator::kNoProperties, 1, 0) \
- V(NumberAsinh, Operator::kNoProperties, 1, 0) \
- V(NumberAtan, Operator::kNoProperties, 1, 0) \
- V(NumberAtan2, Operator::kNoProperties, 2, 0) \
- V(NumberAtanh, Operator::kNoProperties, 1, 0) \
- V(NumberCbrt, Operator::kNoProperties, 1, 0) \
- V(NumberCos, Operator::kNoProperties, 1, 0) \
- V(NumberCosh, Operator::kNoProperties, 1, 0) \
- V(NumberExp, Operator::kNoProperties, 1, 0) \
- V(NumberExpm1, Operator::kNoProperties, 1, 0) \
- V(NumberLog, Operator::kNoProperties, 1, 0) \
- V(NumberLog1p, Operator::kNoProperties, 1, 0) \
- V(NumberLog10, Operator::kNoProperties, 1, 0) \
- V(NumberLog2, Operator::kNoProperties, 1, 0) \
- V(NumberMax, Operator::kNoProperties, 2, 0) \
- V(NumberMin, Operator::kNoProperties, 2, 0) \
- V(NumberPow, Operator::kNoProperties, 2, 0) \
- V(NumberRound, Operator::kNoProperties, 1, 0) \
- V(NumberSign, Operator::kNoProperties, 1, 0) \
- V(NumberSin, Operator::kNoProperties, 1, 0) \
- V(NumberSinh, Operator::kNoProperties, 1, 0) \
- V(NumberSqrt, Operator::kNoProperties, 1, 0) \
- V(NumberTan, Operator::kNoProperties, 1, 0) \
- V(NumberTanh, Operator::kNoProperties, 1, 0) \
- V(NumberTrunc, Operator::kNoProperties, 1, 0) \
- V(NumberToBoolean, Operator::kNoProperties, 1, 0) \
- V(NumberToInt32, Operator::kNoProperties, 1, 0) \
- V(NumberToString, Operator::kNoProperties, 1, 0) \
- V(NumberToUint32, Operator::kNoProperties, 1, 0) \
- V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \
- V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
- V(BigIntNegate, Operator::kNoProperties, 1, 0) \
- V(StringConcat, Operator::kNoProperties, 3, 0) \
- V(StringToNumber, Operator::kNoProperties, 1, 0) \
- V(StringFromSingleCharCode, Operator::kNoProperties, 1, 0) \
- V(StringFromSingleCodePoint, Operator::kNoProperties, 1, 0) \
- V(StringIndexOf, Operator::kNoProperties, 3, 0) \
- V(StringLength, Operator::kNoProperties, 1, 0) \
- V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \
- V(StringToUpperCaseIntl, Operator::kNoProperties, 1, 0) \
- V(TypeOf, Operator::kNoProperties, 1, 1) \
- V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \
- V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \
- V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedSignedToInt64, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToInt64, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToUint32, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToTaggedSigned, Operator::kNoProperties, 1, 0) \
- V(ChangeFloat64ToTaggedPointer, Operator::kNoProperties, 1, 0) \
- V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0) \
- V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0) \
- V(ChangeInt64ToTagged, Operator::kNoProperties, 1, 0) \
- V(ChangeUint32ToTagged, Operator::kNoProperties, 1, 0) \
- V(ChangeUint64ToTagged, Operator::kNoProperties, 1, 0) \
- V(ChangeTaggedToBit, Operator::kNoProperties, 1, 0) \
- V(ChangeBitToTagged, Operator::kNoProperties, 1, 0) \
- V(TruncateBigIntToUint64, Operator::kNoProperties, 1, 0) \
- V(ChangeUint64ToBigInt, Operator::kNoProperties, 1, 0) \
- V(TruncateTaggedToBit, Operator::kNoProperties, 1, 0) \
- V(TruncateTaggedPointerToBit, Operator::kNoProperties, 1, 0) \
- V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0) \
- V(TruncateTaggedToFloat64, Operator::kNoProperties, 1, 0) \
- V(ObjectIsArrayBufferView, Operator::kNoProperties, 1, 0) \
- V(ObjectIsBigInt, Operator::kNoProperties, 1, 0) \
- V(ObjectIsCallable, Operator::kNoProperties, 1, 0) \
- V(ObjectIsConstructor, Operator::kNoProperties, 1, 0) \
- V(ObjectIsDetectableCallable, Operator::kNoProperties, 1, 0) \
- V(ObjectIsMinusZero, Operator::kNoProperties, 1, 0) \
- V(NumberIsMinusZero, Operator::kNoProperties, 1, 0) \
- V(ObjectIsNaN, Operator::kNoProperties, 1, 0) \
- V(NumberIsNaN, Operator::kNoProperties, 1, 0) \
- V(ObjectIsNonCallable, Operator::kNoProperties, 1, 0) \
- V(ObjectIsNumber, Operator::kNoProperties, 1, 0) \
- V(ObjectIsReceiver, Operator::kNoProperties, 1, 0) \
- V(ObjectIsSmi, Operator::kNoProperties, 1, 0) \
- V(ObjectIsString, Operator::kNoProperties, 1, 0) \
- V(ObjectIsSymbol, Operator::kNoProperties, 1, 0) \
- V(ObjectIsUndetectable, Operator::kNoProperties, 1, 0) \
- V(NumberIsFloat64Hole, Operator::kNoProperties, 1, 0) \
- V(NumberIsFinite, Operator::kNoProperties, 1, 0) \
- V(ObjectIsFiniteNumber, Operator::kNoProperties, 1, 0) \
- V(NumberIsInteger, Operator::kNoProperties, 1, 0) \
- V(ObjectIsSafeInteger, Operator::kNoProperties, 1, 0) \
- V(NumberIsSafeInteger, Operator::kNoProperties, 1, 0) \
- V(ObjectIsInteger, Operator::kNoProperties, 1, 0) \
- V(ConvertTaggedHoleToUndefined, Operator::kNoProperties, 1, 0) \
- V(SameValue, Operator::kCommutative, 2, 0) \
- V(SameValueNumbersOnly, Operator::kCommutative, 2, 0) \
- V(NumberSameValue, Operator::kCommutative, 2, 0) \
- V(ReferenceEqual, Operator::kCommutative, 2, 0) \
- V(StringEqual, Operator::kCommutative, 2, 0) \
- V(StringLessThan, Operator::kNoProperties, 2, 0) \
- V(StringLessThanOrEqual, Operator::kNoProperties, 2, 0) \
- V(ToBoolean, Operator::kNoProperties, 1, 0) \
- V(NewConsString, Operator::kNoProperties, 3, 0) \
- V(PoisonIndex, Operator::kNoProperties, 1, 0)
+#define PURE_OP_LIST(V) \
+ V(BooleanNot, Operator::kNoProperties, 1, 0) \
+ V(NumberEqual, Operator::kCommutative, 2, 0) \
+ V(NumberLessThan, Operator::kNoProperties, 2, 0) \
+ V(NumberLessThanOrEqual, Operator::kNoProperties, 2, 0) \
+ V(NumberAdd, Operator::kCommutative, 2, 0) \
+ V(NumberSubtract, Operator::kNoProperties, 2, 0) \
+ V(NumberMultiply, Operator::kCommutative, 2, 0) \
+ V(NumberDivide, Operator::kNoProperties, 2, 0) \
+ V(NumberModulus, Operator::kNoProperties, 2, 0) \
+ V(NumberBitwiseOr, Operator::kCommutative, 2, 0) \
+ V(NumberBitwiseXor, Operator::kCommutative, 2, 0) \
+ V(NumberBitwiseAnd, Operator::kCommutative, 2, 0) \
+ V(NumberShiftLeft, Operator::kNoProperties, 2, 0) \
+ V(NumberShiftRight, Operator::kNoProperties, 2, 0) \
+ V(NumberShiftRightLogical, Operator::kNoProperties, 2, 0) \
+ V(NumberImul, Operator::kCommutative, 2, 0) \
+ V(NumberAbs, Operator::kNoProperties, 1, 0) \
+ V(NumberClz32, Operator::kNoProperties, 1, 0) \
+ V(NumberCeil, Operator::kNoProperties, 1, 0) \
+ V(NumberFloor, Operator::kNoProperties, 1, 0) \
+ V(NumberFround, Operator::kNoProperties, 1, 0) \
+ V(NumberAcos, Operator::kNoProperties, 1, 0) \
+ V(NumberAcosh, Operator::kNoProperties, 1, 0) \
+ V(NumberAsin, Operator::kNoProperties, 1, 0) \
+ V(NumberAsinh, Operator::kNoProperties, 1, 0) \
+ V(NumberAtan, Operator::kNoProperties, 1, 0) \
+ V(NumberAtan2, Operator::kNoProperties, 2, 0) \
+ V(NumberAtanh, Operator::kNoProperties, 1, 0) \
+ V(NumberCbrt, Operator::kNoProperties, 1, 0) \
+ V(NumberCos, Operator::kNoProperties, 1, 0) \
+ V(NumberCosh, Operator::kNoProperties, 1, 0) \
+ V(NumberExp, Operator::kNoProperties, 1, 0) \
+ V(NumberExpm1, Operator::kNoProperties, 1, 0) \
+ V(NumberLog, Operator::kNoProperties, 1, 0) \
+ V(NumberLog1p, Operator::kNoProperties, 1, 0) \
+ V(NumberLog10, Operator::kNoProperties, 1, 0) \
+ V(NumberLog2, Operator::kNoProperties, 1, 0) \
+ V(NumberMax, Operator::kNoProperties, 2, 0) \
+ V(NumberMin, Operator::kNoProperties, 2, 0) \
+ V(NumberPow, Operator::kNoProperties, 2, 0) \
+ V(NumberRound, Operator::kNoProperties, 1, 0) \
+ V(NumberSign, Operator::kNoProperties, 1, 0) \
+ V(NumberSin, Operator::kNoProperties, 1, 0) \
+ V(NumberSinh, Operator::kNoProperties, 1, 0) \
+ V(NumberSqrt, Operator::kNoProperties, 1, 0) \
+ V(NumberTan, Operator::kNoProperties, 1, 0) \
+ V(NumberTanh, Operator::kNoProperties, 1, 0) \
+ V(NumberTrunc, Operator::kNoProperties, 1, 0) \
+ V(NumberToBoolean, Operator::kNoProperties, 1, 0) \
+ V(NumberToInt32, Operator::kNoProperties, 1, 0) \
+ V(NumberToString, Operator::kNoProperties, 1, 0) \
+ V(NumberToUint32, Operator::kNoProperties, 1, 0) \
+ V(NumberToUint8Clamped, Operator::kNoProperties, 1, 0) \
+ V(NumberSilenceNaN, Operator::kNoProperties, 1, 0) \
+ V(BigIntNegate, Operator::kNoProperties, 1, 0) \
+ V(StringConcat, Operator::kNoProperties, 3, 0) \
+ V(StringToNumber, Operator::kNoProperties, 1, 0) \
+ V(StringFromSingleCharCode, Operator::kNoProperties, 1, 0) \
+ V(StringFromSingleCodePoint, Operator::kNoProperties, 1, 0) \
+ V(StringIndexOf, Operator::kNoProperties, 3, 0) \
+ V(StringLength, Operator::kNoProperties, 1, 0) \
+ V(StringToLowerCaseIntl, Operator::kNoProperties, 1, 0) \
+ V(StringToUpperCaseIntl, Operator::kNoProperties, 1, 0) \
+ V(TypeOf, Operator::kNoProperties, 1, 1) \
+ V(PlainPrimitiveToNumber, Operator::kNoProperties, 1, 0) \
+ V(PlainPrimitiveToWord32, Operator::kNoProperties, 1, 0) \
+ V(PlainPrimitiveToFloat64, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedSignedToInt32, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedSignedToInt64, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToInt32, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToInt64, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToUint32, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToFloat64, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToTaggedSigned, Operator::kNoProperties, 1, 0) \
+ V(ChangeFloat64ToTaggedPointer, Operator::kNoProperties, 1, 0) \
+ V(ChangeInt31ToTaggedSigned, Operator::kNoProperties, 1, 0) \
+ V(ChangeInt32ToTagged, Operator::kNoProperties, 1, 0) \
+ V(ChangeInt64ToTagged, Operator::kNoProperties, 1, 0) \
+ V(ChangeUint32ToTagged, Operator::kNoProperties, 1, 0) \
+ V(ChangeUint64ToTagged, Operator::kNoProperties, 1, 0) \
+ V(ChangeTaggedToBit, Operator::kNoProperties, 1, 0) \
+ V(ChangeBitToTagged, Operator::kNoProperties, 1, 0) \
+ V(TruncateBigIntToUint64, Operator::kNoProperties, 1, 0) \
+ V(ChangeUint64ToBigInt, Operator::kNoProperties, 1, 0) \
+ V(TruncateTaggedToBit, Operator::kNoProperties, 1, 0) \
+ V(TruncateTaggedPointerToBit, Operator::kNoProperties, 1, 0) \
+ V(TruncateTaggedToWord32, Operator::kNoProperties, 1, 0) \
+ V(TruncateTaggedToFloat64, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsArrayBufferView, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsBigInt, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsCallable, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsConstructor, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsDetectableCallable, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsMinusZero, Operator::kNoProperties, 1, 0) \
+ V(NumberIsMinusZero, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsNaN, Operator::kNoProperties, 1, 0) \
+ V(NumberIsNaN, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsNonCallable, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsNumber, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsReceiver, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsSmi, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsString, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsSymbol, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsUndetectable, Operator::kNoProperties, 1, 0) \
+ V(NumberIsFloat64Hole, Operator::kNoProperties, 1, 0) \
+ V(NumberIsFinite, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsFiniteNumber, Operator::kNoProperties, 1, 0) \
+ V(NumberIsInteger, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsSafeInteger, Operator::kNoProperties, 1, 0) \
+ V(NumberIsSafeInteger, Operator::kNoProperties, 1, 0) \
+ V(ObjectIsInteger, Operator::kNoProperties, 1, 0) \
+ V(ConvertTaggedHoleToUndefined, Operator::kNoProperties, 1, 0) \
+ V(SameValue, Operator::kCommutative, 2, 0) \
+ V(SameValueNumbersOnly, Operator::kCommutative, 2, 0) \
+ V(NumberSameValue, Operator::kCommutative, 2, 0) \
+ V(ReferenceEqual, Operator::kCommutative, 2, 0) \
+ V(StringEqual, Operator::kCommutative, 2, 0) \
+ V(StringLessThan, Operator::kNoProperties, 2, 0) \
+ V(StringLessThanOrEqual, Operator::kNoProperties, 2, 0) \
+ V(ToBoolean, Operator::kNoProperties, 1, 0) \
+ V(NewConsString, Operator::kNoProperties, 3, 0)
#define EFFECT_DEPENDENT_OP_LIST(V) \
V(BigIntAdd, Operator::kNoProperties, 2, 1) \
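Editor's note: PURE_OP_LIST is the classic X-macro pattern — the operator list is defined once and expanded several times (declarations, caches, printers), which is why dropping PoisonIndex only required deleting one line and re-aligning the continuation backslashes. A self-contained illustration of the pattern:

    #include <cstdio>

    #define COLOR_LIST(V) \
      V(Red)              \
      V(Green)            \
      V(Blue)

    // Expansion 1: an enum with one entry per list element.
    enum Color {
    #define DECLARE(name) k##name,
      COLOR_LIST(DECLARE)
    #undef DECLARE
    };

    // Expansion 2: a matching name table from the same list.
    const char* ColorName(Color c) {
      switch (c) {
    #define CASE(name) \
      case k##name:    \
        return #name;
        COLOR_LIST(CASE)
    #undef CASE
      }
      return "?";
    }

    int main() { std::puts(ColorName(kGreen)); }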
diff --git a/deps/v8/src/compiler/simplified-operator.h b/deps/v8/src/compiler/simplified-operator.h
index d7a5901448..0602b795a9 100644
--- a/deps/v8/src/compiler/simplified-operator.h
+++ b/deps/v8/src/compiler/simplified-operator.h
@@ -46,10 +46,6 @@ size_t hash_value(BaseTaggedness);
std::ostream& operator<<(std::ostream&, BaseTaggedness);
-size_t hash_value(LoadSensitivity);
-
-std::ostream& operator<<(std::ostream&, LoadSensitivity);
-
struct ConstFieldInfo {
// the map that introduced the const field, if any. An access is considered
// mutable iff the handle is null.
@@ -82,7 +78,6 @@ struct FieldAccess {
Type type; // type of the field.
MachineType machine_type; // machine type of the field.
WriteBarrierKind write_barrier_kind; // write barrier hint.
- LoadSensitivity load_sensitivity; // load safety for poisoning.
ConstFieldInfo const_field_info; // the constness of this access, and the
// field owner map, if the access is const
bool is_store_in_literal; // originates from a kStoreInLiteral access
@@ -96,14 +91,12 @@ struct FieldAccess {
type(Type::None()),
machine_type(MachineType::None()),
write_barrier_kind(kFullWriteBarrier),
- load_sensitivity(LoadSensitivity::kUnsafe),
const_field_info(ConstFieldInfo::None()),
is_store_in_literal(false) {}
FieldAccess(BaseTaggedness base_is_tagged, int offset, MaybeHandle<Name> name,
MaybeHandle<Map> map, Type type, MachineType machine_type,
WriteBarrierKind write_barrier_kind,
- LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe,
ConstFieldInfo const_field_info = ConstFieldInfo::None(),
bool is_store_in_literal = false
#ifdef V8_HEAP_SANDBOX
@@ -118,7 +111,6 @@ struct FieldAccess {
type(type),
machine_type(machine_type),
write_barrier_kind(write_barrier_kind),
- load_sensitivity(load_sensitivity),
const_field_info(const_field_info),
is_store_in_literal(is_store_in_literal)
#ifdef V8_HEAP_SANDBOX
@@ -162,25 +154,21 @@ struct ElementAccess {
Type type; // type of the element.
MachineType machine_type; // machine type of the element.
WriteBarrierKind write_barrier_kind; // write barrier hint.
- LoadSensitivity load_sensitivity; // load safety for poisoning.
ElementAccess()
: base_is_tagged(kTaggedBase),
header_size(0),
type(Type::None()),
machine_type(MachineType::None()),
- write_barrier_kind(kFullWriteBarrier),
- load_sensitivity(LoadSensitivity::kUnsafe) {}
+ write_barrier_kind(kFullWriteBarrier) {}
ElementAccess(BaseTaggedness base_is_tagged, int header_size, Type type,
- MachineType machine_type, WriteBarrierKind write_barrier_kind,
- LoadSensitivity load_sensitivity = LoadSensitivity::kUnsafe)
+ MachineType machine_type, WriteBarrierKind write_barrier_kind)
: base_is_tagged(base_is_tagged),
header_size(header_size),
type(type),
machine_type(machine_type),
- write_barrier_kind(write_barrier_kind),
- load_sensitivity(load_sensitivity) {}
+ write_barrier_kind(write_barrier_kind) {}
int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
};
@@ -926,7 +914,6 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* TruncateTaggedToBit();
const Operator* TruncateTaggedPointerToBit();
- const Operator* PoisonIndex();
const Operator* CompareMaps(ZoneHandleSet<Map>);
const Operator* MapGuard(ZoneHandleSet<Map> maps);
diff --git a/deps/v8/src/compiler/typed-optimization.cc b/deps/v8/src/compiler/typed-optimization.cc
index ce9b6fdb18..5025233c88 100644
--- a/deps/v8/src/compiler/typed-optimization.cc
+++ b/deps/v8/src/compiler/typed-optimization.cc
@@ -814,9 +814,9 @@ Reduction TypedOptimization::ReduceJSToNumberInput(Node* input) {
HeapObjectMatcher m(input);
if (m.HasResolvedValue() && m.Ref(broker()).IsString()) {
StringRef input_value = m.Ref(broker()).AsString();
- double number;
- ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING(number, input_value.ToNumber());
- return Replace(jsgraph()->Constant(number));
+ base::Optional<double> number = input_value.ToNumber();
+ if (!number.has_value()) return NoChange();
+ return Replace(jsgraph()->Constant(number.value()));
}
}
if (input_type.IsHeapConstant()) {
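Editor's note: the ASSIGN_RETURN_NO_CHANGE_IF_DATA_MISSING macro is replaced by an explicit optional check. The same control-flow shape in standard C++ with std::optional (base::Optional is V8's equivalent; Reduction/NoChange/Replace below are stand-ins):

    #include <optional>
    #include <string>

    struct Reduction { bool changed; double value; };
    Reduction NoChange() { return {false, 0.0}; }
    Reduction Replace(double v) { return {true, v}; }

    std::optional<double> ToNumber(const std::string& s) {
      try { return std::stod(s); } catch (...) { return std::nullopt; }
    }

    Reduction ReduceToNumber(const std::string& input) {
      std::optional<double> number = ToNumber(input);
      if (!number.has_value()) return NoChange();  // data missing: bail out
      return Replace(number.value());
    }

    int main() { return ReduceToNumber("1.5").changed ? 0 : 1; }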
diff --git a/deps/v8/src/compiler/typer.cc b/deps/v8/src/compiler/typer.cc
index 529f1cc7bb..a96d1ea981 100644
--- a/deps/v8/src/compiler/typer.cc
+++ b/deps/v8/src/compiler/typer.cc
@@ -882,9 +882,10 @@ bool Typer::Visitor::InductionVariablePhiTypeIsPrefixedPoint(
InductionVariable* induction_var) {
Node* node = induction_var->phi();
DCHECK_EQ(node->opcode(), IrOpcode::kInductionVariablePhi);
+ Node* arith = node->InputAt(1);
Type type = NodeProperties::GetType(node);
Type initial_type = Operand(node, 0);
- Node* arith = node->InputAt(1);
+ Type arith_type = Operand(node, 1);
Type increment_type = Operand(node, 2);
// Intersect {type} with useful bounds.
@@ -910,26 +911,30 @@ bool Typer::Visitor::InductionVariablePhiTypeIsPrefixedPoint(
type = Type::Intersect(type, bound_type, typer_->zone());
}
- // Apply ordinary typing to the "increment" operation.
- // clang-format off
- switch (arith->opcode()) {
+ if (arith_type.IsNone()) {
+ type = Type::None();
+ } else {
+ // Apply ordinary typing to the "increment" operation.
+ // clang-format off
+ switch (arith->opcode()) {
#define CASE(x) \
- case IrOpcode::k##x: \
- type = Type##x(type, increment_type); \
- break;
- CASE(JSAdd)
- CASE(JSSubtract)
- CASE(NumberAdd)
- CASE(NumberSubtract)
- CASE(SpeculativeNumberAdd)
- CASE(SpeculativeNumberSubtract)
- CASE(SpeculativeSafeIntegerAdd)
- CASE(SpeculativeSafeIntegerSubtract)
+ case IrOpcode::k##x: \
+ type = Type##x(type, increment_type); \
+ break;
+ CASE(JSAdd)
+ CASE(JSSubtract)
+ CASE(NumberAdd)
+ CASE(NumberSubtract)
+ CASE(SpeculativeNumberAdd)
+ CASE(SpeculativeNumberSubtract)
+ CASE(SpeculativeSafeIntegerAdd)
+ CASE(SpeculativeSafeIntegerSubtract)
#undef CASE
- default:
- UNREACHABLE();
+ default:
+ UNREACHABLE();
+ }
+ // clang-format on
}
- // clang-format on
type = Type::Union(initial_type, type, typer_->zone());
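Editor's note: the new guard handles an increment operation whose own type is None (unreachable code): rather than running the arithmetic typing rules on a bottom type, the whole phi is typed None. In lattice terms, None is absorbing — a tiny sketch:

    #include <cassert>

    // Minimal two-level type lattice: None (bottom) absorbs everything.
    struct Type {
      bool is_none;
      static Type None() { return {true}; }
      static Type Number() { return {false}; }
    };

    Type TypeIncrement(Type arith, Type input) {
      if (arith.is_none) return Type::None();  // unreachable increment
      return input;  // stand-in for the real Add/Subtract typing rules
    }

    int main() {
      assert(TypeIncrement(Type::None(), Type::Number()).is_none);
    }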
@@ -2065,10 +2070,6 @@ Type Typer::Visitor::TypeStringLength(Node* node) {
Type Typer::Visitor::TypeStringSubstring(Node* node) { return Type::String(); }
-Type Typer::Visitor::TypePoisonIndex(Node* node) {
- return Type::Union(Operand(node, 0), typer_->cache_->kSingletonZero, zone());
-}
-
Type Typer::Visitor::TypeCheckBounds(Node* node) {
return typer_->operation_typer_.CheckBounds(Operand(node, 0),
Operand(node, 1));
diff --git a/deps/v8/src/compiler/verifier.cc b/deps/v8/src/compiler/verifier.cc
index f33edaa6c0..a0f2aa569d 100644
--- a/deps/v8/src/compiler/verifier.cc
+++ b/deps/v8/src/compiler/verifier.cc
@@ -1422,10 +1422,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
CheckValueInputIs(node, 1, TypeCache::Get()->kPositiveSafeInteger);
CheckTypeIs(node, TypeCache::Get()->kPositiveSafeInteger);
break;
- case IrOpcode::kPoisonIndex:
- CheckValueInputIs(node, 0, Type::Unsigned32());
- CheckTypeIs(node, Type::Unsigned32());
- break;
case IrOpcode::kCheckClosure:
// Any -> Function
CheckValueInputIs(node, 0, Type::Any());
@@ -1641,7 +1637,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
// -----------------------
case IrOpcode::kLoad:
case IrOpcode::kLoadImmutable:
- case IrOpcode::kPoisonedLoad:
case IrOpcode::kProtectedLoad:
case IrOpcode::kProtectedStore:
case IrOpcode::kStore:
@@ -1817,9 +1812,6 @@ void Verifier::Visitor::Check(Node* node, const AllNodes& all) {
case IrOpcode::kWord32PairShl:
case IrOpcode::kWord32PairShr:
case IrOpcode::kWord32PairSar:
- case IrOpcode::kTaggedPoisonOnSpeculation:
- case IrOpcode::kWord32PoisonOnSpeculation:
- case IrOpcode::kWord64PoisonOnSpeculation:
case IrOpcode::kLoadStackCheckOffset:
case IrOpcode::kLoadFramePointer:
case IrOpcode::kLoadParentFramePointer:
diff --git a/deps/v8/src/compiler/wasm-compiler.cc b/deps/v8/src/compiler/wasm-compiler.cc
index f91c21fd1d..f6f6c3844f 100644
--- a/deps/v8/src/compiler/wasm-compiler.cc
+++ b/deps/v8/src/compiler/wasm-compiler.cc
@@ -44,6 +44,7 @@
#include "src/roots/roots.h"
#include "src/tracing/trace-event.h"
#include "src/trap-handler/trap-handler.h"
+#include "src/wasm/code-space-access.h"
#include "src/wasm/function-body-decoder-impl.h"
#include "src/wasm/function-compiler.h"
#include "src/wasm/graph-builder-interface.h"
@@ -196,14 +197,7 @@ class WasmGraphAssembler : public GraphAssembler {
return Call(call_descriptor, call_target, args...);
}
- void EnsureEnd() {
- if (graph()->end() == nullptr) {
- graph()->SetEnd(graph()->NewNode(mcgraph()->common()->End(0)));
- }
- }
-
void MergeControlToEnd(Node* node) {
- EnsureEnd();
NodeProperties::MergeControlToEnd(graph(), mcgraph()->common(), node);
}
@@ -212,7 +206,6 @@ class WasmGraphAssembler : public GraphAssembler {
if (FLAG_debug_code) {
auto ok = MakeLabel();
GotoIfNot(condition, &ok);
- EnsureEnd();
Unreachable();
Bind(&ok);
}
@@ -472,7 +465,6 @@ WasmGraphBuilder::WasmGraphBuilder(
mcgraph_(mcgraph),
env_(env),
has_simd_(ContainsSimd(sig)),
- untrusted_code_mitigations_(FLAG_untrusted_code_mitigations),
sig_(sig),
source_position_table_(source_position_table),
isolate_(isolate) {
@@ -501,6 +493,8 @@ void WasmGraphBuilder::Start(unsigned params) {
gasm_->LoadFunctionDataFromJSFunction(
Param(Linkage::kJSCallClosureParamIndex, "%closure")))
: Param(wasm::kWasmInstanceParameterIndex);
+
+ graph()->SetEnd(graph()->NewNode(mcgraph()->common()->End(0)));
}
Node* WasmGraphBuilder::Param(int index, const char* debug_name) {
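Editor's note: with EnsureEnd() deleted, the graph's End node is created unconditionally in Start(), turning a lazily established invariant into an eager one. The lazy-to-eager change in miniature (Graph/Node are placeholders, not the V8 classes):

    #include <cassert>
    #include <memory>

    struct Node {};

    struct Graph {
      std::unique_ptr<Node> end;
    };

    // Before: every caller had to "ensure" end existed before using it.
    // After: Start() establishes the invariant once, up front.
    void Start(Graph* g) {
      g->end = std::make_unique<Node>();
    }

    int main() {
      Graph g;
      Start(&g);
      assert(g.end != nullptr);  // holds for the rest of graph building
    }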
@@ -2901,13 +2895,13 @@ Node* WasmGraphBuilder::BuildCallNode(const wasm::FunctionSig* sig,
return call;
}
-Node* WasmGraphBuilder::BuildWasmCall(
- const wasm::FunctionSig* sig, base::Vector<Node*> args,
- base::Vector<Node*> rets, wasm::WasmCodePosition position,
- Node* instance_node, UseRetpoline use_retpoline, Node* frame_state) {
- CallDescriptor* call_descriptor =
- GetWasmCallDescriptor(mcgraph()->zone(), sig, use_retpoline,
- kWasmFunction, frame_state != nullptr);
+Node* WasmGraphBuilder::BuildWasmCall(const wasm::FunctionSig* sig,
+ base::Vector<Node*> args,
+ base::Vector<Node*> rets,
+ wasm::WasmCodePosition position,
+ Node* instance_node, Node* frame_state) {
+ CallDescriptor* call_descriptor = GetWasmCallDescriptor(
+ mcgraph()->zone(), sig, kWasmFunction, frame_state != nullptr);
const Operator* op = mcgraph()->common()->Call(call_descriptor);
Node* call =
BuildCallNode(sig, args, position, instance_node, op, frame_state);
@@ -2935,10 +2929,9 @@ Node* WasmGraphBuilder::BuildWasmCall(
Node* WasmGraphBuilder::BuildWasmReturnCall(const wasm::FunctionSig* sig,
base::Vector<Node*> args,
wasm::WasmCodePosition position,
- Node* instance_node,
- UseRetpoline use_retpoline) {
+ Node* instance_node) {
CallDescriptor* call_descriptor =
- GetWasmCallDescriptor(mcgraph()->zone(), sig, use_retpoline);
+ GetWasmCallDescriptor(mcgraph()->zone(), sig);
const Operator* op = mcgraph()->common()->TailCall(call_descriptor);
Node* call = BuildCallNode(sig, args, position, instance_node, op);
@@ -2982,15 +2975,13 @@ Node* WasmGraphBuilder::BuildImportCall(const wasm::FunctionSig* sig,
Node* target_node = gasm_->LoadFromObject(
MachineType::Pointer(), imported_targets, func_index_times_pointersize);
args[0] = target_node;
- const UseRetpoline use_retpoline =
- untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline;
switch (continuation) {
case kCallContinues:
- return BuildWasmCall(sig, args, rets, position, ref_node, use_retpoline);
+ return BuildWasmCall(sig, args, rets, position, ref_node);
case kReturnCall:
DCHECK(rets.empty());
- return BuildWasmReturnCall(sig, args, position, ref_node, use_retpoline);
+ return BuildWasmReturnCall(sig, args, position, ref_node);
}
}
@@ -3010,7 +3001,7 @@ Node* WasmGraphBuilder::CallDirect(uint32_t index, base::Vector<Node*> args,
Address code = static_cast<Address>(index);
args[0] = mcgraph()->RelocatableIntPtrConstant(code, RelocInfo::WASM_CALL);
- return BuildWasmCall(sig, args, rets, position, nullptr, kNoRetpoline);
+ return BuildWasmCall(sig, args, rets, position, nullptr);
}
Node* WasmGraphBuilder::CallIndirect(uint32_t table_index, uint32_t sig_index,
@@ -3095,16 +3086,6 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
Node* in_bounds = gasm_->Uint32LessThan(key, ift_size);
TrapIfFalse(wasm::kTrapTableOutOfBounds, in_bounds, position);
- // Mask the key to prevent SSCA.
- if (untrusted_code_mitigations_) {
- // mask = ((key - size) & ~key) >> 31
- Node* neg_key = gasm_->Word32Xor(key, Int32Constant(-1));
- Node* masked_diff =
- gasm_->Word32And(gasm_->Int32Sub(key, ift_size), neg_key);
- Node* mask = gasm_->Word32Sar(masked_diff, Int32Constant(31));
- key = gasm_->Word32And(key, mask);
- }
-
const wasm::ValueType table_type = env_->module->tables[table_index].type;
// Check that the table entry is not null and that the type of the function is
// **identical with** the function type declared at the call site (no
@@ -3140,16 +3121,12 @@ Node* WasmGraphBuilder::BuildIndirectCall(uint32_t table_index,
intptr_scaled_key);
args[0] = target;
- const UseRetpoline use_retpoline =
- untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline;
switch (continuation) {
case kCallContinues:
- return BuildWasmCall(sig, args, rets, position, target_instance,
- use_retpoline);
+ return BuildWasmCall(sig, args, rets, position, target_instance);
case kReturnCall:
- return BuildWasmReturnCall(sig, args, position, target_instance,
- use_retpoline);
+ return BuildWasmReturnCall(sig, args, position, target_instance);
}
}
@@ -3244,14 +3221,9 @@ Node* WasmGraphBuilder::BuildCallRef(uint32_t sig_index,
args[0] = end_label.PhiAt(0);
- const UseRetpoline use_retpoline =
- untrusted_code_mitigations_ ? kRetpoline : kNoRetpoline;
-
Node* call = continuation == kCallContinues
- ? BuildWasmCall(sig, args, rets, position, instance_node,
- use_retpoline)
- : BuildWasmReturnCall(sig, args, position, instance_node,
- use_retpoline);
+ ? BuildWasmCall(sig, args, rets, position, instance_node)
+ : BuildWasmReturnCall(sig, args, position, instance_node);
return call;
}
@@ -3287,7 +3259,7 @@ Node* WasmGraphBuilder::ReturnCall(uint32_t index, base::Vector<Node*> args,
Address code = static_cast<Address>(index);
args[0] = mcgraph()->RelocatableIntPtrConstant(code, RelocInfo::WASM_CALL);
- return BuildWasmReturnCall(sig, args, position, nullptr, kNoRetpoline);
+ return BuildWasmReturnCall(sig, args, position, nullptr);
}
Node* WasmGraphBuilder::ReturnCallIndirect(uint32_t table_index,
@@ -3416,15 +3388,6 @@ void WasmGraphBuilder::InitInstanceCache(
// Load the memory size.
instance_cache->mem_size =
LOAD_MUTABLE_INSTANCE_FIELD(MemorySize, MachineType::UintPtr());
-
- if (untrusted_code_mitigations_) {
- // Load the memory mask.
- instance_cache->mem_mask =
- LOAD_INSTANCE_FIELD(MemoryMask, MachineType::UintPtr());
- } else {
- // Explicitly set to nullptr to ensure a SEGV when we try to use it.
- instance_cache->mem_mask = nullptr;
- }
}
void WasmGraphBuilder::PrepareInstanceCacheForLoop(
@@ -3435,10 +3398,6 @@ void WasmGraphBuilder::PrepareInstanceCacheForLoop(
INTRODUCE_PHI(mem_start, MachineType::PointerRepresentation());
INTRODUCE_PHI(mem_size, MachineType::PointerRepresentation());
- if (untrusted_code_mitigations_) {
- INTRODUCE_PHI(mem_mask, MachineType::PointerRepresentation());
- }
-
#undef INTRODUCE_PHI
}
@@ -3453,10 +3412,6 @@ void WasmGraphBuilder::NewInstanceCacheMerge(WasmInstanceCacheNodes* to,
INTRODUCE_PHI(mem_start, MachineType::PointerRepresentation());
INTRODUCE_PHI(mem_size, MachineRepresentation::kWord32);
- if (untrusted_code_mitigations_) {
- INTRODUCE_PHI(mem_mask, MachineRepresentation::kWord32);
- }
-
#undef INTRODUCE_PHI
}
@@ -3467,10 +3422,6 @@ void WasmGraphBuilder::MergeInstanceCacheInto(WasmInstanceCacheNodes* to,
merge, to->mem_size, from->mem_size);
to->mem_start = CreateOrMergeIntoPhi(MachineType::PointerRepresentation(),
merge, to->mem_start, from->mem_start);
- if (untrusted_code_mitigations_) {
- to->mem_mask = CreateOrMergeIntoPhi(MachineType::PointerRepresentation(),
- merge, to->mem_mask, from->mem_mask);
- }
}
Node* WasmGraphBuilder::CreateOrMergeIntoPhi(MachineRepresentation rep,
@@ -3839,13 +3790,6 @@ WasmGraphBuilder::BoundsCheckMem(uint8_t access_size, Node* index,
// Introduce the actual bounds check.
Node* cond = gasm_->UintLessThan(index, effective_size);
TrapIfFalse(wasm::kTrapMemOutOfBounds, cond, position);
-
- if (untrusted_code_mitigations_) {
- // In the fallthrough case, condition the index with the memory mask.
- Node* mem_mask = instance_cache_->mem_mask;
- DCHECK_NOT_NULL(mem_mask);
- index = gasm_->WordAnd(index, mem_mask);
- }
return {index, kDynamicallyChecked};
}
@@ -4345,13 +4289,6 @@ Node* WasmGraphBuilder::BuildAsmjsLoadMem(MachineType type, Node* index) {
gasm_->UintLessThan(index, mem_size), BranchHint::kTrue);
bounds_check.Chain(control());
- if (untrusted_code_mitigations_) {
- // Condition the index with the memory mask.
- Node* mem_mask = instance_cache_->mem_mask;
- DCHECK_NOT_NULL(mem_mask);
- index = gasm_->WordAnd(index, mem_mask);
- }
-
Node* load = graph()->NewNode(mcgraph()->machine()->Load(type), mem_start,
index, effect(), bounds_check.if_true);
SetEffectControl(bounds_check.EffectPhi(load, effect()), bounds_check.merge);
@@ -4396,13 +4333,6 @@ Node* WasmGraphBuilder::BuildAsmjsStoreMem(MachineType type, Node* index,
BranchHint::kTrue);
bounds_check.Chain(control());
- if (untrusted_code_mitigations_) {
- // Condition the index with the memory mask.
- Node* mem_mask = instance_cache_->mem_mask;
- DCHECK_NOT_NULL(mem_mask);
- index = gasm_->Word32And(index, mem_mask);
- }
-
index = BuildChangeUint32ToUintPtr(index);
const Operator* store_op = mcgraph()->machine()->Store(StoreRepresentation(
type.representation(), WriteBarrierKind::kNoWriteBarrier));
@@ -5240,16 +5170,26 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
const Operator* (MachineOperatorBuilder::*)(MachineType);
using OperatorByRep =
const Operator* (MachineOperatorBuilder::*)(MachineRepresentation);
+ using OperatorByAtomicLoadRep =
+ const Operator* (MachineOperatorBuilder::*)(AtomicLoadParameters);
+ using OperatorByAtomicStoreRep =
+ const Operator* (MachineOperatorBuilder::*)(AtomicStoreParameters);
const Type type;
const MachineType machine_type;
const OperatorByType operator_by_type = nullptr;
const OperatorByRep operator_by_rep = nullptr;
+ const OperatorByAtomicLoadRep operator_by_atomic_load_params = nullptr;
+ const OperatorByAtomicStoreRep operator_by_atomic_store_rep = nullptr;
constexpr AtomicOpInfo(Type t, MachineType m, OperatorByType o)
: type(t), machine_type(m), operator_by_type(o) {}
constexpr AtomicOpInfo(Type t, MachineType m, OperatorByRep o)
: type(t), machine_type(m), operator_by_rep(o) {}
+ constexpr AtomicOpInfo(Type t, MachineType m, OperatorByAtomicLoadRep o)
+ : type(t), machine_type(m), operator_by_atomic_load_params(o) {}
+ constexpr AtomicOpInfo(Type t, MachineType m, OperatorByAtomicStoreRep o)
+ : type(t), machine_type(m), operator_by_atomic_store_rep(o) {}
// Constexpr, hence just a table lookup in most compilers.
static constexpr AtomicOpInfo Get(wasm::WasmOpcode opcode) {
@@ -5358,11 +5298,21 @@ Node* WasmGraphBuilder::AtomicOp(wasm::WasmOpcode opcode, Node* const* inputs,
// {offset} is validated to be within uintptr_t range in {BoundsCheckMem}.
uintptr_t capped_offset = static_cast<uintptr_t>(offset);
if (info.type != AtomicOpInfo::kSpecial) {
- const Operator* op =
- info.operator_by_type
- ? (mcgraph()->machine()->*info.operator_by_type)(info.machine_type)
- : (mcgraph()->machine()->*info.operator_by_rep)(
- info.machine_type.representation());
+ const Operator* op;
+ if (info.operator_by_type) {
+ op = (mcgraph()->machine()->*info.operator_by_type)(info.machine_type);
+ } else if (info.operator_by_rep) {
+ op = (mcgraph()->machine()->*info.operator_by_rep)(
+ info.machine_type.representation());
+ } else if (info.operator_by_atomic_load_params) {
+ op = (mcgraph()->machine()->*info.operator_by_atomic_load_params)(
+ AtomicLoadParameters(info.machine_type, AtomicMemoryOrder::kSeqCst));
+ } else {
+ op = (mcgraph()->machine()->*info.operator_by_atomic_store_rep)(
+ AtomicStoreParameters(info.machine_type.representation(),
+ WriteBarrierKind::kNoWriteBarrier,
+ AtomicMemoryOrder::kSeqCst));
+ }
Node* input_nodes[6] = {MemBuffer(capped_offset), index};
int num_actual_inputs = info.type;
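Editor's note: AtomicOpInfo now dispatches through one of four pointer-to-member-function fields, selected by whichever constructor populated it. The member-pointer call syntax used above, reduced to standard C++:

    #include <cassert>

    struct Builder {
      int ByType(int t) { return t + 1; }
      int ByRep(int r) { return r + 2; }
    };

    struct OpInfo {
      using ByTypeFn = int (Builder::*)(int);
      using ByRepFn = int (Builder::*)(int);
      ByTypeFn by_type = nullptr;  // at most one field is set
      ByRepFn by_rep = nullptr;
    };

    int Dispatch(Builder* b, const OpInfo& info, int arg) {
      if (info.by_type) return (b->*info.by_type)(arg);  // as in the diff
      return (b->*info.by_rep)(arg);
    }

    int main() {
      Builder b;
      OpInfo info;
      info.by_rep = &Builder::ByRep;
      assert(Dispatch(&b, info, 1) == 3);
    }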
@@ -5610,13 +5560,16 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
wasm::WasmCodePosition position) {
TrapIfFalse(wasm::kTrapArrayTooLarge,
gasm_->Uint32LessThanOrEqual(
- length, gasm_->Uint32Constant(wasm::kV8MaxWasmArrayLength)),
+ length, gasm_->Uint32Constant(WasmArray::MaxLength(type))),
position);
wasm::ValueType element_type = type->element_type();
Builtin stub = ChooseArrayAllocationBuiltin(element_type, initial_value);
- Node* a =
- gasm_->CallBuiltin(stub, Operator::kEliminatable, rtt, length,
- Int32Constant(element_type.element_size_bytes()));
+ // Do NOT mark this as Operator::kEliminatable, because that would cause the
+ // Call node to have no control inputs, which means it could get scheduled
+ // before the check/trap above.
+ Node* a = gasm_->CallBuiltin(
+ stub, Operator::kNoDeopt | Operator::kNoThrow, rtt, length,
+ Int32Constant(element_type.element_size_bytes()));
if (initial_value != nullptr) {
// TODO(manoskouk): If the loop is ever removed here, we have to update
// ArrayNewWithRtt() in graph-builder-interface.cc to not mark the current
@@ -5628,8 +5581,6 @@ Node* WasmGraphBuilder::ArrayNewWithRtt(uint32_t array_index,
Node* element_size = Int32Constant(element_type.element_size_bytes());
Node* end_offset =
gasm_->Int32Add(start_offset, gasm_->Int32Mul(element_size, length));
- // Loops need the graph's end to have been set up.
- gasm_->EnsureEnd();
gasm_->Goto(&loop, start_offset);
gasm_->Bind(&loop);
{
@@ -6005,24 +5956,33 @@ Node* WasmGraphBuilder::ArrayLen(Node* array_object, CheckForNull null_check,
return gasm_->LoadWasmArrayLength(array_object);
}
-// TODO(7748): Change {CallBuiltin} to {BuildCCall}. Add an option to copy in a
-// loop for small array sizes. To find the length limit, run
-// test/mjsunit/wasm/array-copy-benchmark.js.
+// TODO(7748): Add an option to copy in a loop for small array sizes. To find
+// the length limit, run test/mjsunit/wasm/array-copy-benchmark.js.
void WasmGraphBuilder::ArrayCopy(Node* dst_array, Node* dst_index,
- Node* src_array, Node* src_index, Node* length,
+ CheckForNull dst_null_check, Node* src_array,
+ Node* src_index, CheckForNull src_null_check,
+ Node* length,
wasm::WasmCodePosition position) {
- // TODO(7748): Skip null checks when possible.
- TrapIfTrue(wasm::kTrapNullDereference, gasm_->WordEqual(dst_array, RefNull()),
- position);
- TrapIfTrue(wasm::kTrapNullDereference, gasm_->WordEqual(src_array, RefNull()),
- position);
+ if (dst_null_check == kWithNullCheck) {
+ TrapIfTrue(wasm::kTrapNullDereference,
+ gasm_->WordEqual(dst_array, RefNull()), position);
+ }
+ if (src_null_check == kWithNullCheck) {
+ TrapIfTrue(wasm::kTrapNullDereference,
+ gasm_->WordEqual(src_array, RefNull()), position);
+ }
BoundsCheckArrayCopy(dst_array, dst_index, length, position);
BoundsCheckArrayCopy(src_array, src_index, length, position);
- Operator::Properties copy_properties =
- Operator::kIdempotent | Operator::kNoThrow | Operator::kNoDeopt;
- // The builtin needs the int parameters first.
- gasm_->CallBuiltin(Builtin::kWasmArrayCopy, copy_properties, dst_index,
- src_index, length, dst_array, src_array);
+
+ Node* function =
+ gasm_->ExternalConstant(ExternalReference::wasm_array_copy());
+ MachineType arg_types[]{
+ MachineType::TaggedPointer(), MachineType::TaggedPointer(),
+ MachineType::Uint32(), MachineType::TaggedPointer(),
+ MachineType::Uint32(), MachineType::Uint32()};
+ MachineSignature sig(0, 6, arg_types);
+ BuildCCall(&sig, function, GetInstance(), dst_array, dst_index, src_array,
+ src_index, length);
}
// 1 bit V8 Smi tag, 31 bits V8 Smi shift, 1 bit i31ref high-bit truncation.
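Editor's note: ArrayCopy now reaches a C function through an external reference instead of a builtin, so the wrapper spells out the machine signature by hand and the call site must match it exactly. In plain C++ the analogue is calling through an explicitly typed function pointer (the wasm_array_copy symbol is V8-internal; everything below is illustrative):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // A C-ABI copy routine, similar in shape to the runtime target:
    // (dst, dst_index, src, src_index, length).
    extern "C" void ArrayCopy(uint32_t* dst, uint32_t dst_index,
                              const uint32_t* src, uint32_t src_index,
                              uint32_t length) {
      std::memmove(dst + dst_index, src + src_index,
                   length * sizeof(uint32_t));
    }

    int main() {
      // The function-pointer type plays the role of MachineSignature: a
      // mismatch between declaration and call site is undefined behavior.
      using CopyFn = void (*)(uint32_t*, uint32_t, const uint32_t*, uint32_t,
                              uint32_t);
      CopyFn fn = &ArrayCopy;
      uint32_t src[3] = {1, 2, 3}, dst[3] = {0, 0, 0};
      fn(dst, 0, src, 1, 2);
      assert(dst[0] == 2 && dst[1] == 3);
    }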
@@ -6659,8 +6619,7 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// The (cached) call target is the jump table slot for that function.
args[0] = BuildLoadCallTargetFromExportedFunctionData(function_data);
BuildWasmCall(sig_, base::VectorOf(args), base::VectorOf(rets),
- wasm::kNoCodePosition, nullptr, kNoRetpoline,
- frame_state);
+ wasm::kNoCodePosition, nullptr, frame_state);
}
}
@@ -6929,8 +6888,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
// Convert wasm numbers to JS values.
pos = AddArgumentNodes(base::VectorOf(args), pos, wasm_count, sig_);
- args[pos++] = undefined_node; // new target
- args[pos++] = Int32Constant(wasm_count); // argument count
+ args[pos++] = undefined_node; // new target
+ args[pos++] =
+ Int32Constant(JSParameterCount(wasm_count)); // argument count
args[pos++] = function_context;
args[pos++] = effect();
args[pos++] = control();
@@ -6957,8 +6917,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
for (int i = wasm_count; i < expected_arity; ++i) {
args[pos++] = undefined_node;
}
- args[pos++] = undefined_node; // new target
- args[pos++] = Int32Constant(wasm_count); // argument count
+ args[pos++] = undefined_node; // new target
+ args[pos++] =
+ Int32Constant(JSParameterCount(wasm_count)); // argument count
Node* function_context =
gasm_->LoadContextFromJSFunction(callable_node);
@@ -6981,7 +6942,8 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
args[pos++] =
gasm_->GetBuiltinPointerTarget(Builtin::kCall_ReceiverIsAny);
args[pos++] = callable_node;
- args[pos++] = Int32Constant(wasm_count); // argument count
+ args[pos++] =
+ Int32Constant(JSParameterCount(wasm_count)); // argument count
args[pos++] = undefined_node; // receiver
auto call_descriptor = Linkage::GetStubCallDescriptor(
@@ -7162,8 +7124,9 @@ class WasmWrapperGraphBuilder : public WasmGraphBuilder {
int pos = 0;
args[pos++] = gasm_->GetBuiltinPointerTarget(Builtin::kCall_ReceiverIsAny);
args[pos++] = callable;
- args[pos++] = Int32Constant(wasm_count); // argument count
- args[pos++] = UndefinedValue(); // receiver
+ args[pos++] =
+ Int32Constant(JSParameterCount(wasm_count)); // argument count
+ args[pos++] = UndefinedValue(); // receiver
auto call_descriptor = Linkage::GetStubCallDescriptor(
graph()->zone(), CallTrampolineDescriptor{}, wasm_count + 1,
@@ -7457,7 +7420,7 @@ std::pair<WasmImportCallKind, Handle<JSReceiver>> ResolveWasmImportCall(
return std::make_pair(WasmImportCallKind::kUseCallBuiltin, callable);
}
- if (shared->internal_formal_parameter_count() ==
+ if (shared->internal_formal_parameter_count_without_receiver() ==
expected_sig->parameter_count()) {
return std::make_pair(WasmImportCallKind::kJSFunctionArityMatch,
callable);
@@ -7623,8 +7586,7 @@ wasm::WasmCompilationResult CompileWasmImportCallWrapper(
// Schedule and compile to machine code.
CallDescriptor* incoming =
- GetWasmCallDescriptor(&zone, sig, WasmGraphBuilder::kNoRetpoline,
- WasmCallKind::kWasmImportWrapper);
+ GetWasmCallDescriptor(&zone, sig, WasmCallKind::kWasmImportWrapper);
if (machine->Is32()) {
incoming = GetI32WasmCallDescriptor(&zone, incoming);
}
@@ -7665,8 +7627,7 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::NativeModule* native_module,
// Run the compiler pipeline to generate machine code.
CallDescriptor* call_descriptor =
- GetWasmCallDescriptor(&zone, sig, WasmGraphBuilder::kNoRetpoline,
- WasmCallKind::kWasmCapiFunction);
+ GetWasmCallDescriptor(&zone, sig, WasmCallKind::kWasmCapiFunction);
if (mcgraph->machine()->Is32()) {
call_descriptor = GetI32WasmCallDescriptor(&zone, call_descriptor);
}
@@ -7676,13 +7637,18 @@ wasm::WasmCode* CompileWasmCapiCallWrapper(wasm::NativeModule* native_module,
call_descriptor, mcgraph, CodeKind::WASM_TO_CAPI_FUNCTION,
wasm::WasmCode::kWasmToCapiWrapper, debug_name,
WasmStubAssemblerOptions(), source_positions);
- std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
- wasm::kAnonymousFuncIndex, result.code_desc, result.frame_slot_count,
- result.tagged_parameter_slots,
- result.protected_instructions_data.as_vector(),
- result.source_positions.as_vector(), wasm::WasmCode::kWasmToCapiWrapper,
- wasm::ExecutionTier::kNone, wasm::kNoDebugging);
- return native_module->PublishCode(std::move(wasm_code));
+ wasm::WasmCode* published_code;
+ {
+ wasm::CodeSpaceWriteScope code_space_write_scope(native_module);
+ std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
+ wasm::kAnonymousFuncIndex, result.code_desc, result.frame_slot_count,
+ result.tagged_parameter_slots,
+ result.protected_instructions_data.as_vector(),
+ result.source_positions.as_vector(), wasm::WasmCode::kWasmToCapiWrapper,
+ wasm::ExecutionTier::kNone, wasm::kNoDebugging);
+ published_code = native_module->PublishCode(std::move(wasm_code));
+ }
+ return published_code;
}
MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
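Editor's note: publishing the CAPI wrapper now happens inside a CodeSpaceWriteScope, an RAII guard that keeps the code space writable for exactly the block that needs it. The RAII shape, with a hypothetical guard standing in for the V8 class:

    #include <cassert>

    // Hypothetical stand-in: flips a writable bit for the guard's lifetime.
    class WriteScope {
     public:
      explicit WriteScope(bool* writable) : writable_(writable) {
        *writable_ = true;
      }
      ~WriteScope() { *writable_ = false; }

     private:
      bool* writable_;
    };

    int main() {
      bool writable = false;
      {
        WriteScope scope(&writable);
        assert(writable);  // mutate code only while the scope is alive
      }
      assert(!writable);  // dropped on scope exit, even on early return
    }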
@@ -7716,8 +7682,7 @@ MaybeHandle<Code> CompileWasmToJSWrapper(Isolate* isolate,
// Generate the call descriptor.
CallDescriptor* incoming =
- GetWasmCallDescriptor(zone.get(), sig, WasmGraphBuilder::kNoRetpoline,
- WasmCallKind::kWasmImportWrapper);
+ GetWasmCallDescriptor(zone.get(), sig, WasmCallKind::kWasmImportWrapper);
// Run the compilation job synchronously.
std::unique_ptr<OptimizedCompilationJob> job(
@@ -7851,9 +7816,10 @@ bool BuildGraphForWasmFunction(wasm::CompilationEnv* env,
WasmGraphBuilder builder(env, mcgraph->zone(), mcgraph, func_body.sig,
source_positions);
auto* allocator = wasm::GetWasmEngine()->allocator();
- wasm::VoidResult graph_construction_result = wasm::BuildTFGraph(
- allocator, env->enabled_features, env->module, &builder, detected,
- func_body, loop_infos, node_origins, func_index);
+ wasm::VoidResult graph_construction_result =
+ wasm::BuildTFGraph(allocator, env->enabled_features, env->module,
+ &builder, detected, func_body, loop_infos,
+ node_origins, func_index, wasm::kInstrumentEndpoints);
if (graph_construction_result.failed()) {
if (FLAG_trace_wasm_compiler) {
StdoutStream{} << "Compilation failed: "
@@ -7886,8 +7852,9 @@ base::Vector<const char> GetDebugName(Zone* zone, int index) {
} // namespace
wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
- wasm::CompilationEnv* env, const wasm::FunctionBody& func_body,
- int func_index, Counters* counters, wasm::WasmFeatures* detected) {
+ wasm::CompilationEnv* env, const wasm::WireBytesStorage* wire_bytes_storage,
+ const wasm::FunctionBody& func_body, int func_index, Counters* counters,
+ wasm::WasmFeatures* detected) {
TRACE_EVENT2(TRACE_DISABLED_BY_DEFAULT("v8.wasm.detailed"),
"wasm.CompileTopTier", "func_index", func_index, "body_size",
func_body.end - func_body.start);
@@ -7939,9 +7906,10 @@ wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
call_descriptor = GetI32WasmCallDescriptorForSimd(&zone, call_descriptor);
}
- Pipeline::GenerateCodeForWasmFunction(
- &info, mcgraph, call_descriptor, source_positions, node_origins,
- func_body, env->module, func_index, &loop_infos);
+ Pipeline::GenerateCodeForWasmFunction(&info, env, wire_bytes_storage, mcgraph,
+ call_descriptor, source_positions,
+ node_origins, func_body, env->module,
+ func_index, &loop_infos);
if (counters) {
int zone_bytes =
@@ -7997,10 +7965,9 @@ class LinkageLocationAllocator {
} // namespace
// General code uses the above configuration data.
-CallDescriptor* GetWasmCallDescriptor(
- Zone* zone, const wasm::FunctionSig* fsig,
- WasmGraphBuilder::UseRetpoline use_retpoline, WasmCallKind call_kind,
- bool need_frame_state) {
+CallDescriptor* GetWasmCallDescriptor(Zone* zone, const wasm::FunctionSig* fsig,
+ WasmCallKind call_kind,
+ bool need_frame_state) {
// The extra here is to accommodate the instance object as the first parameter
// and, when specified, the additional callable.
bool extra_callable_param =
@@ -8078,10 +8045,9 @@ CallDescriptor* GetWasmCallDescriptor(
descriptor_kind = CallDescriptor::kCallWasmCapiFunction;
}
- CallDescriptor::Flags flags =
- use_retpoline ? CallDescriptor::kRetpoline
- : need_frame_state ? CallDescriptor::kNeedsFrameState
- : CallDescriptor::kNoFlags;
+ CallDescriptor::Flags flags = need_frame_state
+ ? CallDescriptor::kNeedsFrameState
+ : CallDescriptor::kNoFlags;
return zone->New<CallDescriptor>( // --
descriptor_kind, // kind
target_type, // target MachineType
diff --git a/deps/v8/src/compiler/wasm-compiler.h b/deps/v8/src/compiler/wasm-compiler.h
index 71e3111c8c..328152b363 100644
--- a/deps/v8/src/compiler/wasm-compiler.h
+++ b/deps/v8/src/compiler/wasm-compiler.h
@@ -53,13 +53,15 @@ using TFNode = compiler::Node;
using TFGraph = compiler::MachineGraph;
class WasmCode;
class WasmFeatures;
+class WireBytesStorage;
enum class LoadTransformationKind : uint8_t;
} // namespace wasm
namespace compiler {
wasm::WasmCompilationResult ExecuteTurbofanWasmCompilation(
- wasm::CompilationEnv*, const wasm::FunctionBody&, int func_index, Counters*,
+ wasm::CompilationEnv*, const wasm::WireBytesStorage* wire_bytes_storage,
+ const wasm::FunctionBody&, int func_index, Counters*,
wasm::WasmFeatures* detected);
// Calls to Wasm imports are handled in several different ways, depending on the
@@ -176,7 +178,6 @@ class JSWasmCallData {
struct WasmInstanceCacheNodes {
Node* mem_start;
Node* mem_size;
- Node* mem_mask;
};
struct WasmLoopInfo {
@@ -207,10 +208,6 @@ class WasmGraphBuilder {
kNeedsBoundsCheck = true,
kCanOmitBoundsCheck = false
};
- enum UseRetpoline : bool { // --
- kRetpoline = true,
- kNoRetpoline = false
- };
enum CheckForNull : bool { // --
kWithNullCheck = true,
kWithoutNullCheck = false
@@ -474,9 +471,9 @@ class WasmGraphBuilder {
wasm::WasmCodePosition position);
Node* ArrayLen(Node* array_object, CheckForNull null_check,
wasm::WasmCodePosition position);
- void ArrayCopy(Node* dst_array, Node* dst_index, Node* src_array,
- Node* src_index, Node* length,
- wasm::WasmCodePosition position);
+ void ArrayCopy(Node* dst_array, Node* dst_index, CheckForNull dst_null_check,
+ Node* src_array, Node* src_index, CheckForNull src_null_check,
+ Node* length, wasm::WasmCodePosition position);
Node* I31New(Node* input);
Node* I31GetS(Node* input);
Node* I31GetU(Node* input);
@@ -576,12 +573,11 @@ class WasmGraphBuilder {
IsReturnCall continuation);
Node* BuildWasmCall(const wasm::FunctionSig* sig, base::Vector<Node*> args,
base::Vector<Node*> rets, wasm::WasmCodePosition position,
- Node* instance_node, UseRetpoline use_retpoline,
- Node* frame_state = nullptr);
+ Node* instance_node, Node* frame_state = nullptr);
Node* BuildWasmReturnCall(const wasm::FunctionSig* sig,
base::Vector<Node*> args,
wasm::WasmCodePosition position,
- Node* instance_node, UseRetpoline use_retpoline);
+ Node* instance_node);
Node* BuildImportCall(const wasm::FunctionSig* sig, base::Vector<Node*> args,
base::Vector<Node*> rets,
wasm::WasmCodePosition position, int func_index,
@@ -765,7 +761,6 @@ class WasmGraphBuilder {
bool use_js_isolate_and_params() const { return isolate_ != nullptr; }
bool has_simd_ = false;
bool needs_stack_check_ = false;
- const bool untrusted_code_mitigations_ = true;
const wasm::FunctionSig* const sig_;
@@ -791,8 +786,6 @@ V8_EXPORT_PRIVATE void BuildInlinedJSToWasmWrapper(
V8_EXPORT_PRIVATE CallDescriptor* GetWasmCallDescriptor(
Zone* zone, const wasm::FunctionSig* signature,
- WasmGraphBuilder::UseRetpoline use_retpoline =
- WasmGraphBuilder::kNoRetpoline,
WasmCallKind kind = kWasmFunction, bool need_frame_state = false);
V8_EXPORT_PRIVATE CallDescriptor* GetI32WasmCallDescriptor(
diff --git a/deps/v8/src/compiler/wasm-inlining.cc b/deps/v8/src/compiler/wasm-inlining.cc
new file mode 100644
index 0000000000..6753769953
--- /dev/null
+++ b/deps/v8/src/compiler/wasm-inlining.cc
@@ -0,0 +1,195 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/wasm-inlining.h"
+
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/wasm-compiler.h"
+#include "src/wasm/function-body-decoder.h"
+#include "src/wasm/graph-builder-interface.h"
+#include "src/wasm/wasm-features.h"
+#include "src/wasm/wasm-module.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Reduction WasmInliner::Reduce(Node* node) {
+ if (node->opcode() == IrOpcode::kCall) {
+ return ReduceCall(node);
+ } else {
+ return NoChange();
+ }
+}
+
+// TODO(12166): Abstract over a heuristics provider.
+Reduction WasmInliner::ReduceCall(Node* call) {
+ Node* callee = NodeProperties::GetValueInput(call, 0);
+ IrOpcode::Value reloc_opcode = mcgraph_->machine()->Is32()
+ ? IrOpcode::kRelocatableInt32Constant
+ : IrOpcode::kRelocatableInt64Constant;
+ if (callee->opcode() != reloc_opcode) return NoChange();
+ auto info = OpParameter<RelocatablePtrConstantInfo>(callee->op());
+ if (static_cast<uint32_t>(info.value()) != inlinee_index_) return NoChange();
+
+ CHECK_LT(inlinee_index_, module()->functions.size());
+ const wasm::WasmFunction* function = &module()->functions[inlinee_index_];
+ base::Vector<const byte> function_bytes =
+ wire_bytes_->GetCode(function->code);
+ const wasm::FunctionBody inlinee_body(function->sig, function->code.offset(),
+ function_bytes.begin(),
+ function_bytes.end());
+ wasm::WasmFeatures detected;
+ WasmGraphBuilder builder(env_, zone(), mcgraph_, inlinee_body.sig, spt_);
+ std::vector<WasmLoopInfo> infos;
+
+ wasm::DecodeResult result;
+ Node* inlinee_start;
+ Node* inlinee_end;
+ {
+ Graph::SubgraphScope scope(graph());
+ result = wasm::BuildTFGraph(zone()->allocator(), env_->enabled_features,
+ module(), &builder, &detected, inlinee_body,
+ &infos, node_origins_, inlinee_index_,
+ wasm::kDoNotInstrumentEndpoints);
+ inlinee_start = graph()->start();
+ inlinee_end = graph()->end();
+ }
+
+ if (result.failed()) return NoChange();
+ return InlineCall(call, inlinee_start, inlinee_end);
+}
+
+// TODO(12166): Handle exceptions and tail calls.
+Reduction WasmInliner::InlineCall(Node* call, Node* callee_start,
+ Node* callee_end) {
+ DCHECK_EQ(call->opcode(), IrOpcode::kCall);
+
+  /* 1) Rewire the callee's formal parameters to the call site's actual
+   * parameters, and the effect and control dependencies of the callee's start
+   * node to the respective inputs of the call node.
+   */
+ Node* control = NodeProperties::GetControlInput(call);
+ Node* effect = NodeProperties::GetEffectInput(call);
+
+ for (Edge edge : callee_start->use_edges()) {
+ Node* use = edge.from();
+ switch (use->opcode()) {
+ case IrOpcode::kParameter: {
+ // Index 0 is the callee node.
+ int index = 1 + ParameterIndexOf(use->op());
+ Replace(use, NodeProperties::GetValueInput(call, index));
+ break;
+ }
+ default:
+ if (NodeProperties::IsEffectEdge(edge)) {
+ edge.UpdateTo(effect);
+ } else if (NodeProperties::IsControlEdge(edge)) {
+ edge.UpdateTo(control);
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ }
+
+ /* 2) Rewire uses of the call node to the return values of the callee. Since
+ * there might be multiple return nodes in the callee, we have to create Merge
+ * and Phi nodes for them.
+ */
+ NodeVector return_nodes(zone());
+ for (Node* const input : callee_end->inputs()) {
+ DCHECK(IrOpcode::IsGraphTerminator(input->opcode()));
+ switch (input->opcode()) {
+ case IrOpcode::kReturn:
+ return_nodes.push_back(input);
+ break;
+ case IrOpcode::kDeoptimize:
+ case IrOpcode::kTerminate:
+ case IrOpcode::kThrow:
+ NodeProperties::MergeControlToEnd(graph(), common(), input);
+ Revisit(graph()->end());
+ break;
+ case IrOpcode::kTailCall:
+ // TODO(12166): A tail call in the inlined function has to be
+ // transformed into a regular call in the caller function.
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ }
+
+ if (return_nodes.size() > 0) {
+ int const return_count = static_cast<int>(return_nodes.size());
+ NodeVector controls(zone());
+ NodeVector effects(zone());
+ for (Node* const return_node : return_nodes) {
+ controls.push_back(NodeProperties::GetControlInput(return_node));
+ effects.push_back(NodeProperties::GetEffectInput(return_node));
+ }
+ Node* control_output = graph()->NewNode(common()->Merge(return_count),
+ return_count, &controls.front());
+ effects.push_back(control_output);
+ Node* effect_output =
+ graph()->NewNode(common()->EffectPhi(return_count),
+ static_cast<int>(effects.size()), &effects.front());
+
+    // The first input of a return node is discarded. This is because Wasm
+    // functions always return an additional 0 constant as the first return value.
+ DCHECK(
+ Int32Matcher(NodeProperties::GetValueInput(return_nodes[0], 0)).Is(0));
+ int const return_arity = return_nodes[0]->op()->ValueInputCount() - 1;
+ NodeVector values(zone());
+ for (int i = 0; i < return_arity; i++) {
+ NodeVector ith_values(zone());
+ for (Node* const return_node : return_nodes) {
+ Node* value = NodeProperties::GetValueInput(return_node, i + 1);
+ ith_values.push_back(value);
+ }
+ ith_values.push_back(control_output);
+ // Find the correct machine representation for the return values from the
+ // inlinee signature.
+ const wasm::WasmFunction* function = &module()->functions[inlinee_index_];
+ MachineRepresentation repr =
+ function->sig->GetReturn(i).machine_representation();
+ Node* ith_value_output = graph()->NewNode(
+ common()->Phi(repr, return_count),
+ static_cast<int>(ith_values.size()), &ith_values.front());
+ values.push_back(ith_value_output);
+ }
+
+ if (return_arity == 0) {
+ // Void function, no value uses.
+ ReplaceWithValue(call, mcgraph()->Dead(), effect_output, control_output);
+ } else if (return_arity == 1) {
+ // One return value. Just replace value uses of the call node with it.
+ ReplaceWithValue(call, values[0], effect_output, control_output);
+ } else {
+ // Multiple returns. We have to find the projections of the call node and
+ // replace them with the returned values.
+ for (Edge use_edge : call->use_edges()) {
+ if (NodeProperties::IsValueEdge(use_edge)) {
+ Node* use = use_edge.from();
+ DCHECK_EQ(use->opcode(), IrOpcode::kProjection);
+ ReplaceWithValue(use, values[ProjectionIndexOf(use->op())]);
+ }
+ }
+ // All value inputs are replaced by the above loop, so it is ok to use
+ // Dead() as a dummy for value replacement.
+ ReplaceWithValue(call, mcgraph()->Dead(), effect_output, control_output);
+ }
+ return Replace(mcgraph()->Dead());
+ } else {
+ // The callee can never return. The call node and all its uses are dead.
+ ReplaceWithValue(call, mcgraph()->Dead(), mcgraph()->Dead(),
+ mcgraph()->Dead());
+ return Changed(call);
+ }
+}
+
+const wasm::WasmModule* WasmInliner::module() const { return env_->module; }
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/compiler/wasm-inlining.h b/deps/v8/src/compiler/wasm-inlining.h
new file mode 100644
index 0000000000..8b31b6b291
--- /dev/null
+++ b/deps/v8/src/compiler/wasm-inlining.h
@@ -0,0 +1,77 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if !V8_ENABLE_WEBASSEMBLY
+#error This header should only be included if WebAssembly is enabled.
+#endif // !V8_ENABLE_WEBASSEMBLY
+
+#ifndef V8_COMPILER_WASM_INLINING_H_
+#define V8_COMPILER_WASM_INLINING_H_
+
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+
+namespace v8 {
+namespace internal {
+
+namespace wasm {
+struct CompilationEnv;
+struct WasmModule;
+class WireBytesStorage;
+} // namespace wasm
+
+class BytecodeOffset;
+class OptimizedCompilationInfo;
+
+namespace compiler {
+
+class NodeOriginTable;
+class SourcePositionTable;
+
+// The WasmInliner provides the core graph inlining machinery for WebAssembly
+// graphs. Note that this class only deals with the mechanics of how to inline
+// one graph into another; heuristics that decide what and how much to inline
+// are beyond its scope. As a current placeholder, only the function at the
+// given index {inlinee_index} is inlined.
+class WasmInliner final : public AdvancedReducer {
+ public:
+ WasmInliner(Editor* editor, wasm::CompilationEnv* env,
+ SourcePositionTable* spt, NodeOriginTable* node_origins,
+ MachineGraph* mcgraph, const wasm::WireBytesStorage* wire_bytes,
+ uint32_t inlinee_index)
+ : AdvancedReducer(editor),
+ env_(env),
+ spt_(spt),
+ node_origins_(node_origins),
+ mcgraph_(mcgraph),
+ wire_bytes_(wire_bytes),
+ inlinee_index_(inlinee_index) {}
+
+ const char* reducer_name() const override { return "WasmInliner"; }
+
+ Reduction Reduce(Node* node) final;
+
+ private:
+ Zone* zone() const { return mcgraph_->zone(); }
+ CommonOperatorBuilder* common() const { return mcgraph_->common(); }
+ Graph* graph() const { return mcgraph_->graph(); }
+ MachineGraph* mcgraph() const { return mcgraph_; }
+ const wasm::WasmModule* module() const;
+
+ Reduction ReduceCall(Node* call);
+ Reduction InlineCall(Node* call, Node* callee_start, Node* callee_end);
+
+ wasm::CompilationEnv* const env_;
+ SourcePositionTable* const spt_;
+ NodeOriginTable* const node_origins_;
+ MachineGraph* const mcgraph_;
+ const wasm::WireBytesStorage* const wire_bytes_;
+ const uint32_t inlinee_index_;
+};
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
+
+#endif // V8_COMPILER_WASM_INLINING_H_
diff --git a/deps/v8/src/d8/async-hooks-wrapper.cc b/deps/v8/src/d8/async-hooks-wrapper.cc
index 84191b9815..13b67ce8ea 100644
--- a/deps/v8/src/d8/async-hooks-wrapper.cc
+++ b/deps/v8/src/d8/async-hooks-wrapper.cc
@@ -3,6 +3,11 @@
// found in the LICENSE file.
#include "src/d8/async-hooks-wrapper.h"
+
+#include "include/v8-function.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-primitive.h"
+#include "include/v8-template.h"
#include "src/d8/d8.h"
#include "src/execution/isolate-inl.h"
@@ -120,66 +125,74 @@ Local<Object> AsyncHooks::CreateHook(
void AsyncHooks::ShellPromiseHook(PromiseHookType type, Local<Promise> promise,
Local<Value> parent) {
- AsyncHooks* hooks =
- PerIsolateData::Get(promise->GetIsolate())->GetAsyncHooks();
-
- HandleScope handle_scope(hooks->isolate_);
-
- Local<Context> currentContext = hooks->isolate_->GetCurrentContext();
- DCHECK(!currentContext.IsEmpty());
+ v8::Isolate* isolate = promise->GetIsolate();
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
- if (type == PromiseHookType::kInit) {
- ++hooks->current_async_id;
- Local<Integer> async_id =
- Integer::New(hooks->isolate_, hooks->current_async_id);
-
- CHECK(!promise
- ->HasPrivate(currentContext,
- hooks->async_id_smb.Get(hooks->isolate_))
+ AsyncHooks* hooks = PerIsolateData::Get(isolate)->GetAsyncHooks();
+ HandleScope handle_scope(isolate);
+ // Temporarily clear any scheduled_exception to allow evaluating JS that can
+ // throw.
+ i::Handle<i::Object> scheduled_exception;
+ if (i_isolate->has_scheduled_exception()) {
+ scheduled_exception = handle(i_isolate->scheduled_exception(), i_isolate);
+ i_isolate->clear_scheduled_exception();
+ }
+ {
+ TryCatch try_catch(isolate);
+ try_catch.SetVerbose(true);
+
+ Local<Context> currentContext = isolate->GetCurrentContext();
+ DCHECK(!currentContext.IsEmpty());
+
+ if (type == PromiseHookType::kInit) {
+ ++hooks->current_async_id;
+ Local<Integer> async_id = Integer::New(isolate, hooks->current_async_id);
+ CHECK(
+ !promise->HasPrivate(currentContext, hooks->async_id_smb.Get(isolate))
.ToChecked());
- promise->SetPrivate(currentContext,
- hooks->async_id_smb.Get(hooks->isolate_), async_id);
-
- if (parent->IsPromise()) {
- Local<Promise> parent_promise = parent.As<Promise>();
- Local<Value> parent_async_id =
- parent_promise
- ->GetPrivate(hooks->isolate_->GetCurrentContext(),
- hooks->async_id_smb.Get(hooks->isolate_))
- .ToLocalChecked();
- promise->SetPrivate(currentContext,
- hooks->trigger_id_smb.Get(hooks->isolate_),
- parent_async_id);
- } else {
- CHECK(parent->IsUndefined());
- Local<Integer> trigger_id = Integer::New(hooks->isolate_, 0);
- promise->SetPrivate(currentContext,
- hooks->trigger_id_smb.Get(hooks->isolate_),
- trigger_id);
+ promise->SetPrivate(currentContext, hooks->async_id_smb.Get(isolate),
+ async_id);
+
+ if (parent->IsPromise()) {
+ Local<Promise> parent_promise = parent.As<Promise>();
+ Local<Value> parent_async_id =
+ parent_promise
+ ->GetPrivate(currentContext, hooks->async_id_smb.Get(isolate))
+ .ToLocalChecked();
+ promise->SetPrivate(currentContext, hooks->trigger_id_smb.Get(isolate),
+ parent_async_id);
+ } else {
+ CHECK(parent->IsUndefined());
+ promise->SetPrivate(currentContext, hooks->trigger_id_smb.Get(isolate),
+ Integer::New(isolate, 0));
+ }
+ } else if (type == PromiseHookType::kBefore) {
+ AsyncContext ctx;
+ ctx.execution_async_id =
+ promise->GetPrivate(currentContext, hooks->async_id_smb.Get(isolate))
+ .ToLocalChecked()
+ .As<Integer>()
+ ->Value();
+ ctx.trigger_async_id =
+ promise
+ ->GetPrivate(currentContext, hooks->trigger_id_smb.Get(isolate))
+ .ToLocalChecked()
+ .As<Integer>()
+ ->Value();
+ hooks->asyncContexts.push(ctx);
+ } else if (type == PromiseHookType::kAfter) {
+ hooks->asyncContexts.pop();
+ }
+ if (!i::StackLimitCheck{i_isolate}.HasOverflowed()) {
+ for (AsyncHooksWrap* wrap : hooks->async_wraps_) {
+ PromiseHookDispatch(type, promise, parent, wrap, hooks);
+ if (try_catch.HasCaught()) break;
+ }
+ if (try_catch.HasCaught()) Shell::ReportException(isolate, &try_catch);
}
- } else if (type == PromiseHookType::kBefore) {
- AsyncContext ctx;
- ctx.execution_async_id =
- promise
- ->GetPrivate(hooks->isolate_->GetCurrentContext(),
- hooks->async_id_smb.Get(hooks->isolate_))
- .ToLocalChecked()
- .As<Integer>()
- ->Value();
- ctx.trigger_async_id =
- promise
- ->GetPrivate(hooks->isolate_->GetCurrentContext(),
- hooks->trigger_id_smb.Get(hooks->isolate_))
- .ToLocalChecked()
- .As<Integer>()
- ->Value();
- hooks->asyncContexts.push(ctx);
- } else if (type == PromiseHookType::kAfter) {
- hooks->asyncContexts.pop();
}
-
- for (AsyncHooksWrap* wrap : hooks->async_wraps_) {
- PromiseHookDispatch(type, promise, parent, wrap, hooks);
+ if (!scheduled_exception.is_null()) {
+ i_isolate->set_scheduled_exception(*scheduled_exception);
}
}
@@ -215,28 +228,14 @@ void AsyncHooks::PromiseHookDispatch(PromiseHookType type,
Local<Promise> promise,
Local<Value> parent, AsyncHooksWrap* wrap,
AsyncHooks* hooks) {
- if (!wrap->IsEnabled()) {
- return;
- }
+ if (!wrap->IsEnabled()) return;
+ v8::Isolate* v8_isolate = hooks->isolate_;
+ HandleScope handle_scope(v8_isolate);
- HandleScope handle_scope(hooks->isolate_);
-
- TryCatch try_catch(hooks->isolate_);
- try_catch.SetVerbose(true);
-
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(hooks->isolate_);
- if (isolate->has_scheduled_exception()) {
- isolate->ScheduleThrow(isolate->scheduled_exception());
-
- DCHECK(try_catch.HasCaught());
- Shell::ReportException(hooks->isolate_, &try_catch);
- return;
- }
-
- Local<Value> rcv = Undefined(hooks->isolate_);
- Local<Context> context = hooks->isolate_->GetCurrentContext();
+ Local<Value> rcv = Undefined(v8_isolate);
+ Local<Context> context = v8_isolate->GetCurrentContext();
Local<Value> async_id =
- promise->GetPrivate(context, hooks->async_id_smb.Get(hooks->isolate_))
+ promise->GetPrivate(context, hooks->async_id_smb.Get(v8_isolate))
.ToLocalChecked();
Local<Value> args[1] = {async_id};
@@ -245,28 +244,31 @@ void AsyncHooks::PromiseHookDispatch(PromiseHookType type,
MaybeLocal<Value> result;
  // Sacrifice brevity for readability and debuggability
- if (type == PromiseHookType::kInit) {
- if (!wrap->init_function().IsEmpty()) {
- Local<Value> initArgs[4] = {
- async_id, String::NewFromUtf8Literal(hooks->isolate_, "PROMISE"),
- promise
- ->GetPrivate(context, hooks->trigger_id_smb.Get(hooks->isolate_))
- .ToLocalChecked(),
- promise};
- result = wrap->init_function()->Call(context, rcv, 4, initArgs);
- }
- } else if (type == PromiseHookType::kBefore) {
- if (!wrap->before_function().IsEmpty()) {
- result = wrap->before_function()->Call(context, rcv, 1, args);
- }
- } else if (type == PromiseHookType::kAfter) {
- if (!wrap->after_function().IsEmpty()) {
- result = wrap->after_function()->Call(context, rcv, 1, args);
- }
- } else if (type == PromiseHookType::kResolve) {
- if (!wrap->promiseResolve_function().IsEmpty()) {
- result = wrap->promiseResolve_function()->Call(context, rcv, 1, args);
- }
+ switch (type) {
+ case PromiseHookType::kInit:
+ if (!wrap->init_function().IsEmpty()) {
+ Local<Value> initArgs[4] = {
+ async_id, String::NewFromUtf8Literal(v8_isolate, "PROMISE"),
+ promise->GetPrivate(context, hooks->trigger_id_smb.Get(v8_isolate))
+ .ToLocalChecked(),
+ promise};
+ result = wrap->init_function()->Call(context, rcv, 4, initArgs);
+ }
+ break;
+ case PromiseHookType::kBefore:
+ if (!wrap->before_function().IsEmpty()) {
+ result = wrap->before_function()->Call(context, rcv, 1, args);
+ }
+ break;
+ case PromiseHookType::kAfter:
+ if (!wrap->after_function().IsEmpty()) {
+ result = wrap->after_function()->Call(context, rcv, 1, args);
+ }
+ break;
+ case PromiseHookType::kResolve:
+ if (!wrap->promiseResolve_function().IsEmpty()) {
+ result = wrap->promiseResolve_function()->Call(context, rcv, 1, args);
+ }
}
}
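[Editor's sketch] The reworked ShellPromiseHook above parks any scheduled exception, runs the hook callbacks under a TryCatch, and then restores the exception. A hedged stand-alone sketch of that save/clear/restore pattern, with std::optional standing in for i::Handle and all names invented (not V8 API):

#include <cstdio>
#include <optional>
#include <string>

// Toy stand-in for an isolate that may carry a pending ("scheduled") exception.
struct ToyIsolate {
  std::optional<std::string> scheduled_exception;
};

// Stash any pending exception so re-entrant JS can run, then restore it.
void RunHookWithExceptionParked(ToyIsolate* isolate, void (*hook)(ToyIsolate*)) {
  std::optional<std::string> parked;
  if (isolate->scheduled_exception) {
    parked = std::move(isolate->scheduled_exception);
    isolate->scheduled_exception.reset();  // clear before running the hook
  }
  hook(isolate);  // may throw and handle exceptions internally
  if (parked) isolate->scheduled_exception = std::move(parked);  // restore
}

int main() {
  ToyIsolate iso{std::string("pending TypeError")};
  RunHookWithExceptionParked(&iso, [](ToyIsolate*) { puts("hook ran"); });
  printf("restored: %s\n", iso.scheduled_exception->c_str());
}
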
diff --git a/deps/v8/src/d8/async-hooks-wrapper.h b/deps/v8/src/d8/async-hooks-wrapper.h
index f339b6e316..23cc0be9c0 100644
--- a/deps/v8/src/d8/async-hooks-wrapper.h
+++ b/deps/v8/src/d8/async-hooks-wrapper.h
@@ -7,11 +7,18 @@
#include <stack>
-#include "include/v8.h"
+#include "include/v8-function-callback.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-promise.h"
#include "src/objects/objects.h"
namespace v8 {
+class Function;
+class Isolate;
+class ObjectTemplate;
+class Value;
+
using async_id_t = double;
struct AsyncContext {
diff --git a/deps/v8/src/d8/d8-platforms.cc b/deps/v8/src/d8/d8-platforms.cc
index 722b2bc4e2..cd48a35bbd 100644
--- a/deps/v8/src/d8/d8-platforms.cc
+++ b/deps/v8/src/d8/d8-platforms.cc
@@ -82,7 +82,14 @@ class PredictablePlatform final : public Platform {
}
double MonotonicallyIncreasingTime() override {
- return synthetic_time_in_sec_ += 0.00001;
+ // In predictable mode, there should be no (observable) concurrency, but we
+ // still run some tests that explicitly specify '--predictable' in the
+ // '--isolates' variant, where several threads run the same test in
+ // different isolates. To avoid TSan issues in that scenario we use atomic
+ // increments here.
+ uint64_t synthetic_time =
+ synthetic_time_.fetch_add(1, std::memory_order_relaxed);
+ return 1e-5 * synthetic_time;
}
double CurrentClockTimeMillis() override {
@@ -96,7 +103,7 @@ class PredictablePlatform final : public Platform {
Platform* platform() const { return platform_.get(); }
private:
- double synthetic_time_in_sec_ = 0.0;
+ std::atomic<uint64_t> synthetic_time_{0};
std::unique_ptr<Platform> platform_;
};
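[Editor's sketch] The hunk above replaces a plain double accumulator with an atomic counter: a relaxed fetch_add hands every caller a distinct tick, so the synthetic clock stays monotonic and data-race-free even when several isolate threads share one platform. A self-contained sketch of the idea (class name invented, not V8 API):

#include <atomic>
#include <cstdint>
#include <cstdio>

class SyntheticClock {
 public:
  double Now() {
    // Relaxed ordering suffices: we only need atomicity and uniqueness,
    // not synchronization with other memory operations.
    uint64_t tick = ticks_.fetch_add(1, std::memory_order_relaxed);
    return 1e-5 * tick;  // 10 microseconds per call, as in the patch
  }

 private:
  std::atomic<uint64_t> ticks_{0};
};

int main() {
  SyntheticClock clock;
  double a = clock.Now();
  double b = clock.Now();
  printf("%.5f %.5f\n", a, b);  // 0.00000 0.00001
}
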
diff --git a/deps/v8/src/d8/d8-posix.cc b/deps/v8/src/d8/d8-posix.cc
index 05e475f538..8a031ccdc0 100644
--- a/deps/v8/src/d8/d8-posix.cc
+++ b/deps/v8/src/d8/d8-posix.cc
@@ -16,6 +16,8 @@
#include <sys/wait.h>
#include <unistd.h>
+#include "include/v8-container.h"
+#include "include/v8-template.h"
#include "src/base/platform/wrappers.h"
#include "src/d8/d8.h"
diff --git a/deps/v8/src/d8/d8-test.cc b/deps/v8/src/d8/d8-test.cc
index 635a1f4514..6202c397ec 100644
--- a/deps/v8/src/d8/d8-test.cc
+++ b/deps/v8/src/d8/d8-test.cc
@@ -5,6 +5,7 @@
#include "src/d8/d8.h"
#include "include/v8-fast-api-calls.h"
+#include "include/v8-template.h"
#include "src/api/api-inl.h"
// This file exposes a d8.test.fast_c_api object, which adds testing facility
@@ -94,10 +95,10 @@ class FastCApiObject {
#ifdef V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
typedef double Type;
- static constexpr CTypeInfo type_info = CTypeInfo(CTypeInfo::Type::kFloat64);
+#define type_info kTypeInfoFloat64
#else
typedef int32_t Type;
- static constexpr CTypeInfo type_info = CTypeInfo(CTypeInfo::Type::kInt32);
+#define type_info kTypeInfoInt32
#endif // V8_ENABLE_FP_PARAMS_IN_C_LINKAGE
static Type AddAllSequenceFastCallback(Local<Object> receiver,
bool should_fallback,
@@ -630,16 +631,19 @@ Local<FunctionTemplate> Shell::CreateTestFastCApiTemplate(Isolate* isolate) {
SideEffectType::kHasSideEffect, &is_valid_api_object_c_func));
api_obj_ctor->PrototypeTemplate()->Set(
isolate, "fast_call_count",
- FunctionTemplate::New(isolate, FastCApiObject::FastCallCount,
- Local<Value>(), signature));
+ FunctionTemplate::New(
+ isolate, FastCApiObject::FastCallCount, Local<Value>(), signature,
+ 1, ConstructorBehavior::kThrow, SideEffectType::kHasNoSideEffect));
api_obj_ctor->PrototypeTemplate()->Set(
isolate, "slow_call_count",
- FunctionTemplate::New(isolate, FastCApiObject::SlowCallCount,
- Local<Value>(), signature));
+ FunctionTemplate::New(
+ isolate, FastCApiObject::SlowCallCount, Local<Value>(), signature,
+ 1, ConstructorBehavior::kThrow, SideEffectType::kHasNoSideEffect));
api_obj_ctor->PrototypeTemplate()->Set(
isolate, "reset_counts",
FunctionTemplate::New(isolate, FastCApiObject::ResetCounts,
- Local<Value>(), signature));
+ Local<Value>(), signature, 1,
+ ConstructorBehavior::kThrow));
}
api_obj_ctor->InstanceTemplate()->SetInternalFieldCount(
FastCApiObject::kV8WrapperObjectIndex + 1);
diff --git a/deps/v8/src/d8/d8.cc b/deps/v8/src/d8/d8.cc
index 2b831bc747..6d35be77b8 100644
--- a/deps/v8/src/d8/d8.cc
+++ b/deps/v8/src/d8/d8.cc
@@ -24,8 +24,12 @@
#include "include/libplatform/libplatform.h"
#include "include/libplatform/v8-tracing.h"
+#include "include/v8-function.h"
+#include "include/v8-initialization.h"
#include "include/v8-inspector.h"
+#include "include/v8-json.h"
#include "include/v8-profiler.h"
+#include "include/v8-wasm.h"
#include "src/api/api-inl.h"
#include "src/base/cpu.h"
#include "src/base/logging.h"
@@ -166,7 +170,11 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
void* AllocateVM(size_t length) {
DCHECK_LE(kVMThreshold, length);
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ v8::PageAllocator* page_allocator = i::GetPlatformDataCagePageAllocator();
+#else
v8::PageAllocator* page_allocator = i::GetPlatformPageAllocator();
+#endif
size_t page_size = page_allocator->AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
return i::AllocatePages(page_allocator, nullptr, allocated, page_size,
@@ -174,7 +182,11 @@ class ShellArrayBufferAllocator : public ArrayBufferAllocatorBase {
}
void FreeVM(void* data, size_t length) {
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ v8::PageAllocator* page_allocator = i::GetPlatformDataCagePageAllocator();
+#else
v8::PageAllocator* page_allocator = i::GetPlatformPageAllocator();
+#endif
size_t page_size = page_allocator->AllocatePageSize();
size_t allocated = RoundUp(length, page_size);
CHECK(i::FreePages(page_allocator, data, allocated));
@@ -236,7 +248,7 @@ class MockArrayBufferAllocatiorWithLimit : public MockArrayBufferAllocator {
std::atomic<size_t> space_left_;
};
-#ifdef V8_OS_LINUX
+#if MULTI_MAPPED_ALLOCATOR_AVAILABLE
// This is a mock allocator variant that provides a huge virtual allocation
// backed by a small real allocation that is repeatedly mapped. If you create an
@@ -329,7 +341,7 @@ class MultiMappedAllocator : public ArrayBufferAllocatorBase {
base::Mutex regions_mutex_;
};
-#endif // V8_OS_LINUX
+#endif // MULTI_MAPPED_ALLOCATOR_AVAILABLE
v8::Platform* g_default_platform;
std::unique_ptr<v8::Platform> g_platform;
@@ -846,16 +858,21 @@ std::string NormalizePath(const std::string& path,
std::string segment;
while (std::getline(segment_stream, segment, '/')) {
if (segment == "..") {
- segments.pop_back();
+ if (!segments.empty()) segments.pop_back();
} else if (segment != ".") {
segments.push_back(segment);
}
}
// Join path segments.
std::ostringstream os;
- std::copy(segments.begin(), segments.end() - 1,
- std::ostream_iterator<std::string>(os, "/"));
- os << *segments.rbegin();
+ if (segments.size() > 1) {
+ std::copy(segments.begin(), segments.end() - 1,
+ std::ostream_iterator<std::string>(os, "/"));
+ os << *segments.rbegin();
+ } else {
+ os << "/";
+ if (!segments.empty()) os << segments[0];
+ }
return os.str();
}
@@ -1995,8 +2012,14 @@ void Shell::TestVerifySourcePositions(
auto callable = i::Handle<i::JSFunctionOrBoundFunction>::cast(arg_handle);
while (callable->IsJSBoundFunction()) {
+ internal::DisallowGarbageCollection no_gc;
auto bound_function = i::Handle<i::JSBoundFunction>::cast(callable);
auto bound_target = bound_function->bound_target_function();
+ if (!bound_target.IsJSFunctionOrBoundFunction()) {
+ internal::AllowGarbageCollection allow_gc;
+ isolate->ThrowError("Expected function as bound target.");
+ return;
+ }
callable =
handle(i::JSFunctionOrBoundFunction::cast(bound_target), i_isolate);
}
@@ -2009,7 +2032,7 @@ void Shell::TestVerifySourcePositions(
i::Handle<i::BytecodeArray> bytecodes =
handle(function->shared().GetBytecodeArray(i_isolate), i_isolate);
i::interpreter::BytecodeArrayIterator bytecode_iterator(bytecodes);
- bool has_baseline = function->shared().HasBaselineData();
+ bool has_baseline = function->shared().HasBaselineCode();
i::Handle<i::ByteArray> bytecode_offsets;
std::unique_ptr<i::baseline::BytecodeOffsetIterator> offset_iterator;
if (has_baseline) {
@@ -2990,7 +3013,7 @@ Local<ObjectTemplate> Shell::CreateD8Template(Isolate* isolate) {
// Correctness fuzzing will attempt to compare results of tests with and
// without turbo_fast_api_calls, so we don't expose the fast_c_api
// constructor when --correctness_fuzzer_suppressions is on.
- if (i::FLAG_turbo_fast_api_calls &&
+ if (options.expose_fast_api && i::FLAG_turbo_fast_api_calls &&
!i::FLAG_correctness_fuzzer_suppressions) {
test_template->Set(isolate, "FastCAPI",
Shell::CreateTestFastCApiTemplate(isolate));
@@ -3166,13 +3189,15 @@ void Shell::WriteIgnitionDispatchCountersFile(v8::Isolate* isolate) {
Local<Context> context = Context::New(isolate);
Context::Scope context_scope(context);
- Local<Object> dispatch_counters = reinterpret_cast<i::Isolate*>(isolate)
- ->interpreter()
- ->GetDispatchCountersObject();
+ i::Handle<i::JSObject> dispatch_counters =
+ reinterpret_cast<i::Isolate*>(isolate)
+ ->interpreter()
+ ->GetDispatchCountersObject();
std::ofstream dispatch_counters_stream(
i::FLAG_trace_ignition_dispatches_output_file);
dispatch_counters_stream << *String::Utf8Value(
- isolate, JSON::Stringify(context, dispatch_counters).ToLocalChecked());
+ isolate, JSON::Stringify(context, Utils::ToLocal(dispatch_counters))
+ .ToLocalChecked());
}
namespace {
@@ -3491,15 +3516,9 @@ void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
isolate->ThrowError("Error reading file");
return;
}
- std::unique_ptr<v8::BackingStore> backing_store =
- ArrayBuffer::NewBackingStore(
- data, length,
- [](void* data, size_t length, void*) {
- delete[] reinterpret_cast<uint8_t*>(data);
- },
- nullptr);
- Local<v8::ArrayBuffer> buffer =
- ArrayBuffer::New(isolate, std::move(backing_store));
+ Local<v8::ArrayBuffer> buffer = ArrayBuffer::New(isolate, length);
+ memcpy(buffer->GetBackingStore()->Data(), data, length);
+ delete[] data;
args.GetReturnValue().Set(buffer);
}
@@ -4252,6 +4271,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
} else if (strcmp(argv[i], "--throws") == 0) {
options.expected_to_throw = true;
argv[i] = nullptr;
+ } else if (strcmp(argv[i], "--no-fail") == 0) {
+ options.no_fail = true;
+ argv[i] = nullptr;
} else if (strncmp(argv[i], "--icu-data-file=", 16) == 0) {
options.icu_data_file = argv[i] + 16;
argv[i] = nullptr;
@@ -4357,8 +4379,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
options.fuzzilli_coverage_statistics = true;
argv[i] = nullptr;
#endif
- } else if (strcmp(argv[i], "--fuzzy-module-file-extensions") == 0) {
- options.fuzzy_module_file_extensions = true;
+ } else if (strcmp(argv[i], "--no-fuzzy-module-file-extensions") == 0) {
+ DCHECK(options.fuzzy_module_file_extensions);
+ options.fuzzy_module_file_extensions = false;
argv[i] = nullptr;
#if defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
} else if (strcmp(argv[i], "--enable-system-instrumentation") == 0) {
@@ -4381,6 +4404,9 @@ bool Shell::SetOptions(int argc, char* argv[]) {
options.wasm_trap_handler = false;
argv[i] = nullptr;
#endif // V8_ENABLE_WEBASSEMBLY
+ } else if (strcmp(argv[i], "--expose-fast-api") == 0) {
+ options.expose_fast_api = true;
+ argv[i] = nullptr;
}
}
@@ -4404,10 +4430,15 @@ bool Shell::SetOptions(int argc, char* argv[]) {
options.mock_arraybuffer_allocator = i::FLAG_mock_arraybuffer_allocator;
options.mock_arraybuffer_allocator_limit =
i::FLAG_mock_arraybuffer_allocator_limit;
-#if V8_OS_LINUX
+#if MULTI_MAPPED_ALLOCATOR_AVAILABLE
options.multi_mapped_mock_allocator = i::FLAG_multi_mapped_mock_allocator;
#endif
+ if (i::FLAG_stress_snapshot && options.expose_fast_api &&
+ check_d8_flag_contradictions) {
+ FATAL("Flag --expose-fast-api is incompatible with --stress-snapshot.");
+ }
+
// Set up isolated source groups.
options.isolate_sources = new SourceGroup[options.num_isolates];
SourceGroup* current = options.isolate_sources;
@@ -4501,7 +4532,8 @@ int Shell::RunMain(Isolate* isolate, bool last_run) {
Shell::unhandled_promise_rejections_.store(0);
}
// In order to finish successfully, success must be != expected_to_throw.
- return success == Shell::options.expected_to_throw ? 1 : 0;
+ if (Shell::options.no_fail) return 0;
+ return (success == Shell::options.expected_to_throw ? 1 : 0);
}
void Shell::CollectGarbage(Isolate* isolate) {
@@ -5019,7 +5051,7 @@ int Shell::Main(int argc, char* argv[]) {
options.thread_pool_size, v8::platform::IdleTaskSupport::kEnabled,
in_process_stack_dumping, std::move(tracing));
g_default_platform = g_platform.get();
- if (i::FLAG_verify_predictable) {
+ if (i::FLAG_predictable) {
g_platform = MakePredictablePlatform(std::move(g_platform));
}
if (options.stress_delay_tasks) {
@@ -5037,6 +5069,11 @@ int Shell::Main(int argc, char* argv[]) {
V8::SetFlagsFromString("--redirect-code-traces-to=code.asm");
}
v8::V8::InitializePlatform(g_platform.get());
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ if (!v8::V8::InitializeVirtualMemoryCage()) {
+ FATAL("Could not initialize the virtual memory cage");
+ }
+#endif
v8::V8::Initialize();
if (options.snapshot_blob) {
v8::V8::InitializeExternalStartupDataFromFile(options.snapshot_blob);
@@ -5053,19 +5090,19 @@ int Shell::Main(int argc, char* argv[]) {
memory_limit >= options.mock_arraybuffer_allocator_limit
? memory_limit
: std::numeric_limits<size_t>::max());
-#if V8_OS_LINUX
+#if MULTI_MAPPED_ALLOCATOR_AVAILABLE
MultiMappedAllocator multi_mapped_mock_allocator;
-#endif // V8_OS_LINUX
+#endif
if (options.mock_arraybuffer_allocator) {
if (memory_limit) {
Shell::array_buffer_allocator = &mock_arraybuffer_allocator_with_limit;
} else {
Shell::array_buffer_allocator = &mock_arraybuffer_allocator;
}
-#if V8_OS_LINUX
+#if MULTI_MAPPED_ALLOCATOR_AVAILABLE
} else if (options.multi_mapped_mock_allocator) {
Shell::array_buffer_allocator = &multi_mapped_mock_allocator;
-#endif // V8_OS_LINUX
+#endif
} else {
Shell::array_buffer_allocator = &shell_array_buffer_allocator;
}
diff --git a/deps/v8/src/d8/d8.h b/deps/v8/src/d8/d8.h
index 9d3cc4f6d2..77b3ca6679 100644
--- a/deps/v8/src/d8/d8.h
+++ b/deps/v8/src/d8/d8.h
@@ -14,6 +14,9 @@
#include <unordered_set>
#include <vector>
+#include "include/v8-array-buffer.h"
+#include "include/v8-isolate.h"
+#include "include/v8-script.h"
#include "src/base/once.h"
#include "src/base/platform/time.h"
#include "src/base/platform/wrappers.h"
@@ -24,7 +27,11 @@
namespace v8 {
+class BackingStore;
+class CompiledWasmModule;
class D8Console;
+class Message;
+class TryCatch;
enum class ModuleType { kJavaScript, kJSON, kInvalid };
@@ -385,14 +392,17 @@ class ShellOptions {
DisallowReassignment<bool> interactive_shell = {"shell", false};
bool test_shell = false;
DisallowReassignment<bool> expected_to_throw = {"throws", false};
+ DisallowReassignment<bool> no_fail = {"no-fail", false};
DisallowReassignment<bool> ignore_unhandled_promises = {
"ignore-unhandled-promises", false};
DisallowReassignment<bool> mock_arraybuffer_allocator = {
"mock-arraybuffer-allocator", false};
DisallowReassignment<size_t> mock_arraybuffer_allocator_limit = {
"mock-arraybuffer-allocator-limit", 0};
+#if MULTI_MAPPED_ALLOCATOR_AVAILABLE
DisallowReassignment<bool> multi_mapped_mock_allocator = {
"multi-mapped-mock-allocator", false};
+#endif
DisallowReassignment<bool> enable_inspector = {"enable-inspector", false};
int num_isolates = 1;
DisallowReassignment<v8::ScriptCompiler::CompileOptions, true>
@@ -433,6 +443,7 @@ class ShellOptions {
#if V8_ENABLE_WEBASSEMBLY
DisallowReassignment<bool> wasm_trap_handler = {"wasm-trap-handler", true};
#endif // V8_ENABLE_WEBASSEMBLY
+ DisallowReassignment<bool> expose_fast_api = {"expose-fast-api", false};
};
class Shell : public i::AllStatic {
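[Editor's sketch] The d8.h hunk above adds new DisallowReassignment<...> options (no_fail, expose_fast_api). A hedged sketch of the set-once flag wrapper that pattern suggests; the names and behavior here are illustrative only, and d8's actual DisallowReassignment implementation may differ:

#include <cassert>
#include <cstdio>

template <typename T>
class SetOnceOption {
 public:
  SetOnceOption(const char* name, T default_value)
      : name_(name), value_(default_value) {}

  const char* name() const { return name_; }
  const T& get() const { return value_; }

  void Set(T value) {
    assert(!assigned_ && "option was reassigned");  // catch conflicting flags
    value_ = value;
    assigned_ = true;
  }

 private:
  const char* name_;
  T value_;
  bool assigned_ = false;
};

int main() {
  SetOnceOption<bool> no_fail("no-fail", false);
  no_fail.Set(true);  // e.g. after parsing --no-fail on the command line
  printf("--%s => %d\n", no_fail.name(), no_fail.get());
}
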
diff --git a/deps/v8/src/date/date.cc b/deps/v8/src/date/date.cc
index 250539e24c..9b0665aba0 100644
--- a/deps/v8/src/date/date.cc
+++ b/deps/v8/src/date/date.cc
@@ -455,5 +455,83 @@ DateCache::DST* DateCache::LeastRecentlyUsedDST(DST* skip) {
return result;
}
+namespace {
+
+// ES6 section 20.3.1.1 Time Values and Time Range
+const double kMinYear = -1000000.0;
+const double kMaxYear = -kMinYear;
+const double kMinMonth = -10000000.0;
+const double kMaxMonth = -kMinMonth;
+
+const double kMsPerDay = 86400000.0;
+
+const double kMsPerSecond = 1000.0;
+const double kMsPerMinute = 60000.0;
+const double kMsPerHour = 3600000.0;
+
+} // namespace
+
+double MakeDate(double day, double time) {
+ if (std::isfinite(day) && std::isfinite(time)) {
+ return time + day * kMsPerDay;
+ }
+ return std::numeric_limits<double>::quiet_NaN();
+}
+
+double MakeDay(double year, double month, double date) {
+ if ((kMinYear <= year && year <= kMaxYear) &&
+ (kMinMonth <= month && month <= kMaxMonth) && std::isfinite(date)) {
+ int y = FastD2I(year);
+ int m = FastD2I(month);
+ y += m / 12;
+ m %= 12;
+ if (m < 0) {
+ m += 12;
+ y -= 1;
+ }
+ DCHECK_LE(0, m);
+ DCHECK_LT(m, 12);
+
+ // kYearDelta is an arbitrary number such that:
+ // a) kYearDelta = -1 (mod 400)
+ // b) year + kYearDelta > 0 for years in the range defined by
+    //    ECMA 262 - 15.9.1.1, i.e. up to 100,000,000 days on either side of
+ // Jan 1 1970. This is required so that we don't run into integer
+ // division of negative numbers.
+ // c) there shouldn't be an overflow for 32-bit integers in the following
+ // operations.
+ static const int kYearDelta = 399999;
+ static const int kBaseDay =
+ 365 * (1970 + kYearDelta) + (1970 + kYearDelta) / 4 -
+ (1970 + kYearDelta) / 100 + (1970 + kYearDelta) / 400;
+ int day_from_year = 365 * (y + kYearDelta) + (y + kYearDelta) / 4 -
+ (y + kYearDelta) / 100 + (y + kYearDelta) / 400 -
+ kBaseDay;
+ if ((y % 4 != 0) || (y % 100 == 0 && y % 400 != 0)) {
+ static const int kDayFromMonth[] = {0, 31, 59, 90, 120, 151,
+ 181, 212, 243, 273, 304, 334};
+ day_from_year += kDayFromMonth[m];
+ } else {
+ static const int kDayFromMonth[] = {0, 31, 60, 91, 121, 152,
+ 182, 213, 244, 274, 305, 335};
+ day_from_year += kDayFromMonth[m];
+ }
+ return static_cast<double>(day_from_year - 1) + DoubleToInteger(date);
+ }
+ return std::numeric_limits<double>::quiet_NaN();
+}
+
+double MakeTime(double hour, double min, double sec, double ms) {
+ if (std::isfinite(hour) && std::isfinite(min) && std::isfinite(sec) &&
+ std::isfinite(ms)) {
+ double const h = DoubleToInteger(hour);
+ double const m = DoubleToInteger(min);
+ double const s = DoubleToInteger(sec);
+ double const milli = DoubleToInteger(ms);
+ return h * kMsPerHour + m * kMsPerMinute + s * kMsPerSecond + milli;
+ }
+ return std::numeric_limits<double>::quiet_NaN();
+}
+
} // namespace internal
} // namespace v8
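[Editor's sketch] The MakeDay arithmetic added above is easy to sanity-check. Below is a stand-alone reimplementation, simplified to plain ints (no range or NaN handling), with the kYearDelta/kBaseDay constants copied from the hunk; DaysSinceEpoch is an invented name for the check:

#include <cstdio>

// kYearDelta keeps every intermediate value positive so integer division of
// negative numbers never occurs, exactly as the comment in MakeDay explains.
int DaysSinceEpoch(int year, int month, int day_of_month) {
  int y = year + month / 12;
  int m = month % 12;
  if (m < 0) { m += 12; y -= 1; }
  static const int kYearDelta = 399999;
  static const int kBaseDay =
      365 * (1970 + kYearDelta) + (1970 + kYearDelta) / 4 -
      (1970 + kYearDelta) / 100 + (1970 + kYearDelta) / 400;
  int days = 365 * (y + kYearDelta) + (y + kYearDelta) / 4 -
             (y + kYearDelta) / 100 + (y + kYearDelta) / 400 - kBaseDay;
  bool leap = (y % 4 == 0) && (y % 100 != 0 || y % 400 == 0);
  static const int kNonLeap[] = {0, 31, 59, 90,  120, 151,
                                 181, 212, 243, 273, 304, 334};
  static const int kLeap[] = {0, 31, 60, 91,  121, 152,
                              182, 213, 244, 274, 305, 335};
  days += (leap ? kLeap : kNonLeap)[m];
  return days - 1 + day_of_month;
}

int main() {
  printf("%d\n", DaysSinceEpoch(1970, 0, 1));  // 0     (Jan 1 1970, the epoch)
  printf("%d\n", DaysSinceEpoch(2000, 2, 1));  // 11017 (Mar 1 2000)
}
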
diff --git a/deps/v8/src/date/date.h b/deps/v8/src/date/date.h
index 1f6c79c5d4..734ab3a26f 100644
--- a/deps/v8/src/date/date.h
+++ b/deps/v8/src/date/date.h
@@ -236,6 +236,17 @@ class V8_EXPORT_PRIVATE DateCache {
base::TimezoneCache* tz_cache_;
};
+// Routines shared between Date and Temporal
+
+// ES6 section 20.3.1.14 MakeDate (day, time)
+double MakeDate(double day, double time);
+
+// ES6 section 20.3.1.13 MakeDay (year, month, date)
+double MakeDay(double year, double month, double date);
+
+// ES6 section 20.3.1.12 MakeTime (hour, min, sec, ms)
+double MakeTime(double hour, double min, double sec, double ms);
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/debug/debug-evaluate.cc b/deps/v8/src/debug/debug-evaluate.cc
index cecf46d7b7..5940e2dd02 100644
--- a/deps/v8/src/debug/debug-evaluate.cc
+++ b/deps/v8/src/debug/debug-evaluate.cc
@@ -34,9 +34,8 @@ static MaybeHandle<SharedFunctionInfo> GetFunctionInfo(Isolate* isolate,
ScriptOriginOptions(false, true));
script_details.repl_mode = repl_mode;
return Compiler::GetSharedFunctionInfoForScript(
- isolate, source, script_details, nullptr, nullptr,
- ScriptCompiler::kNoCompileOptions, ScriptCompiler::kNoCacheNoReason,
- NOT_NATIVES_CODE);
+ isolate, source, script_details, ScriptCompiler::kNoCompileOptions,
+ ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE);
}
} // namespace
@@ -391,8 +390,7 @@ bool IntrinsicHasNoSideEffect(Runtime::FunctionId id) {
/* Test */ \
V(GetOptimizationStatus) \
V(OptimizeFunctionOnNextCall) \
- V(OptimizeOsr) \
- V(UnblockConcurrentRecompilation)
+ V(OptimizeOsr)
// Intrinsics with inline versions have to be allowlisted here a second time.
#define INLINE_INTRINSIC_ALLOWLIST(V) \
@@ -1061,6 +1059,14 @@ static bool TransitivelyCalledBuiltinHasNoSideEffect(Builtin caller,
case Builtin::kTSANRelaxedStore32SaveFP:
case Builtin::kTSANRelaxedStore64IgnoreFP:
case Builtin::kTSANRelaxedStore64SaveFP:
+ case Builtin::kTSANSeqCstStore8IgnoreFP:
+ case Builtin::kTSANSeqCstStore8SaveFP:
+ case Builtin::kTSANSeqCstStore16IgnoreFP:
+ case Builtin::kTSANSeqCstStore16SaveFP:
+ case Builtin::kTSANSeqCstStore32IgnoreFP:
+ case Builtin::kTSANSeqCstStore32SaveFP:
+ case Builtin::kTSANSeqCstStore64IgnoreFP:
+ case Builtin::kTSANSeqCstStore64SaveFP:
case Builtin::kTSANRelaxedLoad32IgnoreFP:
case Builtin::kTSANRelaxedLoad32SaveFP:
case Builtin::kTSANRelaxedLoad64IgnoreFP:
diff --git a/deps/v8/src/debug/debug-interface.cc b/deps/v8/src/debug/debug-interface.cc
index 5112c5ba73..50c63e8f8e 100644
--- a/deps/v8/src/debug/debug-interface.cc
+++ b/deps/v8/src/debug/debug-interface.cc
@@ -4,6 +4,7 @@
#include "src/debug/debug-interface.h"
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/base/utils/random-number-generator.h"
#include "src/codegen/script-details.h"
@@ -760,8 +761,8 @@ MaybeLocal<UnboundScript> CompileInspectorScript(Isolate* v8_isolate,
{
i::AlignedCachedData* cached_data = nullptr;
i::MaybeHandle<i::SharedFunctionInfo> maybe_function_info =
- i::Compiler::GetSharedFunctionInfoForScript(
- isolate, str, i::ScriptDetails(), nullptr, cached_data,
+ i::Compiler::GetSharedFunctionInfoForScriptWithCachedData(
+ isolate, str, i::ScriptDetails(), cached_data,
ScriptCompiler::kNoCompileOptions,
ScriptCompiler::kNoCacheBecauseInspector,
i::FLAG_expose_inspector_scripts ? i::NOT_NATIVES_CODE
@@ -862,7 +863,7 @@ Local<Function> GetBuiltin(Isolate* v8_isolate, Builtin requested_builtin) {
.set_map(isolate->strict_function_without_prototype_map())
.Build();
- fun->shared().set_internal_formal_parameter_count(0);
+ fun->shared().set_internal_formal_parameter_count(i::JSParameterCount(0));
fun->shared().set_length(0);
return Utils::ToLocal(handle_scope.CloseAndEscape(fun));
}
@@ -1034,16 +1035,6 @@ int64_t GetNextRandomInt64(v8::Isolate* v8_isolate) {
->NextInt64();
}
-void EnumerateRuntimeCallCounters(v8::Isolate* v8_isolate,
- RuntimeCallCounterCallback callback) {
-#ifdef V8_RUNTIME_CALL_STATS
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
- if (isolate->counters()) {
- isolate->counters()->runtime_call_stats()->EnumerateCounters(callback);
- }
-#endif // V8_RUNTIME_CALL_STATS
-}
-
int GetDebuggingId(v8::Local<v8::Function> function) {
i::Handle<i::JSReceiver> callable = v8::Utils::OpenHandle(*function);
if (!callable->IsJSFunction()) return i::DebugInfo::kNoDebuggingId;
diff --git a/deps/v8/src/debug/debug-interface.h b/deps/v8/src/debug/debug-interface.h
index 81d38011cb..b186ab5689 100644
--- a/deps/v8/src/debug/debug-interface.h
+++ b/deps/v8/src/debug/debug-interface.h
@@ -7,9 +7,14 @@
#include <memory>
+#include "include/v8-callbacks.h"
+#include "include/v8-debug.h"
+#include "include/v8-embedder-heap.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-memory-span.h"
+#include "include/v8-promise.h"
+#include "include/v8-script.h"
#include "include/v8-util.h"
-#include "include/v8.h"
-#include "src/base/platform/time.h"
#include "src/base/vector.h"
#include "src/common/globals.h"
#include "src/debug/interface-types.h"
@@ -20,6 +25,8 @@ class V8Inspector;
namespace v8 {
+class Platform;
+
namespace internal {
struct CoverageBlock;
struct CoverageFunction;
@@ -515,11 +522,6 @@ enum class NativeAccessorType {
int64_t GetNextRandomInt64(v8::Isolate* isolate);
-using RuntimeCallCounterCallback =
- std::function<void(const char* name, int64_t count, base::TimeDelta time)>;
-void EnumerateRuntimeCallCounters(v8::Isolate* isolate,
- RuntimeCallCounterCallback callback);
-
MaybeLocal<Value> CallFunctionOn(Local<Context> context,
Local<Function> function, Local<Value> recv,
int argc, Local<Value> argv[],
diff --git a/deps/v8/src/debug/debug-property-iterator.h b/deps/v8/src/debug/debug-property-iterator.h
index 38c78b12bd..4e6a93f10e 100644
--- a/deps/v8/src/debug/debug-property-iterator.h
+++ b/deps/v8/src/debug/debug-property-iterator.h
@@ -5,14 +5,18 @@
#ifndef V8_DEBUG_DEBUG_PROPERTY_ITERATOR_H_
#define V8_DEBUG_DEBUG_PROPERTY_ITERATOR_H_
+#include "include/v8-local-handle.h"
+#include "include/v8-maybe.h"
+#include "include/v8-object.h"
#include "src/debug/debug-interface.h"
#include "src/execution/isolate.h"
#include "src/handles/handles.h"
#include "src/objects/prototype.h"
-#include "include/v8.h"
-
namespace v8 {
+
+class Name;
+
namespace internal {
class JSReceiver;
diff --git a/deps/v8/src/debug/debug.cc b/deps/v8/src/debug/debug.cc
index 41775c8965..4cf0124e8c 100644
--- a/deps/v8/src/debug/debug.cc
+++ b/deps/v8/src/debug/debug.cc
@@ -1325,7 +1325,7 @@ class DiscardBaselineCodeVisitor : public ThreadVisitor {
void Debug::DiscardBaselineCode(SharedFunctionInfo shared) {
RCS_SCOPE(isolate_, RuntimeCallCounterId::kDebugger);
- DCHECK(shared.HasBaselineData());
+ DCHECK(shared.HasBaselineCode());
Isolate* isolate = shared.GetIsolate();
DiscardBaselineCodeVisitor visitor(shared);
visitor.VisitThread(isolate, isolate->thread_local_top());
@@ -1333,7 +1333,7 @@ void Debug::DiscardBaselineCode(SharedFunctionInfo shared) {
// TODO(v8:11429): Avoid this heap walk somehow.
HeapObjectIterator iterator(isolate->heap());
auto trampoline = BUILTIN_CODE(isolate, InterpreterEntryTrampoline);
- shared.flush_baseline_data();
+ shared.FlushBaselineCode();
for (HeapObject obj = iterator.Next(); !obj.is_null();
obj = iterator.Next()) {
if (obj.IsJSFunction()) {
@@ -1356,9 +1356,14 @@ void Debug::DiscardAllBaselineCode() {
obj = iterator.Next()) {
if (obj.IsJSFunction()) {
JSFunction fun = JSFunction::cast(obj);
- if (fun.shared().HasBaselineData()) {
+ if (fun.ActiveTierIsBaseline()) {
fun.set_code(*trampoline);
}
+ } else if (obj.IsSharedFunctionInfo()) {
+ SharedFunctionInfo shared = SharedFunctionInfo::cast(obj);
+ if (shared.HasBaselineCode()) {
+ shared.FlushBaselineCode();
+ }
}
}
}
@@ -1369,7 +1374,7 @@ void Debug::DeoptimizeFunction(Handle<SharedFunctionInfo> shared) {
// inlining.
isolate_->AbortConcurrentOptimization(BlockingBehavior::kBlock);
- if (shared->HasBaselineData()) {
+ if (shared->HasBaselineCode()) {
DiscardBaselineCode(*shared);
}
@@ -1399,26 +1404,35 @@ void Debug::PrepareFunctionForDebugExecution(
DCHECK(shared->is_compiled());
DCHECK(shared->HasDebugInfo());
Handle<DebugInfo> debug_info = GetOrCreateDebugInfo(shared);
- if (debug_info->flags(kRelaxedLoad) & DebugInfo::kPreparedForDebugExecution)
+ if (debug_info->flags(kRelaxedLoad) & DebugInfo::kPreparedForDebugExecution) {
return;
-
- if (shared->HasBytecodeArray()) {
- SharedFunctionInfo::InstallDebugBytecode(shared, isolate_);
}
+ // Have to discard baseline code before installing debug bytecode, since the
+ // bytecode array field on the baseline code object is immutable.
if (debug_info->CanBreakAtEntry()) {
// Deopt everything in case the function is inlined anywhere.
Deoptimizer::DeoptimizeAll(isolate_);
DiscardAllBaselineCode();
- InstallDebugBreakTrampoline();
} else {
DeoptimizeFunction(shared);
+ }
+
+ if (shared->HasBytecodeArray()) {
+ DCHECK(!shared->HasBaselineCode());
+ SharedFunctionInfo::InstallDebugBytecode(shared, isolate_);
+ }
+
+ if (debug_info->CanBreakAtEntry()) {
+ InstallDebugBreakTrampoline();
+ } else {
// Update PCs on the stack to point to recompiled code.
RedirectActiveFunctions redirect_visitor(
*shared, RedirectActiveFunctions::Mode::kUseDebugBytecode);
redirect_visitor.VisitThread(isolate_, isolate_->thread_local_top());
isolate_->thread_manager()->IterateArchivedThreads(&redirect_visitor);
}
+
debug_info->set_flags(
debug_info->flags(kRelaxedLoad) | DebugInfo::kPreparedForDebugExecution,
kRelaxedStore);
@@ -2183,8 +2197,7 @@ bool Debug::ShouldBeSkipped() {
DisableBreak no_recursive_break(this);
StackTraceFrameIterator iterator(isolate_);
- CommonFrame* frame = iterator.frame();
- FrameSummary summary = FrameSummary::GetTop(frame);
+ FrameSummary summary = iterator.GetTopValidFrame();
Handle<Object> script_obj = summary.script();
if (!script_obj->IsScript()) return false;
diff --git a/deps/v8/src/debug/interface-types.h b/deps/v8/src/debug/interface-types.h
index a2645d33d6..8c8d4bf2ad 100644
--- a/deps/v8/src/debug/interface-types.h
+++ b/deps/v8/src/debug/interface-types.h
@@ -9,11 +9,14 @@
#include <string>
#include <vector>
-#include "include/v8.h"
+#include "include/v8-function-callback.h"
+#include "include/v8-local-handle.h"
#include "src/common/globals.h"
namespace v8 {
+class String;
+
namespace internal {
class BuiltinArguments;
} // namespace internal
diff --git a/deps/v8/src/deoptimizer/deoptimized-frame-info.cc b/deps/v8/src/deoptimizer/deoptimized-frame-info.cc
index a424a73ea1..c268d7258f 100644
--- a/deps/v8/src/deoptimizer/deoptimized-frame-info.cc
+++ b/deps/v8/src/deoptimizer/deoptimized-frame-info.cc
@@ -27,15 +27,17 @@ DeoptimizedFrameInfo::DeoptimizedFrameInfo(TranslatedState* state,
TranslatedState::iterator frame_it,
Isolate* isolate) {
int parameter_count =
- frame_it->shared_info()->internal_formal_parameter_count();
+ frame_it->shared_info()
+ ->internal_formal_parameter_count_without_receiver();
TranslatedFrame::iterator stack_it = frame_it->begin();
// Get the function. Note that this might materialize the function.
// In case the debugger mutates this value, we should deoptimize
// the function and remember the value in the materialized value store.
- DCHECK_EQ(parameter_count, Handle<JSFunction>::cast(stack_it->GetValue())
- ->shared()
- .internal_formal_parameter_count());
+ DCHECK_EQ(parameter_count,
+ Handle<JSFunction>::cast(stack_it->GetValue())
+ ->shared()
+ .internal_formal_parameter_count_without_receiver());
stack_it++; // Skip the function.
stack_it++; // Skip the receiver.
diff --git a/deps/v8/src/deoptimizer/deoptimizer.cc b/deps/v8/src/deoptimizer/deoptimizer.cc
index ea460aa36f..6bf26d5bf3 100644
--- a/deps/v8/src/deoptimizer/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer/deoptimizer.cc
@@ -477,15 +477,6 @@ const char* Deoptimizer::MessageFor(DeoptimizeKind kind, bool reuse_code) {
}
}
-namespace {
-
-uint16_t InternalFormalParameterCountWithReceiver(SharedFunctionInfo sfi) {
- static constexpr int kTheReceiver = 1;
- return sfi.internal_formal_parameter_count() + kTheReceiver;
-}
-
-} // namespace
-
Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
DeoptimizeKind kind, unsigned deopt_exit_index,
Address from, int fp_to_sp_delta)
@@ -541,7 +532,7 @@ Deoptimizer::Deoptimizer(Isolate* isolate, JSFunction function,
}
unsigned size = ComputeInputFrameSize();
const int parameter_count =
- InternalFormalParameterCountWithReceiver(function.shared());
+ function.shared().internal_formal_parameter_count_with_receiver();
input_ = new (size) FrameDescription(size, parameter_count);
if (kSupportsFixedDeoptExitSizes) {
@@ -903,9 +894,10 @@ void Deoptimizer::DoComputeOutputFrames() {
isolate_, input_->GetFramePointerAddress(), stack_fp_, &state_iterator,
input_data.LiteralArray(), input_->GetRegisterValues(), trace_file,
function_.IsHeapObject()
- ? function_.shared().internal_formal_parameter_count()
+ ? function_.shared()
+ .internal_formal_parameter_count_without_receiver()
: 0,
- actual_argument_count_);
+ actual_argument_count_ - kJSArgcReceiverSlots);
// Do the input frame to output frame(s) translation.
size_t count = translated_state_.frames().size();
@@ -1026,7 +1018,8 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
const int bytecode_offset =
goto_catch_handler ? catch_handler_pc_offset_ : real_bytecode_offset;
- const int parameters_count = InternalFormalParameterCountWithReceiver(shared);
+ const int parameters_count =
+ shared.internal_formal_parameter_count_with_receiver();
// If this is the bottom most frame or the previous frame was the arguments
// adaptor fake frame, then we already have extra arguments in the stack
@@ -1068,7 +1061,7 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
const bool advance_bc =
(!is_topmost || (deopt_kind_ == DeoptimizeKind::kLazy)) &&
!goto_catch_handler;
- const bool is_baseline = shared.HasBaselineData();
+ const bool is_baseline = shared.HasBaselineCode();
Code dispatch_builtin =
builtins->code(DispatchBuiltinFor(is_baseline, advance_bc));
@@ -1100,11 +1093,13 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
}
// Note: parameters_count includes the receiver.
+ // TODO(v8:11112): Simplify once the receiver is always included in argc.
if (verbose_tracing_enabled() && is_bottommost &&
- actual_argument_count_ > parameters_count - 1) {
- PrintF(trace_scope_->file(),
- " -- %d extra argument(s) already in the stack --\n",
- actual_argument_count_ - parameters_count + 1);
+ actual_argument_count_ - kJSArgcReceiverSlots > parameters_count - 1) {
+ PrintF(
+ trace_scope_->file(),
+ " -- %d extra argument(s) already in the stack --\n",
+ actual_argument_count_ - kJSArgcReceiverSlots - parameters_count + 1);
}
frame_writer.PushStackJSArguments(value_iterator, parameters_count);
@@ -1185,7 +1180,7 @@ void Deoptimizer::DoComputeUnoptimizedFrame(TranslatedFrame* translated_frame,
(translated_state_.frames()[frame_index - 1]).kind();
argc = previous_frame_kind == TranslatedFrame::kArgumentsAdaptor
? output_[frame_index - 1]->parameter_count()
- : parameters_count - 1;
+ : parameters_count - (kJSArgcIncludesReceiver ? 0 : 1);
}
frame_writer.PushRawValue(argc, "actual argument count\n");
@@ -1334,7 +1329,8 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
TranslatedFrame::iterator value_iterator = translated_frame->begin();
const int argument_count_without_receiver = translated_frame->height() - 1;
const int formal_parameter_count =
- translated_frame->raw_shared_info().internal_formal_parameter_count();
+ translated_frame->raw_shared_info()
+ .internal_formal_parameter_count_without_receiver();
const int extra_argument_count =
argument_count_without_receiver - formal_parameter_count;
// The number of pushed arguments is the maximum of the actual argument count
@@ -1350,8 +1346,8 @@ void Deoptimizer::DoComputeArgumentsAdaptorFrame(
}
// Allocate and store the output frame description.
- FrameDescription* output_frame = new (output_frame_size)
- FrameDescription(output_frame_size, argument_count_without_receiver);
+ FrameDescription* output_frame = new (output_frame_size) FrameDescription(
+ output_frame_size, JSParameterCount(argument_count_without_receiver));
// The top address of the frame is computed from the previous frame's top and
// this frame's size.
const intptr_t top_address =
@@ -1470,9 +1466,8 @@ void Deoptimizer::DoComputeConstructStubFrame(TranslatedFrame* translated_frame,
frame_writer.PushTranslatedValue(value_iterator++, "context");
// Number of incoming arguments.
- const uint32_t parameters_count_without_receiver = parameters_count - 1;
- frame_writer.PushRawObject(Smi::FromInt(parameters_count_without_receiver),
- "argc\n");
+ const uint32_t argc = parameters_count - (kJSArgcIncludesReceiver ? 0 : 1);
+ frame_writer.PushRawObject(Smi::FromInt(argc), "argc\n");
// The constructor function was mentioned explicitly in the
// CONSTRUCT_STUB_FRAME.
@@ -2067,7 +2062,7 @@ unsigned Deoptimizer::ComputeInputFrameSize() const {
// static
unsigned Deoptimizer::ComputeIncomingArgumentSize(SharedFunctionInfo shared) {
- int parameter_slots = InternalFormalParameterCountWithReceiver(shared);
+ int parameter_slots = shared.internal_formal_parameter_count_with_receiver();
return parameter_slots * kSystemPointerSize;
}
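[Editor's sketch] The deoptimizer hunks above replace a local "+1 for the receiver" helper with the new internal_formal_parameter_count_with/without_receiver accessors and the kJSArgcReceiverSlots constant. A tiny sketch of the convention; note the real constants depend on build-time flags such as kJSArgcIncludesReceiver, so the value below is an assumption for illustration, not the actual configuration:

#include <cstdio>

constexpr int kJSArgcReceiverSlots = 1;  // assume argc includes the receiver

constexpr int WithReceiver(int formal_params_without_receiver) {
  return formal_params_without_receiver + kJSArgcReceiverSlots;
}
constexpr int WithoutReceiver(int argc_including_receiver) {
  return argc_including_receiver - kJSArgcReceiverSlots;
}

int main() {
  // A function declared as f(a, b): 2 formal parameters, 3 stack slots once
  // the receiver is counted.
  printf("slots=%d explicit=%d\n", WithReceiver(2), WithoutReceiver(3));
}
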
diff --git a/deps/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc b/deps/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc
new file mode 100644
index 0000000000..fb82466af1
--- /dev/null
+++ b/deps/v8/src/deoptimizer/loong64/deoptimizer-loong64.cc
@@ -0,0 +1,42 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/deoptimizer/deoptimizer.h"
+
+namespace v8 {
+namespace internal {
+
+const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
+const int Deoptimizer::kNonLazyDeoptExitSize = 2 * kInstrSize;
+const int Deoptimizer::kLazyDeoptExitSize = 2 * kInstrSize;
+const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 3 * kInstrSize;
+const int Deoptimizer::kEagerWithResumeDeoptExitSize =
+ kEagerWithResumeBeforeArgsSize + 2 * kSystemPointerSize;
+// TODO(LOONG_dev): LOONG64 Is the PcOffset right?
+const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = kInstrSize;
+const int Deoptimizer::kEagerWithResumeImmedArgs2PcOffset =
+ kInstrSize + kSystemPointerSize;
+
+Float32 RegisterValues::GetFloatRegister(unsigned n) const {
+ return Float32::FromBits(
+ static_cast<uint32_t>(double_registers_[n].get_bits()));
+}
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+ SetFrameSlot(offset, value);
+}
+
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+ // No embedded constant pool support.
+ UNREACHABLE();
+}
+
+void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/src/deoptimizer/translated-state.cc b/deps/v8/src/deoptimizer/translated-state.cc
index 4f5e3370e6..721918c195 100644
--- a/deps/v8/src/deoptimizer/translated-state.cc
+++ b/deps/v8/src/deoptimizer/translated-state.cc
@@ -678,15 +678,6 @@ TranslatedFrame TranslatedFrame::JavaScriptBuiltinContinuationWithCatchFrame(
return frame;
}
-namespace {
-
-uint16_t InternalFormalParameterCountWithReceiver(SharedFunctionInfo sfi) {
- static constexpr int kTheReceiver = 1;
- return sfi.internal_formal_parameter_count() + kTheReceiver;
-}
-
-} // namespace
-
int TranslatedFrame::GetValueCount() {
// The function is added to all frame state descriptors in
// InstructionSelector::AddInputsToFrameStateDescriptor.
@@ -695,7 +686,7 @@ int TranslatedFrame::GetValueCount() {
switch (kind()) {
case kUnoptimizedFunction: {
int parameter_count =
- InternalFormalParameterCountWithReceiver(raw_shared_info_);
+ raw_shared_info_.internal_formal_parameter_count_with_receiver();
static constexpr int kTheContext = 1;
static constexpr int kTheAccumulator = 1;
return height() + parameter_count + kTheContext + kTheFunction +
@@ -748,7 +739,8 @@ TranslatedFrame TranslatedState::CreateNextTranslatedFrame(
if (trace_file != nullptr) {
std::unique_ptr<char[]> name = shared_info.DebugNameCStr();
PrintF(trace_file, " reading input frame %s", name.get());
- int arg_count = InternalFormalParameterCountWithReceiver(shared_info);
+ int arg_count =
+ shared_info.internal_formal_parameter_count_with_receiver();
PrintF(trace_file,
" => bytecode_offset=%d, args=%d, height=%d, retval=%i(#%i); "
"inputs:\n",
@@ -1298,7 +1290,9 @@ TranslatedState::TranslatedState(const JavaScriptFrame* frame)
int actual_argc = frame->GetActualArgumentCount();
Init(frame->isolate(), frame->fp(), frame->fp(), &it, data.LiteralArray(),
nullptr /* registers */, nullptr /* trace file */,
- frame->function().shared().internal_formal_parameter_count(),
+ frame->function()
+ .shared()
+ .internal_formal_parameter_count_without_receiver(),
actual_argc);
}
@@ -1977,21 +1971,21 @@ TranslatedFrame* TranslatedState::GetArgumentsInfoFromJSFrameIndex(
// be shown in a stack trace.
if (frames_[i].kind() ==
TranslatedFrame::kJavaScriptBuiltinContinuation &&
- frames_[i].shared_info()->internal_formal_parameter_count() ==
- kDontAdaptArgumentsSentinel) {
+ frames_[i].shared_info()->IsDontAdaptArguments()) {
DCHECK(frames_[i].shared_info()->IsApiFunction());
// The argument count for this special case is always the second
// to last value in the TranslatedFrame. It should also always be
- // {1}, as the GenericLazyDeoptContinuation builtin only has one
- // argument (the receiver).
+ // {1}, as the GenericLazyDeoptContinuation builtin has one explicit
+ // argument (the result).
static constexpr int kTheContext = 1;
const int height = frames_[i].height() + kTheContext;
*args_count = frames_[i].ValueAt(height - 1)->GetSmiValue();
- DCHECK_EQ(*args_count, 1);
+ DCHECK_EQ(*args_count, JSParameterCount(1));
} else {
- *args_count = InternalFormalParameterCountWithReceiver(
- *frames_[i].shared_info());
+ *args_count = frames_[i]
+ .shared_info()
+ ->internal_formal_parameter_count_with_receiver();
}
return &(frames_[i]);
}
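The helper deleted above is what the new SharedFunctionInfo accessors absorb; a minimal sketch of the invariant this refactor relies on, assuming internal_formal_parameter_count_with_receiver() counts exactly one extra receiver slot, as the removed helper did (the free function here is hypothetical):

uint16_t FormalParameterCountWithReceiver(SharedFunctionInfo sfi) {
  static constexpr int kTheReceiver = 1;  // the receiver occupies one slot
  return sfi.internal_formal_parameter_count_without_receiver() + kTheReceiver;
}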
diff --git a/deps/v8/src/diagnostics/arm/disasm-arm.cc b/deps/v8/src/diagnostics/arm/disasm-arm.cc
index cf37d12a1f..7ba20c0d98 100644
--- a/deps/v8/src/diagnostics/arm/disasm-arm.cc
+++ b/deps/v8/src/diagnostics/arm/disasm-arm.cc
@@ -676,7 +676,6 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
}
default: {
UNREACHABLE();
- return -1;
}
}
out_buffer_pos_ +=
@@ -787,7 +786,6 @@ void Decoder::DecodeType01(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else {
// strex
@@ -808,7 +806,6 @@ void Decoder::DecodeType01(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
} else {
@@ -853,7 +850,6 @@ void Decoder::DecodeType01(Instruction* instr) {
default: {
// The PU field is a 2-bit field.
UNREACHABLE();
- break;
}
}
} else {
@@ -894,7 +890,6 @@ void Decoder::DecodeType01(Instruction* instr) {
default: {
// The PU field is a 2-bit field.
UNREACHABLE();
- break;
}
}
return;
@@ -1030,7 +1025,6 @@ void Decoder::DecodeType01(Instruction* instr) {
default: {
// The Opcode field is a 4-bit field.
UNREACHABLE();
- break;
}
}
}
@@ -1107,10 +1101,8 @@ void Decoder::DecodeType3(Instruction* instr) {
break;
case 1:
UNREACHABLE();
- break;
case 2:
UNREACHABLE();
- break;
case 3:
Format(instr, "usat 'rd, #'imm05@16, 'rm'shift_sat");
break;
@@ -1119,7 +1111,6 @@ void Decoder::DecodeType3(Instruction* instr) {
switch (instr->Bits(22, 21)) {
case 0:
UNREACHABLE();
- break;
case 1:
if (instr->Bits(9, 6) == 1) {
if (instr->Bit(20) == 0) {
@@ -1948,7 +1939,6 @@ void Decoder::DecodeFloatingPointDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE(); // Case analysis is exhaustive.
- break;
}
} else if (instr->Opc1Value() == 0x4 && op2) {
// Floating-point minNum/maxNum.
@@ -2002,7 +1992,6 @@ void Decoder::DecodeFloatingPointDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE(); // Case analysis is exhaustive.
- break;
}
} else {
Unknown(instr);
@@ -2617,12 +2606,10 @@ const char* NameConverter::NameOfCPURegister(int reg) const {
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // ARM does not have the concept of a byte register
- return "nobytereg";
}
const char* NameConverter::NameOfXMMRegister(int reg) const {
UNREACHABLE(); // ARM does not have any XMM registers
- return "noxmmreg";
}
const char* NameConverter::NameInCode(byte* addr) const {
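Every hunk in this file (and in the arm64/eh-frame ones below) follows one pattern: UNREACHABLE() and UNIMPLEMENTED() never return, so the placeholder returns after them are dead code. A minimal standalone sketch of why this compiles cleanly, using a hypothetical [[noreturn]] stand-in (an assumption about how those macros terminate):

[[noreturn]] void Abort();  // stand-in for UNREACHABLE()/UNIMPLEMENTED()

const char* NameOfByteRegister(int reg) {
  Abort();  // never returns, so no `return "nobytereg";` fallback is needed
}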
diff --git a/deps/v8/src/diagnostics/arm/eh-frame-arm.cc b/deps/v8/src/diagnostics/arm/eh-frame-arm.cc
index 7d0dc49155..ef0a421820 100644
--- a/deps/v8/src/diagnostics/arm/eh-frame-arm.cc
+++ b/deps/v8/src/diagnostics/arm/eh-frame-arm.cc
@@ -37,7 +37,6 @@ int EhFrameWriter::RegisterToDwarfCode(Register name) {
return kR0DwarfCode;
default:
UNIMPLEMENTED();
- return -1;
}
}
@@ -54,7 +53,6 @@ const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) {
return "lr";
default:
UNIMPLEMENTED();
- return nullptr;
}
}
diff --git a/deps/v8/src/diagnostics/arm/unwinder-arm.cc b/deps/v8/src/diagnostics/arm/unwinder-arm.cc
index e0e2f0e91f..e51804caea 100644
--- a/deps/v8/src/diagnostics/arm/unwinder-arm.cc
+++ b/deps/v8/src/diagnostics/arm/unwinder-arm.cc
@@ -5,7 +5,7 @@
#include <memory>
#include "include/v8-unwinder-state.h"
-#include "include/v8.h"
+#include "include/v8-unwinder.h"
#include "src/diagnostics/unwinder.h"
#include "src/execution/frame-constants.h"
diff --git a/deps/v8/src/diagnostics/arm64/disasm-arm64.cc b/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
index 93b9531bd5..af6e7f5441 100644
--- a/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
+++ b/deps/v8/src/diagnostics/arm64/disasm-arm64.cc
@@ -3954,7 +3954,6 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
}
default: {
UNIMPLEMENTED();
- return 0;
}
}
}
@@ -3997,7 +3996,6 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
return 0;
}
UNIMPLEMENTED();
- return 0;
}
case 'L': { // IVLSLane[0123] - suffix indicates access size shift.
AppendToOutput("%d", instr->NEONLSIndex(format[8] - '0'));
@@ -4042,12 +4040,10 @@ int DisassemblingDecoder::SubstituteImmediateField(Instruction* instr,
return static_cast<int>(strlen("IVMIShiftAmt2"));
} else {
UNIMPLEMENTED();
- return 0;
}
}
default: {
UNIMPLEMENTED();
- return 0;
}
}
}
@@ -4342,12 +4338,10 @@ const char* NameConverter::NameOfCPURegister(int reg) const {
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // ARM64 does not have the concept of a byte register
- return "nobytereg";
}
const char* NameConverter::NameOfXMMRegister(int reg) const {
UNREACHABLE(); // ARM64 does not have any XMM registers
- return "noxmmreg";
}
const char* NameConverter::NameInCode(byte* addr) const {
diff --git a/deps/v8/src/diagnostics/arm64/eh-frame-arm64.cc b/deps/v8/src/diagnostics/arm64/eh-frame-arm64.cc
index 115d0cc300..d27827cfc1 100644
--- a/deps/v8/src/diagnostics/arm64/eh-frame-arm64.cc
+++ b/deps/v8/src/diagnostics/arm64/eh-frame-arm64.cc
@@ -38,7 +38,6 @@ int EhFrameWriter::RegisterToDwarfCode(Register name) {
return kX0DwarfCode;
default:
UNIMPLEMENTED();
- return -1;
}
}
@@ -55,7 +54,6 @@ const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) {
return "sp"; // This could be zr as well
default:
UNIMPLEMENTED();
- return nullptr;
}
}
diff --git a/deps/v8/src/diagnostics/compilation-statistics.cc b/deps/v8/src/diagnostics/compilation-statistics.cc
index 40bb239b12..74fa232a08 100644
--- a/deps/v8/src/diagnostics/compilation-statistics.cc
+++ b/deps/v8/src/diagnostics/compilation-statistics.cc
@@ -56,6 +56,29 @@ void CompilationStatistics::BasicStats::Accumulate(const BasicStats& stats) {
}
}
+std::string CompilationStatistics::BasicStats::AsJSON() {
+// clang-format off
+#define DICT(s) "{" << s << "}"
+#define QUOTE(s) "\"" << s << "\""
+#define MEMBER(s) QUOTE(s) << ":"
+
+ DCHECK_EQ(function_name_.find("\""), std::string::npos);
+
+ std::stringstream stream;
+ stream << DICT(
+ MEMBER("function_name") << QUOTE(function_name_) << ","
+ MEMBER("total_allocated_bytes") << total_allocated_bytes_ << ","
+ MEMBER("max_allocated_bytes") << max_allocated_bytes_ << ","
+ MEMBER("absolute_max_allocated_bytes") << absolute_max_allocated_bytes_);
+
+ return stream.str();
+
+#undef DICT
+#undef QUOTE
+#undef MEMBER
+ // clang-format on
+}
+
static void WriteLine(std::ostream& os, bool machine_format, const char* name,
const CompilationStatistics::BasicStats& stats,
const CompilationStatistics::BasicStats& total_stats) {
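For reference, AsJSON() above emits one flat object (note that delta_ is not serialized); with hypothetical values the result looks like:

{"function_name":"foo","total_allocated_bytes":1024,"max_allocated_bytes":512,"absolute_max_allocated_bytes":512}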
diff --git a/deps/v8/src/diagnostics/compilation-statistics.h b/deps/v8/src/diagnostics/compilation-statistics.h
index d14e108d07..a6abdf5e89 100644
--- a/deps/v8/src/diagnostics/compilation-statistics.h
+++ b/deps/v8/src/diagnostics/compilation-statistics.h
@@ -37,6 +37,8 @@ class CompilationStatistics final : public Malloced {
void Accumulate(const BasicStats& stats);
+ std::string AsJSON();
+
base::TimeDelta delta_;
size_t total_allocated_bytes_;
size_t max_allocated_bytes_;
diff --git a/deps/v8/src/diagnostics/eh-frame.cc b/deps/v8/src/diagnostics/eh-frame.cc
index d53ea7698a..223e288e6e 100644
--- a/deps/v8/src/diagnostics/eh-frame.cc
+++ b/deps/v8/src/diagnostics/eh-frame.cc
@@ -27,14 +27,12 @@ void EhFrameWriter::WriteInitialStateInCie() { UNIMPLEMENTED(); }
int EhFrameWriter::RegisterToDwarfCode(Register) {
UNIMPLEMENTED();
- return -1;
}
#ifdef ENABLE_DISASSEMBLER
const char* EhFrameDisassembler::DwarfRegisterCodeToString(int) {
UNIMPLEMENTED();
- return nullptr;
}
#endif
diff --git a/deps/v8/src/diagnostics/gdb-jit.cc b/deps/v8/src/diagnostics/gdb-jit.cc
index 53c29cfb24..bc03a189cd 100644
--- a/deps/v8/src/diagnostics/gdb-jit.cc
+++ b/deps/v8/src/diagnostics/gdb-jit.cc
@@ -4,14 +4,17 @@
#include "src/diagnostics/gdb-jit.h"
+#include <iterator>
#include <map>
#include <memory>
#include <vector>
-#include "include/v8.h"
+#include "include/v8-callbacks.h"
#include "src/api/api-inl.h"
+#include "src/base/address-region.h"
#include "src/base/bits.h"
#include "src/base/hashmap.h"
+#include "src/base/memory.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/wrappers.h"
#include "src/base/strings.h"
@@ -63,7 +66,9 @@ class Writer {
T* operator->() { return w_->RawSlotAt<T>(offset_); }
- void set(const T& value) { *w_->RawSlotAt<T>(offset_) = value; }
+ void set(const T& value) {
+ base::WriteUnalignedValue(w_->AddressAt<T>(offset_), value);
+ }
Slot<T> at(int i) { return Slot<T>(w_, offset_ + sizeof(T) * i); }
@@ -75,7 +80,7 @@ class Writer {
template <typename T>
void Write(const T& val) {
Ensure(position_ + sizeof(T));
- *RawSlotAt<T>(position_) = val;
+ base::WriteUnalignedValue(AddressAt<T>(position_), val);
position_ += sizeof(T);
}
@@ -154,6 +159,12 @@ class Writer {
friend class Slot;
template <typename T>
+ Address AddressAt(uintptr_t offset) {
+ DCHECK(offset < capacity_ && offset + sizeof(T) <= capacity_);
+ return reinterpret_cast<Address>(&buffer_[offset]);
+ }
+
+ template <typename T>
T* RawSlotAt(uintptr_t offset) {
DCHECK(offset < capacity_ && offset + sizeof(T) <= capacity_);
return reinterpret_cast<T*>(&buffer_[offset]);
@@ -896,17 +907,20 @@ class CodeDescription {
};
#endif
- CodeDescription(const char* name, Code code, SharedFunctionInfo shared,
- LineInfo* lineinfo)
- : name_(name), code_(code), shared_info_(shared), lineinfo_(lineinfo) {}
+ CodeDescription(const char* name, base::AddressRegion region,
+ SharedFunctionInfo shared, LineInfo* lineinfo,
+ bool is_function)
+ : name_(name),
+ shared_info_(shared),
+ lineinfo_(lineinfo),
+ is_function_(is_function),
+ code_region_(region) {}
const char* name() const { return name_; }
LineInfo* lineinfo() const { return lineinfo_; }
- bool is_function() const {
- return CodeKindIsOptimizedJSFunction(code_.kind());
- }
+ bool is_function() const { return is_function_; }
bool has_scope_info() const { return !shared_info_.is_null(); }
@@ -915,15 +929,11 @@ class CodeDescription {
return shared_info_.scope_info();
}
- uintptr_t CodeStart() const {
- return static_cast<uintptr_t>(code_.InstructionStart());
- }
+ uintptr_t CodeStart() const { return code_region_.begin(); }
- uintptr_t CodeEnd() const {
- return static_cast<uintptr_t>(code_.InstructionEnd());
- }
+ uintptr_t CodeEnd() const { return code_region_.end(); }
- uintptr_t CodeSize() const { return CodeEnd() - CodeStart(); }
+ uintptr_t CodeSize() const { return code_region_.size(); }
bool has_script() {
return !shared_info_.is_null() && shared_info_.script().IsScript();
@@ -933,6 +943,8 @@ class CodeDescription {
bool IsLineInfoAvailable() { return lineinfo_ != nullptr; }
+ base::AddressRegion region() { return code_region_; }
+
#if V8_TARGET_ARCH_X64
uintptr_t GetStackStateStartAddress(StackState state) const {
DCHECK(state < STACK_STATE_MAX);
@@ -946,7 +958,7 @@ class CodeDescription {
#endif
std::unique_ptr<char[]> GetFilename() {
- if (!shared_info_.is_null()) {
+ if (!shared_info_.is_null() && script().name().IsString()) {
return String::cast(script().name()).ToCString();
} else {
std::unique_ptr<char[]> result(new char[1]);
@@ -965,9 +977,10 @@ class CodeDescription {
private:
const char* name_;
- Code code_;
SharedFunctionInfo shared_info_;
LineInfo* lineinfo_;
+ bool is_function_;
+ base::AddressRegion code_region_;
#if V8_TARGET_ARCH_X64
uintptr_t stack_state_start_addresses_[STACK_STATE_MAX];
#endif
@@ -1080,6 +1093,8 @@ class DebugInfoSection : public DebugSection {
UNIMPLEMENTED();
#elif V8_TARGET_ARCH_MIPS64
UNIMPLEMENTED();
+#elif V8_TARGET_ARCH_LOONG64
+ UNIMPLEMENTED();
#elif V8_TARGET_ARCH_PPC64 && V8_OS_LINUX
w->Write<uint8_t>(DW_OP_reg31); // The frame pointer is here on PPC64.
#elif V8_TARGET_ARCH_S390
@@ -1092,7 +1107,7 @@ class DebugInfoSection : public DebugSection {
int params = scope.ParameterCount();
int context_slots = scope.ContextLocalCount();
// The real slot ID is internal_slots + context_slot_id.
- int internal_slots = Context::MIN_CONTEXT_SLOTS;
+ int internal_slots = scope.ContextHeaderLength();
int current_abbreviation = 4;
for (int param = 0; param < params; ++param) {
@@ -1109,7 +1124,7 @@ class DebugInfoSection : public DebugSection {
}
// See contexts.h for more information.
- DCHECK_EQ(Context::MIN_CONTEXT_SLOTS, 3);
+ DCHECK(internal_slots == 2 || internal_slots == 3);
DCHECK_EQ(Context::SCOPE_INFO_INDEX, 0);
DCHECK_EQ(Context::PREVIOUS_INDEX, 1);
DCHECK_EQ(Context::EXTENSION_INDEX, 2);
@@ -1117,8 +1132,10 @@ class DebugInfoSection : public DebugSection {
w->WriteString(".scope_info");
w->WriteULEB128(current_abbreviation++);
w->WriteString(".previous");
- w->WriteULEB128(current_abbreviation++);
- w->WriteString(".extension");
+ if (internal_slots == 3) {
+ w->WriteULEB128(current_abbreviation++);
+ w->WriteString(".extension");
+ }
for (int context_slot = 0; context_slot < context_slots; ++context_slot) {
w->WriteULEB128(current_abbreviation++);
@@ -1814,26 +1831,17 @@ static JITCodeEntry* CreateELFObject(CodeDescription* desc, Isolate* isolate) {
return CreateCodeEntry(reinterpret_cast<Address>(w.buffer()), w.position());
}
-struct AddressRange {
- Address start;
- Address end;
-};
-
-struct AddressRangeLess {
- bool operator()(const AddressRange& a, const AddressRange& b) const {
- if (a.start == b.start) return a.end < b.end;
- return a.start < b.start;
+// Like base::AddressRegion::StartAddressLess but also compares |end| when
+// |begin| is equal.
+struct AddressRegionLess {
+ bool operator()(const base::AddressRegion& a,
+ const base::AddressRegion& b) const {
+ if (a.begin() == b.begin()) return a.end() < b.end();
+ return a.begin() < b.begin();
}
};
-struct CodeMapConfig {
- using Key = AddressRange;
- using Value = JITCodeEntry*;
- using Less = AddressRangeLess;
-};
-
-using CodeMap =
- std::map<CodeMapConfig::Key, CodeMapConfig::Value, CodeMapConfig::Less>;
+using CodeMap = std::map<base::AddressRegion, JITCodeEntry*, AddressRegionLess>;
static CodeMap* GetCodeMap() {
// TODO(jgruber): Don't leak.
@@ -1907,50 +1915,72 @@ static void AddUnwindInfo(CodeDescription* desc) {
static base::LazyMutex mutex = LAZY_MUTEX_INITIALIZER;
-// Remove entries from the map that intersect the given address range,
-// and deregister them from GDB.
-static void RemoveJITCodeEntries(CodeMap* map, const AddressRange& range) {
- DCHECK(range.start < range.end);
+static base::Optional<std::pair<CodeMap::iterator, CodeMap::iterator>>
+GetOverlappingRegions(CodeMap* map, const base::AddressRegion region) {
+ DCHECK_LT(region.begin(), region.end());
- if (map->empty()) return;
+ if (map->empty()) return {};
// Find the first overlapping entry.
- // If successful, points to the first element not less than `range`. The
+ // If successful, points to the first element not less than `region`. The
// returned iterator has the key in `first` and the value in `second`.
- auto it = map->lower_bound(range);
+ auto it = map->lower_bound(region);
auto start_it = it;
if (it == map->end()) {
start_it = map->begin();
+ // Find the first overlapping entry.
+ for (; start_it != map->end(); ++start_it) {
+ if (start_it->first.end() > region.begin()) {
+ break;
+ }
+ }
} else if (it != map->begin()) {
for (--it; it != map->begin(); --it) {
- if ((*it).first.end <= range.start) break;
+ if ((*it).first.end() <= region.begin()) break;
+ start_it = it;
+ }
+ if (it == map->begin() && it->first.end() > region.begin()) {
start_it = it;
}
}
- DCHECK(start_it != map->end());
+ if (start_it == map->end()) {
+ return {};
+ }
- // Find the first non-overlapping entry after `range`.
+ // Find the first non-overlapping entry after `region`.
- const auto end_it = map->lower_bound({range.end, 0});
+ const auto end_it = map->lower_bound({region.end(), 0});
- // Evict intersecting ranges.
+ // Return a range containing intersecting regions.
- if (std::distance(start_it, end_it) < 1) return; // No overlapping entries.
+ if (std::distance(start_it, end_it) < 1)
+ return {}; // No overlapping entries.
- for (auto it = start_it; it != end_it; it++) {
- JITCodeEntry* old_entry = (*it).second;
- UnregisterCodeEntry(old_entry);
- DestroyCodeEntry(old_entry);
- }
+ return {{start_it, end_it}};
+}
+
+// Remove entries from the map that intersect the given address region,
+// and deregister them from GDB.
+static void RemoveJITCodeEntries(CodeMap* map,
+ const base::AddressRegion region) {
+ if (auto overlap = GetOverlappingRegions(map, region)) {
+ auto start_it = overlap->first;
+ auto end_it = overlap->second;
+ for (auto it = start_it; it != end_it; it++) {
+ JITCodeEntry* old_entry = (*it).second;
+ UnregisterCodeEntry(old_entry);
+ DestroyCodeEntry(old_entry);
+ }
- map->erase(start_it, end_it);
+ map->erase(start_it, end_it);
+ }
}
// Insert the entry into the map and register it with GDB.
-static void AddJITCodeEntry(CodeMap* map, const AddressRange& range,
+static void AddJITCodeEntry(CodeMap* map, const base::AddressRegion region,
JITCodeEntry* entry, bool dump_if_enabled,
const char* name_hint) {
#if defined(DEBUG) && !V8_OS_WIN
@@ -1967,24 +1997,21 @@ static void AddJITCodeEntry(CodeMap* map, const AddressRange& range,
}
#endif
- auto result = map->emplace(range, entry);
+ auto result = map->emplace(region, entry);
DCHECK(result.second); // Insertion happened.
USE(result);
RegisterCodeEntry(entry);
}
-static void AddCode(const char* name, Code code, SharedFunctionInfo shared,
- LineInfo* lineinfo) {
+static void AddCode(const char* name, base::AddressRegion region,
+ SharedFunctionInfo shared, LineInfo* lineinfo,
+ Isolate* isolate, bool is_function) {
DisallowGarbageCollection no_gc;
+ CodeDescription code_desc(name, region, shared, lineinfo, is_function);
CodeMap* code_map = GetCodeMap();
- AddressRange range;
- range.start = code.address();
- range.end = code.address() + code.CodeSize();
- RemoveJITCodeEntries(code_map, range);
-
- CodeDescription code_desc(name, code, shared, lineinfo);
+ RemoveJITCodeEntries(code_map, region);
if (!FLAG_gdbjit_full && !code_desc.IsLineInfoAvailable()) {
delete lineinfo;
@@ -1992,7 +2019,6 @@ static void AddCode(const char* name, Code code, SharedFunctionInfo shared,
}
AddUnwindInfo(&code_desc);
- Isolate* isolate = code.GetIsolate();
JITCodeEntry* entry = CreateELFObject(&code_desc, isolate);
delete lineinfo;
@@ -2008,25 +2034,40 @@ static void AddCode(const char* name, Code code, SharedFunctionInfo shared,
should_dump = (name_hint != nullptr);
}
}
- AddJITCodeEntry(code_map, range, entry, should_dump, name_hint);
+ AddJITCodeEntry(code_map, region, entry, should_dump, name_hint);
}
void EventHandler(const v8::JitCodeEvent* event) {
if (!FLAG_gdbjit) return;
- if (event->code_type != v8::JitCodeEvent::JIT_CODE) return;
+ if ((event->code_type != v8::JitCodeEvent::JIT_CODE) &&
+ (event->code_type != v8::JitCodeEvent::WASM_CODE)) {
+ return;
+ }
base::MutexGuard lock_guard(mutex.Pointer());
switch (event->type) {
case v8::JitCodeEvent::CODE_ADDED: {
Address addr = reinterpret_cast<Address>(event->code_start);
- Isolate* isolate = reinterpret_cast<Isolate*>(event->isolate);
- Code code = isolate->heap()->GcSafeFindCodeForInnerPointer(addr);
LineInfo* lineinfo = GetLineInfo(addr);
std::string event_name(event->name.str, event->name.len);
// It's called UnboundScript in the API but it's a SharedFunctionInfo.
SharedFunctionInfo shared = event->script.IsEmpty()
? SharedFunctionInfo()
: *Utils::OpenHandle(*event->script);
- AddCode(event_name.c_str(), code, shared, lineinfo);
+ Isolate* isolate = reinterpret_cast<Isolate*>(event->isolate);
+ bool is_function = false;
+ // TODO(zhin): See if we can use event->code_type to determine
+ // is_function, the difference currently is that JIT_CODE is SparkPlug,
+ // TurboProp, TurboFan, whereas CodeKindIsOptimizedJSFunction is only
+ // TurboProp and TurboFan. is_function is used for AddUnwindInfo, and the
+ // prologue that SP generates probably matches that of TP/TF, so we can
+ // use event->code_type here instead of finding the Code.
+ // TODO(zhin): Rename is_function to be more accurate.
+ if (event->code_type == v8::JitCodeEvent::JIT_CODE) {
+ Code code = isolate->heap()->GcSafeFindCodeForInnerPointer(addr);
+ is_function = CodeKindIsOptimizedJSFunction(code.kind());
+ }
+ AddCode(event_name.c_str(), {addr, event->code_len}, shared, lineinfo,
+ isolate, is_function);
break;
}
case v8::JitCodeEvent::CODE_MOVED:
@@ -2056,6 +2097,23 @@ void EventHandler(const v8::JitCodeEvent* event) {
}
}
}
+
+void AddRegionForTesting(const base::AddressRegion region) {
+ // For testing purposes we don't care about JITCodeEntry, pass nullptr.
+ auto result = GetCodeMap()->emplace(region, nullptr);
+ DCHECK(result.second); // Insertion happened.
+ USE(result);
+}
+
+void ClearCodeMapForTesting() { GetCodeMap()->clear(); }
+
+size_t NumOverlapEntriesForTesting(const base::AddressRegion region) {
+ if (auto overlaps = GetOverlappingRegions(GetCodeMap(), region)) {
+ return std::distance(overlaps->first, overlaps->second);
+ }
+ return 0;
+}
+
#endif
} // namespace GDBJITInterface
} // namespace internal
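A self-contained sketch of the ordering AddressRegionLess imposes, with hypothetical values: entries sort by begin(), with end() as tie-breaker, which is what lets the lower_bound() calls above land on the first candidate overlap.

#include <cstdint>
#include <map>

struct Region { uintptr_t begin, end; };  // stand-in for base::AddressRegion
struct RegionLess {
  bool operator()(const Region& a, const Region& b) const {
    if (a.begin == b.begin) return a.end < b.end;
    return a.begin < b.begin;
  }
};
using CodeMapSketch = std::map<Region, int, RegionLess>;
// Ordering: {0x1000, 0x1040} < {0x1000, 0x1080} < {0x2000, 0x2010}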
diff --git a/deps/v8/src/diagnostics/gdb-jit.h b/deps/v8/src/diagnostics/gdb-jit.h
index 82f5ce892c..eb4d515a81 100644
--- a/deps/v8/src/diagnostics/gdb-jit.h
+++ b/deps/v8/src/diagnostics/gdb-jit.h
@@ -5,6 +5,8 @@
#ifndef V8_DIAGNOSTICS_GDB_JIT_H_
#define V8_DIAGNOSTICS_GDB_JIT_H_
+#include "src/base/address-region.h"
+
//
// GDB has two ways of interacting with JIT code. With the "JIT compilation
// interface", V8 can tell GDB when it emits JIT code. Unfortunately to do so,
@@ -29,9 +31,19 @@ struct JitCodeEvent;
namespace internal {
namespace GDBJITInterface {
#ifdef ENABLE_GDB_JIT_INTERFACE
+
// JitCodeEventHandler that creates ELF/Mach-O objects and registers them with
// GDB.
void EventHandler(const v8::JitCodeEvent* event);
+
+// Expose some functions for unittests. These only exercise the logic for
+// adding an AddressRegion to the CodeMap and checking for overlap; they do
+// not touch the actual JITCodeEntry at all.
+V8_EXPORT_PRIVATE void AddRegionForTesting(const base::AddressRegion region);
+V8_EXPORT_PRIVATE void ClearCodeMapForTesting();
+V8_EXPORT_PRIVATE size_t
+NumOverlapEntriesForTesting(const base::AddressRegion region);
+
#endif
} // namespace GDBJITInterface
} // namespace internal
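A sketch of how a unittest might drive these hooks, using only the three functions declared above; the regions are hypothetical, and base::AddressRegion is constructed as {start address, size}, matching the {addr, event->code_len} usage in EventHandler:

GDBJITInterface::ClearCodeMapForTesting();
GDBJITInterface::AddRegionForTesting({0x1000, 0x100});  // [0x1000, 0x1100)
GDBJITInterface::AddRegionForTesting({0x2000, 0x100});  // [0x2000, 0x2100)
// [0x10f0, 0x1110) intersects only the first region:
CHECK_EQ(1u, GDBJITInterface::NumOverlapEntriesForTesting({0x10f0, 0x20}));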
diff --git a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
index de124de747..8f721c997d 100644
--- a/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
+++ b/deps/v8/src/diagnostics/ia32/disasm-ia32.cc
@@ -89,6 +89,10 @@ static const char* const conditional_move_mnem[] = {
/*8*/ "cmovs", "cmovns", "cmovpe", "cmovpo",
/*12*/ "cmovl", "cmovnl", "cmovng", "cmovg"};
+static const char* const cmp_pseudo_op[16] = {
+ "eq", "lt", "le", "unord", "neq", "nlt", "nle", "ord",
+ "eq_uq", "nge", "ngt", "false", "neq_oq", "ge", "gt", "true"};
+
enum InstructionType {
NO_INSTR,
ZERO_OPERANDS_INSTR,
@@ -415,13 +419,11 @@ int DisassemblerIA32::PrintRightOperandHelper(
UnimplementedInstruction();
return 1;
}
- } else {
- AppendToBuffer("[%s]", (this->*register_name)(rm));
- return 1;
}
- break;
+ AppendToBuffer("[%s]", (this->*register_name)(rm));
+ return 1;
case 1: // fall through
- case 2:
+ case 2: {
if (rm == esp) {
byte sib = *(modrmp + 1);
int scale, index, base;
@@ -436,14 +438,13 @@ int DisassemblerIA32::PrintRightOperandHelper(
disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
}
return mod == 2 ? 6 : 3;
- } else {
- // No sib.
- int disp = mod == 2 ? Imm32(modrmp + 1) : Imm8(modrmp + 1);
- AppendToBuffer("[%s%s0x%x]", (this->*register_name)(rm),
- disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
- return mod == 2 ? 5 : 2;
}
- break;
+ // No sib.
+ int disp = mod == 2 ? Imm32(modrmp + 1) : Imm8(modrmp + 1);
+ AppendToBuffer("[%s%s0x%x]", (this->*register_name)(rm),
+ disp < 0 ? "-" : "+", disp < 0 ? -disp : disp);
+ return mod == 2 ? 5 : 2;
+ }
case 3:
AppendToBuffer("%s", (this->*register_name)(rm));
return 1;
@@ -789,6 +790,15 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
SSSE3_UNOP_INSTRUCTION_LIST(DECLARE_SSE_AVX_RM_DIS_CASE)
SSE4_RM_INSTRUCTION_LIST(DECLARE_SSE_AVX_RM_DIS_CASE)
#undef DECLARE_SSE_AVX_RM_DIS_CASE
+
+#define DISASSEMBLE_AVX2_BROADCAST(instruction, _1, _2, _3, code) \
+ case 0x##code: \
+ AppendToBuffer("" #instruction " %s,", NameOfXMMRegister(regop)); \
+ current += PrintRightXMMOperand(current); \
+ break;
+ AVX2_BROADCAST_LIST(DISASSEMBLE_AVX2_BROADCAST)
+#undef DISASSEMBLE_AVX2_BROADCAST
+
default:
UnimplementedInstruction();
}
@@ -1243,12 +1253,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
current += PrintRightXMMOperand(current);
break;
case 0xC2: {
- const char* const pseudo_op[] = {"eq", "lt", "le", "unord",
- "neq", "nlt", "nle", "ord"};
AppendToBuffer("vcmpps %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
- AppendToBuffer(", (%s)", pseudo_op[*current]);
+ AppendToBuffer(", (%s)", cmp_pseudo_op[*current]);
current++;
break;
}
@@ -1371,11 +1379,10 @@ int DisassemblerIA32::AVXInstruction(byte* data) {
AppendToBuffer(",%s", NameOfXMMRegister(regop));
break;
case 0xC2: {
- const char* const pseudo_op[] = {"eq", "lt", "le", "unord", "neq"};
AppendToBuffer("vcmppd %s,%s,", NameOfXMMRegister(regop),
NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
- AppendToBuffer(", (%s)", pseudo_op[*current]);
+ AppendToBuffer(", (%s)", cmp_pseudo_op[*current]);
current++;
break;
}
@@ -1999,11 +2006,9 @@ int DisassemblerIA32::InstructionDecode(v8::base::Vector<char> out_buffer,
data += PrintOperands("xadd", OPER_REG_OP_ORDER, data);
} else if (f0byte == 0xC2) {
data += 2;
- const char* const pseudo_op[] = {"eq", "lt", "le", "unord",
- "neq", "nlt", "nle", "ord"};
AppendToBuffer("cmpps %s, ", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- AppendToBuffer(", (%s)", pseudo_op[*data]);
+ AppendToBuffer(", (%s)", cmp_pseudo_op[*data]);
data++;
} else if (f0byte == 0xC6) {
// shufps xmm, xmm/m128, imm8
@@ -2485,10 +2490,9 @@ int DisassemblerIA32::InstructionDecode(v8::base::Vector<char> out_buffer,
data++;
int mod, regop, rm;
get_modrm(*data, &mod, &regop, &rm);
- const char* const pseudo_op[] = {"eq", "lt", "le", "unord", "neq"};
AppendToBuffer("cmppd %s, ", NameOfXMMRegister(regop));
data += PrintRightXMMOperand(data);
- AppendToBuffer(", (%s)", pseudo_op[*data]);
+ AppendToBuffer(", (%s)", cmp_pseudo_op[*data]);
data++;
} else if (*data == 0xC4) {
data++;
@@ -2694,10 +2698,7 @@ int DisassemblerIA32::InstructionDecode(v8::base::Vector<char> out_buffer,
data += PrintRightXMMOperand(data);
} else if (b2 == 0xC2) {
// Intel manual 2A, Table 3-18.
- const char* const pseudo_op[] = {
- "cmpeqsd", "cmpltsd", "cmplesd", "cmpunordsd",
- "cmpneqsd", "cmpnltsd", "cmpnlesd", "cmpordsd"};
- AppendToBuffer("%s %s,%s", pseudo_op[data[1]],
+ AppendToBuffer("cmp%ssd %s,%s", cmp_pseudo_op[data[1]],
NameOfXMMRegister(regop), NameOfXMMRegister(rm));
data += 2;
} else {
@@ -2835,10 +2836,7 @@ int DisassemblerIA32::InstructionDecode(v8::base::Vector<char> out_buffer,
data += PrintRightXMMOperand(data);
} else if (b2 == 0xC2) {
// Intel manual 2A, Table 3-18.
- const char* const pseudo_op[] = {
- "cmpeqss", "cmpltss", "cmpless", "cmpunordss",
- "cmpneqss", "cmpnltss", "cmpnless", "cmpordss"};
- AppendToBuffer("%s %s,%s", pseudo_op[data[1]],
+ AppendToBuffer("cmp%sss %s,%s", cmp_pseudo_op[data[1]],
NameOfXMMRegister(regop), NameOfXMMRegister(rm));
data += 2;
} else {
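The per-instruction string arrays above collapse into the shared 16-entry cmp_pseudo_op table: the imm8 byte selects the predicate name, and for scalar compares the mnemonic is rebuilt as "cmp" + name + "sd"/"ss". Illustrative decodings for imm8 == 1 ("lt"), following the cases above:

// cmpps xmm1, xmm2, 1   ->  "cmpps xmm1,xmm2, (lt)"
// cmppd xmm1, xmm2, 1   ->  "cmppd xmm1,xmm2, (lt)"
// cmpsd xmm1, xmm2, 1   ->  "cmpltsd xmm1,xmm2"   (via "cmp%ssd")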
diff --git a/deps/v8/src/diagnostics/loong64/disasm-loong64.cc b/deps/v8/src/diagnostics/loong64/disasm-loong64.cc
new file mode 100644
index 0000000000..1c41a3896a
--- /dev/null
+++ b/deps/v8/src/diagnostics/loong64/disasm-loong64.cc
@@ -0,0 +1,1697 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <assert.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/base/platform/platform.h"
+#include "src/base/strings.h"
+#include "src/base/vector.h"
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/diagnostics/disasm.h"
+
+namespace v8 {
+namespace internal {
+
+//------------------------------------------------------------------------------
+
+// Decoder decodes and disassembles instructions into an output buffer.
+// It uses the converter to convert register names and call destinations into
+// a more informative description.
+class Decoder {
+ public:
+ Decoder(const disasm::NameConverter& converter,
+ v8::base::Vector<char> out_buffer)
+ : converter_(converter), out_buffer_(out_buffer), out_buffer_pos_(0) {
+ out_buffer_[out_buffer_pos_] = '\0';
+ }
+
+ ~Decoder() {}
+
+ Decoder(const Decoder&) = delete;
+ Decoder& operator=(const Decoder&) = delete;
+
+ // Writes one disassembled instruction into 'buffer' (0-terminated).
+ // Returns the length of the disassembled machine instruction in bytes.
+ int InstructionDecode(byte* instruction);
+
+ private:
+ // Bottleneck functions to print into the out_buffer.
+ void PrintChar(const char ch);
+ void Print(const char* str);
+
+ // Printing of common values.
+ void PrintRegister(int reg);
+ void PrintFPURegister(int freg);
+ void PrintFPUStatusRegister(int freg);
+ void PrintRj(Instruction* instr);
+ void PrintRk(Instruction* instr);
+ void PrintRd(Instruction* instr);
+ void PrintFj(Instruction* instr);
+ void PrintFk(Instruction* instr);
+ void PrintFd(Instruction* instr);
+ void PrintFa(Instruction* instr);
+ void PrintSa2(Instruction* instr);
+ void PrintSa3(Instruction* instr);
+ void PrintUi5(Instruction* instr);
+ void PrintUi6(Instruction* instr);
+ void PrintUi12(Instruction* instr);
+ void PrintXi12(Instruction* instr);
+ void PrintMsbw(Instruction* instr);
+ void PrintLsbw(Instruction* instr);
+ void PrintMsbd(Instruction* instr);
+ void PrintLsbd(Instruction* instr);
+ // void PrintCond(Instruction* instr);
+ void PrintSi12(Instruction* instr);
+ void PrintSi14(Instruction* instr);
+ void PrintSi16(Instruction* instr);
+ void PrintSi20(Instruction* instr);
+ void PrintCj(Instruction* instr);
+ void PrintCd(Instruction* instr);
+ void PrintCa(Instruction* instr);
+ void PrintCode(Instruction* instr);
+ void PrintHint5(Instruction* instr);
+ void PrintHint15(Instruction* instr);
+ void PrintPCOffs16(Instruction* instr);
+ void PrintPCOffs21(Instruction* instr);
+ void PrintPCOffs26(Instruction* instr);
+ void PrintOffs16(Instruction* instr);
+ void PrintOffs21(Instruction* instr);
+ void PrintOffs26(Instruction* instr);
+
+ // Handle formatting of instructions and their options.
+ int FormatRegister(Instruction* instr, const char* option);
+ int FormatFPURegister(Instruction* instr, const char* option);
+ int FormatOption(Instruction* instr, const char* option);
+ void Format(Instruction* instr, const char* format);
+ void Unknown(Instruction* instr);
+ int DecodeBreakInstr(Instruction* instr);
+
+ // Each of these functions decodes one particular instruction type.
+ int InstructionDecode(Instruction* instr);
+ void DecodeTypekOp6(Instruction* instr);
+ void DecodeTypekOp7(Instruction* instr);
+ void DecodeTypekOp8(Instruction* instr);
+ void DecodeTypekOp10(Instruction* instr);
+ void DecodeTypekOp12(Instruction* instr);
+ void DecodeTypekOp14(Instruction* instr);
+ int DecodeTypekOp17(Instruction* instr);
+ void DecodeTypekOp22(Instruction* instr);
+
+ const disasm::NameConverter& converter_;
+ v8::base::Vector<char> out_buffer_;
+ int out_buffer_pos_;
+};
+
+// Support for assertions in the Decoder formatting functions.
+#define STRING_STARTS_WITH(string, compare_string) \
+ (strncmp(string, compare_string, strlen(compare_string)) == 0)
+
+// Append ch to the output buffer.
+void Decoder::PrintChar(const char ch) { out_buffer_[out_buffer_pos_++] = ch; }
+
+// Append str to the output buffer.
+void Decoder::Print(const char* str) {
+ char cur = *str++;
+ while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+ PrintChar(cur);
+ cur = *str++;
+ }
+ out_buffer_[out_buffer_pos_] = 0;
+}
+
+// Print the register name according to the active name converter.
+void Decoder::PrintRegister(int reg) {
+ Print(converter_.NameOfCPURegister(reg));
+}
+
+void Decoder::PrintRj(Instruction* instr) {
+ int reg = instr->RjValue();
+ PrintRegister(reg);
+}
+
+void Decoder::PrintRk(Instruction* instr) {
+ int reg = instr->RkValue();
+ PrintRegister(reg);
+}
+
+void Decoder::PrintRd(Instruction* instr) {
+ int reg = instr->RdValue();
+ PrintRegister(reg);
+}
+
+// Print the FPU register name according to the active name converter.
+void Decoder::PrintFPURegister(int freg) {
+ Print(converter_.NameOfXMMRegister(freg));
+}
+
+void Decoder::PrintFj(Instruction* instr) {
+ int freg = instr->FjValue();
+ PrintFPURegister(freg);
+}
+
+void Decoder::PrintFk(Instruction* instr) {
+ int freg = instr->FkValue();
+ PrintFPURegister(freg);
+}
+
+void Decoder::PrintFd(Instruction* instr) {
+ int freg = instr->FdValue();
+ PrintFPURegister(freg);
+}
+
+void Decoder::PrintFa(Instruction* instr) {
+ int freg = instr->FaValue();
+ PrintFPURegister(freg);
+}
+
+// Print the integer value of the sa field.
+void Decoder::PrintSa2(Instruction* instr) {
+ int sa = instr->Sa2Value();
+ uint32_t opcode = (instr->InstructionBits() >> 18) << 18;
+ if (opcode == ALSL || opcode == ALSL_D) {
+ sa += 1;
+ }
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
+}
+
+void Decoder::PrintSa3(Instruction* instr) {
+ int sa = instr->Sa3Value();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
+}
+
+void Decoder::PrintUi5(Instruction* instr) {
+ int ui = instr->Ui5Value();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", ui);
+}
+
+void Decoder::PrintUi6(Instruction* instr) {
+ int ui = instr->Ui6Value();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", ui);
+}
+
+void Decoder::PrintUi12(Instruction* instr) {
+ int ui = instr->Ui12Value();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", ui);
+}
+
+void Decoder::PrintXi12(Instruction* instr) {
+ int xi = instr->Ui12Value();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", xi);
+}
+
+void Decoder::PrintMsbd(Instruction* instr) {
+ int msbd = instr->MsbdValue();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", msbd);
+}
+
+void Decoder::PrintLsbd(Instruction* instr) {
+ int lsbd = instr->LsbdValue();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", lsbd);
+}
+
+void Decoder::PrintMsbw(Instruction* instr) {
+ int msbw = instr->MsbwValue();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", msbw);
+}
+
+void Decoder::PrintLsbw(Instruction* instr) {
+ int lsbw = instr->LsbwValue();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", lsbw);
+}
+
+void Decoder::PrintSi12(Instruction* instr) {
+ int si = ((instr->Si12Value()) << (32 - kSi12Bits)) >> (32 - kSi12Bits);
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", si);
+}
+
+void Decoder::PrintSi14(Instruction* instr) {
+ int si = ((instr->Si14Value()) << (32 - kSi14Bits)) >> (32 - kSi14Bits);
+ si <<= 2;
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", si);
+}
+
+void Decoder::PrintSi16(Instruction* instr) {
+ int si = ((instr->Si16Value()) << (32 - kSi16Bits)) >> (32 - kSi16Bits);
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", si);
+}
+
+void Decoder::PrintSi20(Instruction* instr) {
+ int si = ((instr->Si20Value()) << (32 - kSi20Bits)) >> (32 - kSi20Bits);
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", si);
+}
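+
+// Note: each Si* accessor above sign-extends by shifting the raw field to the
+// top of a 32-bit int and arithmetic-shifting it back down; e.g. for a 12-bit
+// field, 0x800 becomes (0x800 << 20) >> 20 == -2048.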
+
+void Decoder::PrintCj(Instruction* instr) {
+ int cj = instr->CjValue();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", cj);
+}
+
+void Decoder::PrintCd(Instruction* instr) {
+ int cd = instr->CdValue();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", cd);
+}
+
+void Decoder::PrintCa(Instruction* instr) {
+ int ca = instr->CaValue();
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%u", ca);
+}
+
+void Decoder::PrintCode(Instruction* instr) {
+ int code = instr->CodeValue();
+ out_buffer_pos_ +=
+ base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x(%u)", code, code);
+}
+
+void Decoder::PrintHint5(Instruction* instr) {
+ int hint = instr->Hint5Value();
+ out_buffer_pos_ +=
+ base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x(%u)", hint, hint);
+}
+
+void Decoder::PrintHint15(Instruction* instr) {
+ int hint = instr->Hint15Value();
+ out_buffer_pos_ +=
+ base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x(%u)", hint, hint);
+}
+
+void Decoder::PrintPCOffs16(Instruction* instr) {
+ int n_bits = 2;
+ int offs = instr->Offs16Value();
+ int target = ((offs << n_bits) << (32 - kOffsLowBits - n_bits)) >>
+ (32 - kOffsLowBits - n_bits);
+ out_buffer_pos_ += base::SNPrintF(
+ out_buffer_ + out_buffer_pos_, "%s",
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + target));
+}
+
+void Decoder::PrintPCOffs21(Instruction* instr) {
+ int n_bits = 2;
+ int offs = instr->Offs21Value();
+ int target =
+ ((offs << n_bits) << (32 - kOffsLowBits - kOffs21HighBits - n_bits)) >>
+ (32 - kOffsLowBits - kOffs21HighBits - n_bits);
+ out_buffer_pos_ += base::SNPrintF(
+ out_buffer_ + out_buffer_pos_, "%s",
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + target));
+}
+
+void Decoder::PrintPCOffs26(Instruction* instr) {
+ int n_bits = 2;
+ int offs = instr->Offs26Value();
+ int target =
+ ((offs << n_bits) << (32 - kOffsLowBits - kOffs26HighBits - n_bits)) >>
+ (32 - kOffsLowBits - kOffs26HighBits - n_bits);
+ out_buffer_pos_ += base::SNPrintF(
+ out_buffer_ + out_buffer_pos_, "%s",
+ converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + target));
+}
+
+void Decoder::PrintOffs16(Instruction* instr) {
+ int offs = instr->Offs16Value();
+ offs <<= (32 - kOffsLowBits);
+ offs >>= (32 - kOffsLowBits);
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs);
+}
+
+void Decoder::PrintOffs21(Instruction* instr) {
+ int offs = instr->Offs21Value();
+ offs <<= (32 - kOffsLowBits - kOffs21HighBits);
+ offs >>= (32 - kOffsLowBits - kOffs21HighBits);
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs);
+}
+
+void Decoder::PrintOffs26(Instruction* instr) {
+ int offs = instr->Offs26Value();
+ offs <<= (32 - kOffsLowBits - kOffs26HighBits);
+ offs >>= (32 - kOffsLowBits - kOffs26HighBits);
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs);
+}
+
+// Handle all register-based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatRegister(Instruction* instr, const char* format) {
+ DCHECK_EQ(format[0], 'r');
+ if (format[1] == 'j') { // 'rj: Rj register.
+ int reg = instr->RjValue();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 'k') { // 'rk: rk register.
+ int reg = instr->RkValue();
+ PrintRegister(reg);
+ return 2;
+ } else if (format[1] == 'd') { // 'rd: rd register.
+ int reg = instr->RdValue();
+ PrintRegister(reg);
+ return 2;
+ }
+ UNREACHABLE();
+}
+
+// Handle all FPU-register-based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
+ DCHECK_EQ(format[0], 'f');
+ if (format[1] == 'j') { // 'fj: fj register.
+ int reg = instr->FjValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 'k') { // 'fk: fk register.
+ int reg = instr->FkValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 'd') { // 'fd: fd register.
+ int reg = instr->FdValue();
+ PrintFPURegister(reg);
+ return 2;
+ } else if (format[1] == 'a') { // 'fa: fa register.
+ int reg = instr->FaValue();
+ PrintFPURegister(reg);
+ return 2;
+ }
+ UNREACHABLE();
+}
+
+// FormatOption takes a formatting string and interprets it based on
+// the current instruction. The format string points to the first
+// character of the option string (the option escape has already been
+// consumed by the caller). FormatOption returns the number of
+// characters that were consumed from the formatting string.
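+// For example, given DecodeTypekOp6's format string
+// "beq 'rj, 'rd, 'offs16 -> 'pcoffs16", Format() copies the literal text and
+// calls FormatOption() at each quote, printing the two registers, the raw
+// branch offset, and the resolved target address in turn.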
+int Decoder::FormatOption(Instruction* instr, const char* format) {
+ switch (format[0]) {
+ case 'c': {
+ switch (format[1]) {
+ case 'a':
+ DCHECK(STRING_STARTS_WITH(format, "ca"));
+ PrintCa(instr);
+ return 2;
+ case 'd':
+ DCHECK(STRING_STARTS_WITH(format, "cd"));
+ PrintCd(instr);
+ return 2;
+ case 'j':
+ DCHECK(STRING_STARTS_WITH(format, "cj"));
+ PrintCj(instr);
+ return 2;
+ case 'o':
+ DCHECK(STRING_STARTS_WITH(format, "code"));
+ PrintCode(instr);
+ return 4;
+ }
+ }
+ case 'f': {
+ return FormatFPURegister(instr, format);
+ }
+ case 'h': {
+ if (format[4] == '5') {
+ DCHECK(STRING_STARTS_WITH(format, "hint5"));
+ PrintHint5(instr);
+ return 5;
+ } else if (format[4] == '1') {
+ DCHECK(STRING_STARTS_WITH(format, "hint15"));
+ PrintHint15(instr);
+ return 6;
+ }
+ break;
+ }
+ case 'l': {
+ switch (format[3]) {
+ case 'w':
+ DCHECK(STRING_STARTS_WITH(format, "lsbw"));
+ PrintLsbw(instr);
+ return 4;
+ case 'd':
+ DCHECK(STRING_STARTS_WITH(format, "lsbd"));
+ PrintLsbd(instr);
+ return 4;
+ default:
+ return 0;
+ }
+ }
+ case 'm': {
+ if (format[3] == 'w') {
+ DCHECK(STRING_STARTS_WITH(format, "msbw"));
+ PrintMsbw(instr);
+ } else if (format[3] == 'd') {
+ DCHECK(STRING_STARTS_WITH(format, "msbd"));
+ PrintMsbd(instr);
+ }
+ return 4;
+ }
+ case 'o': {
+ if (format[1] == 'f') {
+ if (format[4] == '1') {
+ DCHECK(STRING_STARTS_WITH(format, "offs16"));
+ PrintOffs16(instr);
+ return 6;
+ } else if (format[4] == '2') {
+ if (format[5] == '1') {
+ DCHECK(STRING_STARTS_WITH(format, "offs21"));
+ PrintOffs21(instr);
+ return 6;
+ } else if (format[5] == '6') {
+ DCHECK(STRING_STARTS_WITH(format, "offs26"));
+ PrintOffs26(instr);
+ return 6;
+ }
+ }
+ }
+ break;
+ }
+ case 'p': {
+ if (format[6] == '1') {
+ DCHECK(STRING_STARTS_WITH(format, "pcoffs16"));
+ PrintPCOffs16(instr);
+ return 8;
+ } else if (format[6] == '2') {
+ if (format[7] == '1') {
+ DCHECK(STRING_STARTS_WITH(format, "pcoffs21"));
+ PrintPCOffs21(instr);
+ return 8;
+ } else if (format[7] == '6') {
+ DCHECK(STRING_STARTS_WITH(format, "pcoffs26"));
+ PrintPCOffs26(instr);
+ return 8;
+ }
+ }
+ break;
+ }
+ case 'r': {
+ return FormatRegister(instr, format);
+ }
+ case 's': {
+ switch (format[1]) {
+ case 'a':
+ if (format[2] == '2') {
+ DCHECK(STRING_STARTS_WITH(format, "sa2"));
+ PrintSa2(instr);
+ } else if (format[2] == '3') {
+ DCHECK(STRING_STARTS_WITH(format, "sa3"));
+ PrintSa3(instr);
+ }
+ return 3;
+ case 'i':
+ if (format[2] == '2') {
+ DCHECK(STRING_STARTS_WITH(format, "si20"));
+ PrintSi20(instr);
+ return 4;
+ } else if (format[2] == '1') {
+ switch (format[3]) {
+ case '2':
+ DCHECK(STRING_STARTS_WITH(format, "si12"));
+ PrintSi12(instr);
+ return 4;
+ case '4':
+ DCHECK(STRING_STARTS_WITH(format, "si14"));
+ PrintSi14(instr);
+ return 4;
+ case '6':
+ DCHECK(STRING_STARTS_WITH(format, "si16"));
+ PrintSi16(instr);
+ return 4;
+ default:
+ break;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ break;
+ }
+ case 'u': {
+ if (format[2] == '5') {
+ DCHECK(STRING_STARTS_WITH(format, "ui5"));
+ PrintUi5(instr);
+ return 3;
+ } else if (format[2] == '6') {
+ DCHECK(STRING_STARTS_WITH(format, "ui6"));
+ PrintUi6(instr);
+ return 3;
+ } else if (format[2] == '1') {
+ DCHECK(STRING_STARTS_WITH(format, "ui12"));
+ PrintUi12(instr);
+ return 4;
+ }
+ break;
+ }
+ case 'x': {
+ DCHECK(STRING_STARTS_WITH(format, "xi12"));
+ PrintXi12(instr);
+ return 4;
+ }
+ default:
+ UNREACHABLE();
+ }
+ return 0;
+}
+
+// Format takes a formatting string for a whole instruction and prints it into
+// the output buffer. All escaped options are handed to FormatOption to be
+// parsed further.
+void Decoder::Format(Instruction* instr, const char* format) {
+ char cur = *format++;
+ while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+ if (cur == '\'') { // Single quote is used as the formatting escape.
+ format += FormatOption(instr, format);
+ } else {
+ out_buffer_[out_buffer_pos_++] = cur;
+ }
+ cur = *format++;
+ }
+ out_buffer_[out_buffer_pos_] = '\0';
+}
+
+// For currently unimplemented decodings the disassembler calls Unknown(instr),
+// which just prints "unknown" for the instruction bits.
+void Decoder::Unknown(Instruction* instr) { Format(instr, "unknown"); }
+
+int Decoder::DecodeBreakInstr(Instruction* instr) {
+ // This is already known to be a BREAK instr; just extract the code.
+ /*if (instr->Bits(14, 0) == static_cast<int>(kMaxStopCode)) {
+ // This is stop(msg).
+ Format(instr, "break, code: 'code");
+ out_buffer_pos_ += SNPrintF(
+ out_buffer_ + out_buffer_pos_, "\n%p %08" PRIx64,
+ static_cast<void*>(reinterpret_cast<int32_t*>(instr + kInstrSize)),
+ reinterpret_cast<uint64_t>(
+ *reinterpret_cast<char**>(instr + kInstrSize)));
+ // Size 3: the break_ instr, plus embedded 64-bit char pointer.
+ return 3 * kInstrSize;
+ } else {
+ Format(instr, "break, code: 'code");
+ return kInstrSize;
+ }*/
+ Format(instr, "break code: 'code");
+ return kInstrSize;
+}
+
+void Decoder::DecodeTypekOp6(Instruction* instr) {
+ switch (instr->Bits(31, 26) << 26) {
+ case ADDU16I_D:
+ Format(instr, "addu16i.d 'rd, 'rj, 'si16");
+ break;
+ case BEQZ:
+ Format(instr, "beqz 'rj, 'offs21 -> 'pcoffs21");
+ break;
+ case BNEZ:
+ Format(instr, "bnez 'rj, 'offs21 -> 'pcoffs21");
+ break;
+ case BCZ:
+ if (instr->Bit(8))
+ Format(instr, "bcnez fcc'cj, 'offs21 -> 'pcoffs21");
+ else
+ Format(instr, "bceqz fcc'cj, 'offs21 -> 'pcoffs21");
+ break;
+ case JIRL:
+ Format(instr, "jirl 'rd, 'rj, 'offs16");
+ break;
+ case B:
+ Format(instr, "b 'offs26 -> 'pcoffs26");
+ break;
+ case BL:
+ Format(instr, "bl 'offs26 -> 'pcoffs26");
+ break;
+ case BEQ:
+ Format(instr, "beq 'rj, 'rd, 'offs16 -> 'pcoffs16");
+ break;
+ case BNE:
+ Format(instr, "bne 'rj, 'rd, 'offs16 -> 'pcoffs16");
+ break;
+ case BLT:
+ Format(instr, "blt 'rj, 'rd, 'offs16 -> 'pcoffs16");
+ break;
+ case BGE:
+ Format(instr, "bge 'rj, 'rd, 'offs16 -> 'pcoffs16");
+ break;
+ case BLTU:
+ Format(instr, "bltu 'rj, 'rd, 'offs16 -> 'pcoffs16");
+ break;
+ case BGEU:
+ Format(instr, "bgeu 'rj, 'rd, 'offs16 -> 'pcoffs16");
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Decoder::DecodeTypekOp7(Instruction* instr) {
+ switch (instr->Bits(31, 25) << 25) {
+ case LU12I_W:
+ Format(instr, "lu12i.w 'rd, 'si20");
+ break;
+ case LU32I_D:
+ Format(instr, "lu32i.d 'rd, 'si20");
+ break;
+ case PCADDI:
+ Format(instr, "pcaddi 'rd, 'si20");
+ break;
+ case PCALAU12I:
+ Format(instr, "pcalau12i 'rd, 'si20");
+ break;
+ case PCADDU12I:
+ Format(instr, "pcaddu12i 'rd, 'si20");
+ break;
+ case PCADDU18I:
+ Format(instr, "pcaddu18i 'rd, 'si20");
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Decoder::DecodeTypekOp8(Instruction* instr) {
+ switch (instr->Bits(31, 24) << 24) {
+ case LDPTR_W:
+ Format(instr, "ldptr.w 'rd, 'rj, 'si14");
+ break;
+ case STPTR_W:
+ Format(instr, "stptr.w 'rd, 'rj, 'si14");
+ break;
+ case LDPTR_D:
+ Format(instr, "ldptr.d 'rd, 'rj, 'si14");
+ break;
+ case STPTR_D:
+ Format(instr, "stptr.d 'rd, 'rj, 'si14");
+ break;
+ case LL_W:
+ Format(instr, "ll.w 'rd, 'rj, 'si14");
+ break;
+ case SC_W:
+ Format(instr, "sc.w 'rd, 'rj, 'si14");
+ break;
+ case LL_D:
+ Format(instr, "ll.d 'rd, 'rj, 'si14");
+ break;
+ case SC_D:
+ Format(instr, "sc.d 'rd, 'rj, 'si14");
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Decoder::DecodeTypekOp10(Instruction* instr) {
+ switch (instr->Bits(31, 22) << 22) {
+ case BSTR_W: {
+ if (instr->Bit(21) != 0) {
+ if (instr->Bit(15) == 0) {
+ Format(instr, "bstrins.w 'rd, 'rj, 'msbw, 'lsbw");
+ } else {
+ Format(instr, "bstrpick.w 'rd, 'rj, 'msbw, 'lsbw");
+ }
+ }
+ break;
+ }
+ case BSTRINS_D:
+ Format(instr, "bstrins.d 'rd, 'rj, 'msbd, 'lsbd");
+ break;
+ case BSTRPICK_D:
+ Format(instr, "bstrpick.d 'rd, 'rj, 'msbd, 'lsbd");
+ break;
+ case SLTI:
+ Format(instr, "slti 'rd, 'rj, 'si12");
+ break;
+ case SLTUI:
+ Format(instr, "sltui 'rd, 'rj, 'si12");
+ break;
+ case ADDI_W:
+ Format(instr, "addi.w 'rd, 'rj, 'si12");
+ break;
+ case ADDI_D:
+ Format(instr, "addi.d 'rd, 'rj, 'si12");
+ break;
+ case LU52I_D:
+ Format(instr, "lu52i.d 'rd, 'rj, 'si12");
+ break;
+ case ANDI:
+ Format(instr, "andi 'rd, 'rj, 'xi12");
+ break;
+ case ORI:
+ Format(instr, "ori 'rd, 'rj, 'xi12");
+ break;
+ case XORI:
+ Format(instr, "xori 'rd, 'rj, 'xi12");
+ break;
+ case LD_B:
+ Format(instr, "ld.b 'rd, 'rj, 'si12");
+ break;
+ case LD_H:
+ Format(instr, "ld.h 'rd, 'rj, 'si12");
+ break;
+ case LD_W:
+ Format(instr, "ld.w 'rd, 'rj, 'si12");
+ break;
+ case LD_D:
+ Format(instr, "ld.d 'rd, 'rj, 'si12");
+ break;
+ case ST_B:
+ Format(instr, "st.b 'rd, 'rj, 'si12");
+ break;
+ case ST_H:
+ Format(instr, "st.h 'rd, 'rj, 'si12");
+ break;
+ case ST_W:
+ Format(instr, "st.w 'rd, 'rj, 'si12");
+ break;
+ case ST_D:
+ Format(instr, "st.d 'rd, 'rj, 'si12");
+ break;
+ case LD_BU:
+ Format(instr, "ld.bu 'rd, 'rj, 'si12");
+ break;
+ case LD_HU:
+ Format(instr, "ld.hu 'rd, 'rj, 'si12");
+ break;
+ case LD_WU:
+ Format(instr, "ld.wu 'rd, 'rj, 'si12");
+ break;
+ case FLD_S:
+ Format(instr, "fld.s 'fd, 'rj, 'si12");
+ break;
+ case FST_S:
+ Format(instr, "fst.s 'fd, 'rj, 'si12");
+ break;
+ case FLD_D:
+ Format(instr, "fld.d 'fd, 'rj, 'si12");
+ break;
+ case FST_D:
+ Format(instr, "fst.d 'fd, 'rj, 'si12");
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Decoder::DecodeTypekOp12(Instruction* instr) {
+ switch (instr->Bits(31, 20) << 20) {
+ case FMADD_S:
+ Format(instr, "fmadd.s 'fd, 'fj, 'fk, 'fa");
+ break;
+ case FMADD_D:
+ Format(instr, "fmadd.d 'fd, 'fj, 'fk, 'fa");
+ break;
+ case FMSUB_S:
+ Format(instr, "fmsub.s 'fd, 'fj, 'fk, 'fa");
+ break;
+ case FMSUB_D:
+ Format(instr, "fmsub.d 'fd, 'fj, 'fk, 'fa");
+ break;
+ case FNMADD_S:
+ Format(instr, "fnmadd.s 'fd, 'fj, 'fk, 'fa");
+ break;
+ case FNMADD_D:
+ Format(instr, "fnmadd.d 'fd, 'fj, 'fk, 'fa");
+ break;
+ case FNMSUB_S:
+ Format(instr, "fnmsub.s 'fd, 'fj, 'fk, 'fa");
+ break;
+ case FNMSUB_D:
+ Format(instr, "fnmsub.d 'fd, 'fj, 'fk, 'fa");
+ break;
+ case FCMP_COND_S:
+ switch (instr->Bits(19, 15)) {
+ case CAF:
+ Format(instr, "fcmp.caf.s fcc'cd, 'fj, 'fk");
+ break;
+ case SAF:
+ Format(instr, "fcmp.saf.s fcc'cd, 'fj, 'fk");
+ break;
+ case CLT:
+ Format(instr, "fcmp.clt.s fcc'cd, 'fj, 'fk");
+ break;
+ case CEQ:
+ Format(instr, "fcmp.ceq.s fcc'cd, 'fj, 'fk");
+ break;
+ case SEQ:
+ Format(instr, "fcmp.seq.s fcc'cd, 'fj, 'fk");
+ break;
+ case CLE:
+ Format(instr, "fcmp.cle.s fcc'cd, 'fj, 'fk");
+ break;
+ case SLE:
+ Format(instr, "fcmp.sle.s fcc'cd, 'fj, 'fk");
+ break;
+ case CUN:
+ Format(instr, "fcmp.cun.s fcc'cd, 'fj, 'fk");
+ break;
+ case SUN:
+ Format(instr, "fcmp.sun.s fcc'cd, 'fj, 'fk");
+ break;
+ case CULT:
+ Format(instr, "fcmp.cult.s fcc'cd, 'fj, 'fk");
+ break;
+ case SULT:
+ Format(instr, "fcmp.sult.s fcc'cd, 'fj, 'fk");
+ break;
+ case CUEQ:
+ Format(instr, "fcmp.cueq.s fcc'cd, 'fj, 'fk");
+ break;
+ case SUEQ:
+ Format(instr, "fcmp.sueq.s fcc'cd, 'fj, 'fk");
+ break;
+ case CULE:
+ Format(instr, "fcmp.cule.s fcc'cd, 'fj, 'fk");
+ break;
+ case SULE:
+ Format(instr, "fcmp.sule.s fcc'cd, 'fj, 'fk");
+ break;
+ case CNE:
+ Format(instr, "fcmp.cne.s fcc'cd, 'fj, 'fk");
+ break;
+ case SNE:
+ Format(instr, "fcmp.sne.s fcc'cd, 'fj, 'fk");
+ break;
+ case COR:
+ Format(instr, "fcmp.cor.s fcc'cd, 'fj, 'fk");
+ break;
+ case SOR:
+ Format(instr, "fcmp.sor.s fcc'cd, 'fj, 'fk");
+ break;
+ case CUNE:
+ Format(instr, "fcmp.cune.s fcc'cd, 'fj, 'fk");
+ break;
+ case SUNE:
+ Format(instr, "fcmp.sune.s fcc'cd, 'fj, 'fk");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case FCMP_COND_D:
+ switch (instr->Bits(19, 15)) {
+ case CAF:
+ Format(instr, "fcmp.caf.d fcc'cd, 'fj, 'fk");
+ break;
+ case SAF:
+ Format(instr, "fcmp.saf.d fcc'cd, 'fj, 'fk");
+ break;
+ case CLT:
+ Format(instr, "fcmp.clt.d fcc'cd, 'fj, 'fk");
+ break;
+ case CEQ:
+ Format(instr, "fcmp.ceq.d fcc'cd, 'fj, 'fk");
+ break;
+ case SEQ:
+ Format(instr, "fcmp.seq.d fcc'cd, 'fj, 'fk");
+ break;
+ case CLE:
+ Format(instr, "fcmp.cle.d fcc'cd, 'fj, 'fk");
+ break;
+ case SLE:
+ Format(instr, "fcmp.sle.d fcc'cd, 'fj, 'fk");
+ break;
+ case CUN:
+ Format(instr, "fcmp.cun.d fcc'cd, 'fj, 'fk");
+ break;
+ case SUN:
+ Format(instr, "fcmp.sun.d fcc'cd, 'fj, 'fk");
+ break;
+ case CULT:
+ Format(instr, "fcmp.cult.d fcc'cd, 'fj, 'fk");
+ break;
+ case SULT:
+ Format(instr, "fcmp.sult.d fcc'cd, 'fj, 'fk");
+ break;
+ case CUEQ:
+ Format(instr, "fcmp.cueq.d fcc'cd, 'fj, 'fk");
+ break;
+ case SUEQ:
+ Format(instr, "fcmp.sueq.d fcc'cd, 'fj, 'fk");
+ break;
+ case CULE:
+ Format(instr, "fcmp.cule.d fcc'cd, 'fj, 'fk");
+ break;
+ case SULE:
+ Format(instr, "fcmp.sule.d fcc'cd, 'fj, 'fk");
+ break;
+ case CNE:
+ Format(instr, "fcmp.cne.d fcc'cd, 'fj, 'fk");
+ break;
+ case SNE:
+ Format(instr, "fcmp.sne.d fcc'cd, 'fj, 'fk");
+ break;
+ case COR:
+ Format(instr, "fcmp.cor.d fcc'cd, 'fj, 'fk");
+ break;
+ case SOR:
+ Format(instr, "fcmp.sor.d fcc'cd, 'fj, 'fk");
+ break;
+ case CUNE:
+ Format(instr, "fcmp.cune.d fcc'cd, 'fj, 'fk");
+ break;
+ case SUNE:
+ Format(instr, "fcmp.sune.d fcc'cd, 'fj, 'fk");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ break;
+ case FSEL:
+ Format(instr, "fsel 'fd, 'fj, 'fk, fcc'ca");
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Decoder::DecodeTypekOp14(Instruction* instr) {
+ switch (instr->Bits(31, 18) << 18) {
+ case ALSL:
+ if (instr->Bit(17))
+ Format(instr, "alsl.wu 'rd, 'rj, 'rk, 'sa2");
+ else
+ Format(instr, "alsl.w 'rd, 'rj, 'rk, 'sa2");
+ break;
+ case BYTEPICK_W:
+ Format(instr, "bytepick.w 'rd, 'rj, 'rk, 'sa2");
+ break;
+ case BYTEPICK_D:
+ Format(instr, "bytepick.d 'rd, 'rj, 'rk, 'sa3");
+ break;
+ case ALSL_D:
+ Format(instr, "alsl.d 'rd, 'rj, 'rk, 'sa2");
+ break;
+ case SLLI:
+ if (instr->Bit(16))
+ Format(instr, "slli.d 'rd, 'rj, 'ui6");
+ else
+ Format(instr, "slli.w 'rd, 'rj, 'ui5");
+ break;
+ case SRLI:
+ if (instr->Bit(16))
+ Format(instr, "srli.d 'rd, 'rj, 'ui6");
+ else
+ Format(instr, "srli.w 'rd, 'rj, 'ui5");
+ break;
+ case SRAI:
+ if (instr->Bit(16))
+ Format(instr, "srai.d 'rd, 'rj, 'ui6");
+ else
+ Format(instr, "srai.w 'rd, 'rj, 'ui5");
+ break;
+ case ROTRI:
+ if (instr->Bit(16))
+ Format(instr, "rotri.d 'rd, 'rj, 'ui6");
+ else
+ Format(instr, "rotri.w 'rd, 'rj, 'ui5");
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+int Decoder::DecodeTypekOp17(Instruction* instr) {
+ switch (instr->Bits(31, 15) << 15) {
+ case ADD_W:
+ Format(instr, "add.w 'rd, 'rj, 'rk");
+ break;
+ case ADD_D:
+ Format(instr, "add.d 'rd, 'rj, 'rk");
+ break;
+ case SUB_W:
+ Format(instr, "sub.w 'rd, 'rj, 'rk");
+ break;
+ case SUB_D:
+ Format(instr, "sub.d 'rd, 'rj, 'rk");
+ break;
+ case SLT:
+ Format(instr, "slt 'rd, 'rj, 'rk");
+ break;
+ case SLTU:
+ Format(instr, "sltu 'rd, 'rj, 'rk");
+ break;
+ case MASKEQZ:
+ Format(instr, "maskeqz 'rd, 'rj, 'rk");
+ break;
+ case MASKNEZ:
+ Format(instr, "masknez 'rd, 'rj, 'rk");
+ break;
+ case NOR:
+ Format(instr, "nor 'rd, 'rj, 'rk");
+ break;
+ case AND:
+ Format(instr, "and 'rd, 'rj, 'rk");
+ break;
+ case OR:
+ Format(instr, "or 'rd, 'rj, 'rk");
+ break;
+ case XOR:
+ Format(instr, "xor 'rd, 'rj, 'rk");
+ break;
+ case ORN:
+ Format(instr, "orn 'rd, 'rj, 'rk");
+ break;
+ case ANDN:
+ Format(instr, "andn 'rd, 'rj, 'rk");
+ break;
+ case SLL_W:
+ Format(instr, "sll.w 'rd, 'rj, 'rk");
+ break;
+ case SRL_W:
+ Format(instr, "srl.w 'rd, 'rj, 'rk");
+ break;
+ case SRA_W:
+ Format(instr, "sra.w 'rd, 'rj, 'rk");
+ break;
+ case SLL_D:
+ Format(instr, "sll.d 'rd, 'rj, 'rk");
+ break;
+ case SRL_D:
+ Format(instr, "srl.d 'rd, 'rj, 'rk");
+ break;
+ case SRA_D:
+ Format(instr, "sra.d 'rd, 'rj, 'rk");
+ break;
+ case ROTR_D:
+ Format(instr, "rotr.d 'rd, 'rj, 'rk");
+ break;
+ case ROTR_W:
+ Format(instr, "rotr.w 'rd, 'rj, 'rk");
+ break;
+ case MUL_W:
+ Format(instr, "mul.w 'rd, 'rj, 'rk");
+ break;
+ case MULH_W:
+ Format(instr, "mulh.w 'rd, 'rj, 'rk");
+ break;
+ case MULH_WU:
+ Format(instr, "mulh.wu 'rd, 'rj, 'rk");
+ break;
+ case MUL_D:
+ Format(instr, "mul.d 'rd, 'rj, 'rk");
+ break;
+ case MULH_D:
+ Format(instr, "mulh.d 'rd, 'rj, 'rk");
+ break;
+ case MULH_DU:
+ Format(instr, "mulh.du 'rd, 'rj, 'rk");
+ break;
+ case MULW_D_W:
+ Format(instr, "mulw.d.w 'rd, 'rj, 'rk");
+ break;
+ case MULW_D_WU:
+ Format(instr, "mulw.d.wu 'rd, 'rj, 'rk");
+ break;
+ case DIV_W:
+ Format(instr, "div.w 'rd, 'rj, 'rk");
+ break;
+ case MOD_W:
+ Format(instr, "mod.w 'rd, 'rj, 'rk");
+ break;
+ case DIV_WU:
+ Format(instr, "div.wu 'rd, 'rj, 'rk");
+ break;
+ case MOD_WU:
+ Format(instr, "mod.wu 'rd, 'rj, 'rk");
+ break;
+ case DIV_D:
+ Format(instr, "div.d 'rd, 'rj, 'rk");
+ break;
+ case MOD_D:
+ Format(instr, "mod.d 'rd, 'rj, 'rk");
+ break;
+ case DIV_DU:
+ Format(instr, "div.du 'rd, 'rj, 'rk");
+ break;
+ case MOD_DU:
+ Format(instr, "mod.du 'rd, 'rj, 'rk");
+ break;
+ case BREAK:
+ return DecodeBreakInstr(instr);
+ case FADD_S:
+ Format(instr, "fadd.s 'fd, 'fj, 'fk");
+ break;
+ case FADD_D:
+ Format(instr, "fadd.d 'fd, 'fj, 'fk");
+ break;
+ case FSUB_S:
+ Format(instr, "fsub.s 'fd, 'fj, 'fk");
+ break;
+ case FSUB_D:
+ Format(instr, "fsub.d 'fd, 'fj, 'fk");
+ break;
+ case FMUL_S:
+ Format(instr, "fmul.s 'fd, 'fj, 'fk");
+ break;
+ case FMUL_D:
+ Format(instr, "fmul.d 'fd, 'fj, 'fk");
+ break;
+ case FDIV_S:
+ Format(instr, "fdiv.s 'fd, 'fj, 'fk");
+ break;
+ case FDIV_D:
+ Format(instr, "fdiv.d 'fd, 'fj, 'fk");
+ break;
+ case FMAX_S:
+ Format(instr, "fmax.s 'fd, 'fj, 'fk");
+ break;
+ case FMAX_D:
+ Format(instr, "fmax.d 'fd, 'fj, 'fk");
+ break;
+ case FMIN_S:
+ Format(instr, "fmin.s 'fd, 'fj, 'fk");
+ break;
+ case FMIN_D:
+ Format(instr, "fmin.d 'fd, 'fj, 'fk");
+ break;
+ case FMAXA_S:
+ Format(instr, "fmaxa.s 'fd, 'fj, 'fk");
+ break;
+ case FMAXA_D:
+ Format(instr, "fmaxa.d 'fd, 'fj, 'fk");
+ break;
+ case FMINA_S:
+ Format(instr, "fmina.s 'fd, 'fj, 'fk");
+ break;
+ case FMINA_D:
+ Format(instr, "fmina.d 'fd, 'fj, 'fk");
+ break;
+ case LDX_B:
+ Format(instr, "ldx.b 'rd, 'rj, 'rk");
+ break;
+ case LDX_H:
+ Format(instr, "ldx.h 'rd, 'rj, 'rk");
+ break;
+ case LDX_W:
+ Format(instr, "ldx.w 'rd, 'rj, 'rk");
+ break;
+ case LDX_D:
+ Format(instr, "ldx.d 'rd, 'rj, 'rk");
+ break;
+ case STX_B:
+ Format(instr, "stx.b 'rd, 'rj, 'rk");
+ break;
+ case STX_H:
+ Format(instr, "stx.h 'rd, 'rj, 'rk");
+ break;
+ case STX_W:
+ Format(instr, "stx.w 'rd, 'rj, 'rk");
+ break;
+ case STX_D:
+ Format(instr, "stx.d 'rd, 'rj, 'rk");
+ break;
+ case LDX_BU:
+ Format(instr, "ldx.bu 'rd, 'rj, 'rk");
+ break;
+ case LDX_HU:
+ Format(instr, "ldx.hu 'rd, 'rj, 'rk");
+ break;
+ case LDX_WU:
+ Format(instr, "ldx.wu 'rd, 'rj, 'rk");
+ break;
+ case FLDX_S:
+ Format(instr, "fldx.s 'fd, 'rj, 'rk");
+ break;
+ case FLDX_D:
+ Format(instr, "fldx.d 'fd, 'rj, 'rk");
+ break;
+ case FSTX_S:
+ Format(instr, "fstx.s 'fd, 'rj, 'rk");
+ break;
+ case FSTX_D:
+ Format(instr, "fstx.d 'fd, 'rj, 'rk");
+ break;
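+      // Atomic memory operations print their operands as rd, rk, rj: the
+      // value operand (rk) precedes the address register (rj). The _db
+      // variants are the same operations with barrier semantics.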
+ case AMSWAP_W:
+ Format(instr, "amswap.w 'rd, 'rk, 'rj");
+ break;
+ case AMSWAP_D:
+ Format(instr, "amswap.d 'rd, 'rk, 'rj");
+ break;
+ case AMADD_W:
+ Format(instr, "amadd.w 'rd, 'rk, 'rj");
+ break;
+ case AMADD_D:
+ Format(instr, "amadd.d 'rd, 'rk, 'rj");
+ break;
+ case AMAND_W:
+ Format(instr, "amand.w 'rd, 'rk, 'rj");
+ break;
+ case AMAND_D:
+ Format(instr, "amand.d 'rd, 'rk, 'rj");
+ break;
+ case AMOR_W:
+ Format(instr, "amor.w 'rd, 'rk, 'rj");
+ break;
+ case AMOR_D:
+ Format(instr, "amor.d 'rd, 'rk, 'rj");
+ break;
+ case AMXOR_W:
+ Format(instr, "amxor.w 'rd, 'rk, 'rj");
+ break;
+ case AMXOR_D:
+ Format(instr, "amxor.d 'rd, 'rk, 'rj");
+ break;
+ case AMMAX_W:
+ Format(instr, "ammax.w 'rd, 'rk, 'rj");
+ break;
+ case AMMAX_D:
+ Format(instr, "ammax.d 'rd, 'rk, 'rj");
+ break;
+ case AMMIN_W:
+ Format(instr, "ammin.w 'rd, 'rk, 'rj");
+ break;
+ case AMMIN_D:
+ Format(instr, "ammin.d 'rd, 'rk, 'rj");
+ break;
+ case AMMAX_WU:
+ Format(instr, "ammax.wu 'rd, 'rk, 'rj");
+ break;
+ case AMMAX_DU:
+ Format(instr, "ammax.du 'rd, 'rk, 'rj");
+ break;
+ case AMMIN_WU:
+ Format(instr, "ammin.wu 'rd, 'rk, 'rj");
+ break;
+ case AMMIN_DU:
+ Format(instr, "ammin.du 'rd, 'rk, 'rj");
+ break;
+ case AMSWAP_DB_W:
+ Format(instr, "amswap_db.w 'rd, 'rk, 'rj");
+ break;
+ case AMSWAP_DB_D:
+ Format(instr, "amswap_db.d 'rd, 'rk, 'rj");
+ break;
+ case AMADD_DB_W:
+ Format(instr, "amadd_db.w 'rd, 'rk, 'rj");
+ break;
+ case AMADD_DB_D:
+ Format(instr, "amadd_db.d 'rd, 'rk, 'rj");
+ break;
+ case AMAND_DB_W:
+ Format(instr, "amand_db.w 'rd, 'rk, 'rj");
+ break;
+ case AMAND_DB_D:
+ Format(instr, "amand_db.d 'rd, 'rk, 'rj");
+ break;
+ case AMOR_DB_W:
+ Format(instr, "amor_db.w 'rd, 'rk, 'rj");
+ break;
+ case AMOR_DB_D:
+ Format(instr, "amor_db.d 'rd, 'rk, 'rj");
+ break;
+ case AMXOR_DB_W:
+ Format(instr, "amxor_db.w 'rd, 'rk, 'rj");
+ break;
+ case AMXOR_DB_D:
+ Format(instr, "amxor_db.d 'rd, 'rk, 'rj");
+ break;
+ case AMMAX_DB_W:
+ Format(instr, "ammax_db.w 'rd, 'rk, 'rj");
+ break;
+ case AMMAX_DB_D:
+ Format(instr, "ammax_db.d 'rd, 'rk, 'rj");
+ break;
+ case AMMIN_DB_W:
+ Format(instr, "ammin_db.w 'rd, 'rk, 'rj");
+ break;
+ case AMMIN_DB_D:
+ Format(instr, "ammin_db.d 'rd, 'rk, 'rj");
+ break;
+ case AMMAX_DB_WU:
+ Format(instr, "ammax_db.wu 'rd, 'rk, 'rj");
+ break;
+ case AMMAX_DB_DU:
+ Format(instr, "ammax_db.du 'rd, 'rk, 'rj");
+ break;
+ case AMMIN_DB_WU:
+ Format(instr, "ammin_db.wu 'rd, 'rk, 'rj");
+ break;
+ case AMMIN_DB_DU:
+ Format(instr, "ammin_db.du 'rd, 'rk, 'rj");
+ break;
+ case DBAR:
+ Format(instr, "dbar 'hint15");
+ break;
+ case IBAR:
+ Format(instr, "ibar 'hint15");
+ break;
+ case FSCALEB_S:
+ Format(instr, "fscaleb.s 'fd, 'fj, 'fk");
+ break;
+ case FSCALEB_D:
+ Format(instr, "fscaleb.d 'fd, 'fj, 'fk");
+ break;
+ case FCOPYSIGN_S:
+ Format(instr, "fcopysign.s 'fd, 'fj, 'fk");
+ break;
+ case FCOPYSIGN_D:
+ Format(instr, "fcopysign.d 'fd, 'fj, 'fk");
+ break;
+ default:
+ UNREACHABLE();
+ }
+ return kInstrSize;
+}
+
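+// kOp22-format instructions use bits [31:10] as the opcode; these are the
+// two-operand forms (bit manipulation, FP unary ops, conversions, moves).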
+void Decoder::DecodeTypekOp22(Instruction* instr) {
+ switch (instr->Bits(31, 10) << 10) {
+ case CLZ_W:
+ Format(instr, "clz.w 'rd, 'rj");
+ break;
+ case CTZ_W:
+ Format(instr, "ctz.w 'rd, 'rj");
+ break;
+ case CLZ_D:
+ Format(instr, "clz.d 'rd, 'rj");
+ break;
+ case CTZ_D:
+ Format(instr, "ctz.d 'rd, 'rj");
+ break;
+ case REVB_2H:
+ Format(instr, "revb.2h 'rd, 'rj");
+ break;
+ case REVB_4H:
+ Format(instr, "revb.4h 'rd, 'rj");
+ break;
+ case REVB_2W:
+ Format(instr, "revb.2w 'rd, 'rj");
+ break;
+ case REVB_D:
+ Format(instr, "revb.d 'rd, 'rj");
+ break;
+ case REVH_2W:
+ Format(instr, "revh.2w 'rd, 'rj");
+ break;
+ case REVH_D:
+ Format(instr, "revh.d 'rd, 'rj");
+ break;
+ case BITREV_4B:
+ Format(instr, "bitrev.4b 'rd, 'rj");
+ break;
+ case BITREV_8B:
+ Format(instr, "bitrev.8b 'rd, 'rj");
+ break;
+ case BITREV_W:
+ Format(instr, "bitrev.w 'rd, 'rj");
+ break;
+ case BITREV_D:
+ Format(instr, "bitrev.d 'rd, 'rj");
+ break;
+ case EXT_W_B:
+ Format(instr, "ext.w.b 'rd, 'rj");
+ break;
+ case EXT_W_H:
+ Format(instr, "ext.w.h 'rd, 'rj");
+ break;
+ case FABS_S:
+ Format(instr, "fabs.s 'fd, 'fj");
+ break;
+ case FABS_D:
+ Format(instr, "fabs.d 'fd, 'fj");
+ break;
+ case FNEG_S:
+ Format(instr, "fneg.s 'fd, 'fj");
+ break;
+ case FNEG_D:
+ Format(instr, "fneg.d 'fd, 'fj");
+ break;
+ case FSQRT_S:
+ Format(instr, "fsqrt.s 'fd, 'fj");
+ break;
+ case FSQRT_D:
+ Format(instr, "fsqrt.d 'fd, 'fj");
+ break;
+ case FMOV_S:
+ Format(instr, "fmov.s 'fd, 'fj");
+ break;
+ case FMOV_D:
+ Format(instr, "fmov.d 'fd, 'fj");
+ break;
+ case MOVGR2FR_W:
+ Format(instr, "movgr2fr.w 'fd, 'rj");
+ break;
+ case MOVGR2FR_D:
+ Format(instr, "movgr2fr.d 'fd, 'rj");
+ break;
+ case MOVGR2FRH_W:
+ Format(instr, "movgr2frh.w 'fd, 'rj");
+ break;
+ case MOVFR2GR_S:
+ Format(instr, "movfr2gr.s 'rd, 'fj");
+ break;
+ case MOVFR2GR_D:
+ Format(instr, "movfr2gr.d 'rd, 'fj");
+ break;
+ case MOVFRH2GR_S:
+ Format(instr, "movfrh2gr.s 'rd, 'fj");
+ break;
+ case MOVGR2FCSR:
+ Format(instr, "movgr2fcsr fcsr, 'rj");
+ break;
+ case MOVFCSR2GR:
+ Format(instr, "movfcsr2gr 'rd, fcsr");
+ break;
+ case FCVT_S_D:
+ Format(instr, "fcvt.s.d 'fd, 'fj");
+ break;
+ case FCVT_D_S:
+ Format(instr, "fcvt.d.s 'fd, 'fj");
+ break;
+ case FTINTRM_W_S:
+ Format(instr, "ftintrm.w.s 'fd, 'fj");
+ break;
+ case FTINTRM_W_D:
+ Format(instr, "ftintrm.w.d 'fd, 'fj");
+ break;
+ case FTINTRM_L_S:
+ Format(instr, "ftintrm.l.s 'fd, 'fj");
+ break;
+ case FTINTRM_L_D:
+ Format(instr, "ftintrm.l.d 'fd, 'fj");
+ break;
+ case FTINTRP_W_S:
+ Format(instr, "ftintrp.w.s 'fd, 'fj");
+ break;
+ case FTINTRP_W_D:
+ Format(instr, "ftintrp.w.d 'fd, 'fj");
+ break;
+ case FTINTRP_L_S:
+ Format(instr, "ftintrp.l.s 'fd, 'fj");
+ break;
+ case FTINTRP_L_D:
+ Format(instr, "ftintrp.l.d 'fd, 'fj");
+ break;
+ case FTINTRZ_W_S:
+ Format(instr, "ftintrz.w.s 'fd, 'fj");
+ break;
+ case FTINTRZ_W_D:
+ Format(instr, "ftintrz.w.d 'fd, 'fj");
+ break;
+ case FTINTRZ_L_S:
+ Format(instr, "ftintrz.l.s 'fd, 'fj");
+ break;
+ case FTINTRZ_L_D:
+ Format(instr, "ftintrz.l.d 'fd, 'fj");
+ break;
+ case FTINTRNE_W_S:
+ Format(instr, "ftintrne.w.s 'fd, 'fj");
+ break;
+ case FTINTRNE_W_D:
+ Format(instr, "ftintrne.w.d 'fd, 'fj");
+ break;
+ case FTINTRNE_L_S:
+ Format(instr, "ftintrne.l.s 'fd, 'fj");
+ break;
+ case FTINTRNE_L_D:
+ Format(instr, "ftintrne.l.d 'fd, 'fj");
+ break;
+ case FTINT_W_S:
+ Format(instr, "ftint.w.s 'fd, 'fj");
+ break;
+ case FTINT_W_D:
+ Format(instr, "ftint.w.d 'fd, 'fj");
+ break;
+ case FTINT_L_S:
+ Format(instr, "ftint.l.s 'fd, 'fj");
+ break;
+ case FTINT_L_D:
+ Format(instr, "ftint.l.d 'fd, 'fj");
+ break;
+ case FFINT_S_W:
+ Format(instr, "ffint.s.w 'fd, 'fj");
+ break;
+ case FFINT_S_L:
+ Format(instr, "ffint.s.l 'fd, 'fj");
+ break;
+ case FFINT_D_W:
+ Format(instr, "ffint.d.w 'fd, 'fj");
+ break;
+ case FFINT_D_L:
+ Format(instr, "ffint.d.l 'fd, 'fj");
+ break;
+ case FRINT_S:
+ Format(instr, "frint.s 'fd, 'fj");
+ break;
+ case FRINT_D:
+ Format(instr, "frint.d 'fd, 'fj");
+ break;
+ case MOVFR2CF:
+ Format(instr, "movfr2cf fcc'cd, 'fj");
+ break;
+ case MOVCF2FR:
+ Format(instr, "movcf2fr 'fd, fcc'cj");
+ break;
+ case MOVGR2CF:
+ Format(instr, "movgr2cf fcc'cd, 'rj");
+ break;
+ case MOVCF2GR:
+ Format(instr, "movcf2gr 'rd, fcc'cj");
+ break;
+ case FRECIP_S:
+ Format(instr, "frecip.s 'fd, 'fj");
+ break;
+ case FRECIP_D:
+ Format(instr, "frecip.d 'fd, 'fj");
+ break;
+ case FRSQRT_S:
+ Format(instr, "frsqrt.s 'fd, 'fj");
+ break;
+ case FRSQRT_D:
+ Format(instr, "frsqrt.d 'fd, 'fj");
+ break;
+ case FCLASS_S:
+ Format(instr, "fclass.s 'fd, 'fj");
+ break;
+ case FCLASS_D:
+ Format(instr, "fclass.d 'fd, 'fj");
+ break;
+ case FLOGB_S:
+ Format(instr, "flogb.s 'fd, 'fj");
+ break;
+ case FLOGB_D:
+ Format(instr, "flogb.d 'fd, 'fj");
+ break;
+ case CLO_W:
+ Format(instr, "clo.w 'rd, 'rj");
+ break;
+ case CTO_W:
+ Format(instr, "cto.w 'rd, 'rj");
+ break;
+ case CLO_D:
+ Format(instr, "clo.d 'rd, 'rj");
+ break;
+ case CTO_D:
+ Format(instr, "cto.d 'rd, 'rj");
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
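+// Disassembles the instruction at instr_ptr: prints the raw instruction
+// bits, dispatches on the decoded instruction type, and returns the length
+// in bytes (kInstrSize for everything except a simulator break, which
+// DecodeTypekOp17 sizes via DecodeBreakInstr).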
+int Decoder::InstructionDecode(byte* instr_ptr) {
+ Instruction* instr = Instruction::At(instr_ptr);
+ out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_,
+ "%08x ", instr->InstructionBits());
+ switch (instr->InstructionType()) {
+ case Instruction::kOp6Type: {
+ DecodeTypekOp6(instr);
+ break;
+ }
+ case Instruction::kOp7Type: {
+ DecodeTypekOp7(instr);
+ break;
+ }
+ case Instruction::kOp8Type: {
+ DecodeTypekOp8(instr);
+ break;
+ }
+ case Instruction::kOp10Type: {
+ DecodeTypekOp10(instr);
+ break;
+ }
+ case Instruction::kOp12Type: {
+ DecodeTypekOp12(instr);
+ break;
+ }
+ case Instruction::kOp14Type: {
+ DecodeTypekOp14(instr);
+ break;
+ }
+ case Instruction::kOp17Type: {
+ return DecodeTypekOp17(instr);
+ }
+ case Instruction::kOp22Type: {
+ DecodeTypekOp22(instr);
+ break;
+ }
+    case Instruction::kUnsupported:
+    default: {
+      Format(instr, "UNSUPPORTED");
+      break;
+    }
+ }
+ return kInstrSize;
+}
+
+} // namespace internal
+} // namespace v8
+
+//------------------------------------------------------------------------------
+
+namespace disasm {
+
+const char* NameConverter::NameOfAddress(byte* addr) const {
+ v8::base::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
+ return tmp_buffer_.begin();
+}
+
+const char* NameConverter::NameOfConstant(byte* addr) const {
+ return NameOfAddress(addr);
+}
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+ return v8::internal::Registers::Name(reg);
+}
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+ return v8::internal::FPURegisters::Name(reg);
+}
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
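+  // LOONG64 does not have the concept of a byte register.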
+ UNREACHABLE();
+}
+
+const char* NameConverter::NameInCode(byte* addr) const {
+  // The default name converter is called for unknown code, so do not try to
+  // access any memory.
+ return "";
+}
+
+//------------------------------------------------------------------------------
+
+int Disassembler::InstructionDecode(v8::base::Vector<char> buffer,
+ byte* instruction) {
+ v8::internal::Decoder d(converter_, buffer);
+ return d.InstructionDecode(instruction);
+}
+
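+// Returning -1 signals that there is no constant pool at this address; the
+// loong64 port does not embed constant pools in code.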
+int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
+
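+// Disassembles [begin, end), printing one line per instruction in the form
+// "<address> <raw bits> <mnemonic and operands>".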
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end,
+ UnimplementedOpcodeAction unimplemented_action) {
+ NameConverter converter;
+ Disassembler d(converter, unimplemented_action);
+ for (byte* pc = begin; pc < end;) {
+ v8::base::EmbeddedVector<char, 128> buffer;
+ buffer[0] = '\0';
+ byte* prev_pc = pc;
+ pc += d.InstructionDecode(buffer, pc);
+ v8::internal::PrintF(f, "%p %08x %s\n", static_cast<void*>(prev_pc),
+ *reinterpret_cast<int32_t*>(prev_pc), buffer.begin());
+ }
+}
+
+#undef STRING_STARTS_WITH
+
+} // namespace disasm
+
+#endif // V8_TARGET_ARCH_LOONG64
diff --git a/deps/v8/src/diagnostics/loong64/unwinder-loong64.cc b/deps/v8/src/diagnostics/loong64/unwinder-loong64.cc
new file mode 100644
index 0000000000..84d2e41cfc
--- /dev/null
+++ b/deps/v8/src/diagnostics/loong64/unwinder-loong64.cc
@@ -0,0 +1,14 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/diagnostics/unwinder.h"
+
+namespace v8 {
+
+struct RegisterState;
+
+void GetCalleeSavedRegistersFromEntryFrame(void* fp,
+ RegisterState* register_state) {}
+
+} // namespace v8
diff --git a/deps/v8/src/diagnostics/mips/disasm-mips.cc b/deps/v8/src/diagnostics/mips/disasm-mips.cc
index c5aeb27457..32a0bdb048 100644
--- a/deps/v8/src/diagnostics/mips/disasm-mips.cc
+++ b/deps/v8/src/diagnostics/mips/disasm-mips.cc
@@ -555,7 +555,6 @@ void Decoder::PrintMsaDataFormat(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else {
char DF[] = {'b', 'h', 'w', 'd'};
@@ -600,7 +599,6 @@ void Decoder::PrintMsaDataFormat(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
@@ -904,7 +902,6 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
PrintSa(instr);
return 2;
}
- break;
case 'd': {
DCHECK(STRING_STARTS_WITH(format, "sd"));
PrintSd(instr);
@@ -1521,7 +1518,6 @@ void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
}
}
@@ -1538,7 +1534,6 @@ void Decoder::DecodeTypeRegister(Instruction* instr) {
switch (instr->RsFieldRaw()) {
case BC1: // bc1 handled in DecodeTypeImmediate.
UNREACHABLE();
- break;
case MFC1:
Format(instr, "mfc1 'rt, 'fs");
break;
@@ -1966,7 +1961,6 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
}
@@ -1997,7 +1991,6 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
break;
default:
@@ -2703,7 +2696,6 @@ const char* NameConverter::NameOfXMMRegister(int reg) const {
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // MIPS does not have the concept of a byte register.
- return "nobytereg";
}
const char* NameConverter::NameInCode(byte* addr) const {
diff --git a/deps/v8/src/diagnostics/mips64/disasm-mips64.cc b/deps/v8/src/diagnostics/mips64/disasm-mips64.cc
index d8ff14730d..0712431fc3 100644
--- a/deps/v8/src/diagnostics/mips64/disasm-mips64.cc
+++ b/deps/v8/src/diagnostics/mips64/disasm-mips64.cc
@@ -596,7 +596,6 @@ void Decoder::PrintMsaDataFormat(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else {
char DF[] = {'b', 'h', 'w', 'd'};
@@ -641,7 +640,6 @@ void Decoder::PrintMsaDataFormat(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
@@ -945,7 +943,6 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
PrintSa(instr);
return 2;
}
- break;
case 'd': {
DCHECK(STRING_STARTS_WITH(format, "sd"));
PrintSd(instr);
@@ -1744,7 +1741,6 @@ void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
break;
}
@@ -1761,7 +1757,6 @@ void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
break;
}
@@ -1782,7 +1777,6 @@ void Decoder::DecodeTypeRegisterSPECIAL3(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
break;
}
@@ -2250,7 +2244,6 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
break;
}
@@ -2285,7 +2278,6 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
break;
default:
@@ -2993,7 +2985,6 @@ const char* NameConverter::NameOfXMMRegister(int reg) const {
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // MIPS does not have the concept of a byte register.
- return "nobytereg";
}
const char* NameConverter::NameInCode(byte* addr) const {
diff --git a/deps/v8/src/diagnostics/objects-debug.cc b/deps/v8/src/diagnostics/objects-debug.cc
index e45d7580c8..a74548e949 100644
--- a/deps/v8/src/diagnostics/objects-debug.cc
+++ b/deps/v8/src/diagnostics/objects-debug.cc
@@ -343,8 +343,6 @@ void BytecodeArray::BytecodeArrayVerify(Isolate* isolate) {
}
}
-USE_TORQUE_VERIFIER(JSReceiver)
-
bool JSObject::ElementsAreSafeToExamine(PtrComprCageBase cage_base) const {
// If a GC was caused while constructing this object, the elements
// pointer may point to a one pointer filler map.
@@ -785,8 +783,6 @@ void JSDate::JSDateVerify(Isolate* isolate) {
}
}
-USE_TORQUE_VERIFIER(JSMessageObject)
-
void String::StringVerify(Isolate* isolate) {
TorqueGeneratedClassVerifiers::StringVerify(*this, isolate);
CHECK(length() >= 0 && length() <= Smi::kMaxValue);
@@ -1005,8 +1001,11 @@ void Code::CodeVerify(Isolate* isolate) {
CHECK_LE(constant_pool_offset(), code_comments_offset());
CHECK_LE(code_comments_offset(), unwinding_info_offset());
CHECK_LE(unwinding_info_offset(), MetadataSize());
+#if !defined(_MSC_VER) || defined(__clang__)
+ // See also: PlatformEmbeddedFileWriterWin::AlignToCodeAlignment.
CHECK_IMPLIES(!ReadOnlyHeap::Contains(*this),
IsAligned(InstructionStart(), kCodeAlignment));
+#endif // !defined(_MSC_VER) || defined(__clang__)
CHECK_IMPLIES(!ReadOnlyHeap::Contains(*this),
IsAligned(raw_instruction_start(), kCodeAlignment));
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
@@ -1139,19 +1138,13 @@ void JSWeakRef::JSWeakRefVerify(Isolate* isolate) {
}
void JSFinalizationRegistry::JSFinalizationRegistryVerify(Isolate* isolate) {
- CHECK(IsJSFinalizationRegistry());
- JSObjectVerify(isolate);
- VerifyHeapPointer(isolate, cleanup());
- CHECK(active_cells().IsUndefined(isolate) || active_cells().IsWeakCell());
+ TorqueGeneratedClassVerifiers::JSFinalizationRegistryVerify(*this, isolate);
if (active_cells().IsWeakCell()) {
CHECK(WeakCell::cast(active_cells()).prev().IsUndefined(isolate));
}
- CHECK(cleared_cells().IsUndefined(isolate) || cleared_cells().IsWeakCell());
if (cleared_cells().IsWeakCell()) {
CHECK(WeakCell::cast(cleared_cells()).prev().IsUndefined(isolate));
}
- CHECK(next_dirty().IsUndefined(isolate) ||
- next_dirty().IsJSFinalizationRegistry());
}
void JSWeakMap::JSWeakMapVerify(Isolate* isolate) {
diff --git a/deps/v8/src/diagnostics/objects-printer.cc b/deps/v8/src/diagnostics/objects-printer.cc
index 46fccedde7..f8e967dbf1 100644
--- a/deps/v8/src/diagnostics/objects-printer.cc
+++ b/deps/v8/src/diagnostics/objects-printer.cc
@@ -1513,7 +1513,7 @@ void JSFunction::JSFunctionPrint(std::ostream& os) {
}
os << "\n - formal_parameter_count: "
- << shared().internal_formal_parameter_count();
+ << shared().internal_formal_parameter_count_without_receiver();
os << "\n - kind: " << shared().kind();
os << "\n - context: " << Brief(context());
os << "\n - code: " << Brief(raw_code());
@@ -1583,7 +1583,8 @@ void SharedFunctionInfo::SharedFunctionInfoPrint(std::ostream& os) {
os << "\n - kind: " << kind();
os << "\n - syntax kind: " << syntax_kind();
os << "\n - function_map_index: " << function_map_index();
- os << "\n - formal_parameter_count: " << internal_formal_parameter_count();
+ os << "\n - formal_parameter_count: "
+ << internal_formal_parameter_count_without_receiver();
os << "\n - expected_nof_properties: " << expected_nof_properties();
os << "\n - language_mode: " << language_mode();
os << "\n - data: " << Brief(function_data(kAcquireLoad));
@@ -1658,7 +1659,7 @@ void Code::CodePrint(std::ostream& os) {
void CodeDataContainer::CodeDataContainerPrint(std::ostream& os) {
PrintHeader(os, "CodeDataContainer");
- os << "\n - kind_specific_flags: " << kind_specific_flags();
+ os << "\n - kind_specific_flags: " << kind_specific_flags(kRelaxedLoad);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
os << "\n - code: " << Brief(code());
os << "\n - code_entry_point: "
@@ -1985,7 +1986,6 @@ void WasmInstanceObject::WasmInstanceObjectPrint(std::ostream& os) {
}
os << "\n - memory_start: " << static_cast<void*>(memory_start());
os << "\n - memory_size: " << memory_size();
- os << "\n - memory_mask: " << AsHex(memory_mask());
os << "\n - imported_function_targets: "
<< static_cast<void*>(imported_function_targets());
os << "\n - globals_start: " << static_cast<void*>(globals_start());
diff --git a/deps/v8/src/diagnostics/perf-jit.h b/deps/v8/src/diagnostics/perf-jit.h
index 746f9f7c85..47a6002b08 100644
--- a/deps/v8/src/diagnostics/perf-jit.h
+++ b/deps/v8/src/diagnostics/perf-jit.h
@@ -87,6 +87,7 @@ class PerfJitLogger : public CodeEventLogger {
static const uint32_t kElfMachARM = 40;
static const uint32_t kElfMachMIPS = 8;
static const uint32_t kElfMachMIPS64 = 8;
+ static const uint32_t kElfMachLOONG64 = 258;
static const uint32_t kElfMachARM64 = 183;
static const uint32_t kElfMachS390x = 22;
static const uint32_t kElfMachPPC64 = 21;
@@ -103,6 +104,8 @@ class PerfJitLogger : public CodeEventLogger {
return kElfMachMIPS;
#elif V8_TARGET_ARCH_MIPS64
return kElfMachMIPS64;
+#elif V8_TARGET_ARCH_LOONG64
+ return kElfMachLOONG64;
#elif V8_TARGET_ARCH_ARM64
return kElfMachARM64;
#elif V8_TARGET_ARCH_S390X
diff --git a/deps/v8/src/diagnostics/ppc/disasm-ppc.cc b/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
index affbc0fc8e..7d366a6ba1 100644
--- a/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
+++ b/deps/v8/src/diagnostics/ppc/disasm-ppc.cc
@@ -917,6 +917,18 @@ void Decoder::DecodeExt2(Instruction* instr) {
Format(instr, "cnttzd'. 'ra, 'rs");
return;
}
+ case BRH: {
+ Format(instr, "brh 'ra, 'rs");
+ return;
+ }
+ case BRW: {
+ Format(instr, "brw 'ra, 'rs");
+ return;
+ }
+ case BRD: {
+ Format(instr, "brd 'ra, 'rs");
+ return;
+ }
case ANDX: {
Format(instr, "and'. 'ra, 'rs, 'rb");
return;
@@ -1393,13 +1405,20 @@ void Decoder::DecodeExt6(Instruction* instr) {
#undef DECODE_XX2_B_INSTRUCTIONS
}
switch (EXT6 | (instr->BitField(10, 2))) {
-#define DECODE_XX2_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
- case opcode_name: { \
- Format(instr, #name " 'Xt, 'Xb"); \
- return; \
+#define DECODE_XX2_VECTOR_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: { \
+ Format(instr, #name " 'Xt, 'Xb"); \
+ return; \
+ }
+ PPC_XX2_OPCODE_VECTOR_A_FORM_LIST(DECODE_XX2_VECTOR_A_INSTRUCTIONS)
+#undef DECODE_XX2_VECTOR_A_INSTRUCTIONS
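+// Scalar A-form XX2 instructions operate on FP scalar values, so they are
+// printed with 'Dt, 'Db operands instead of the vector 'Xt, 'Xb pair.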
+#define DECODE_XX2_SCALAR_A_INSTRUCTIONS(name, opcode_name, opcode_value) \
+ case opcode_name: { \
+ Format(instr, #name " 'Dt, 'Db"); \
+ return; \
}
- PPC_XX2_OPCODE_A_FORM_LIST(DECODE_XX2_A_INSTRUCTIONS)
-#undef DECODE_XX2_A_INSTRUCTIONS
+ PPC_XX2_OPCODE_SCALAR_A_FORM_LIST(DECODE_XX2_SCALAR_A_INSTRUCTIONS)
+#undef DECODE_XX2_SCALAR_A_INSTRUCTIONS
}
Unknown(instr); // not used by V8
}
diff --git a/deps/v8/src/diagnostics/ppc/eh-frame-ppc.cc b/deps/v8/src/diagnostics/ppc/eh-frame-ppc.cc
index 148d01116d..8f7198cd05 100644
--- a/deps/v8/src/diagnostics/ppc/eh-frame-ppc.cc
+++ b/deps/v8/src/diagnostics/ppc/eh-frame-ppc.cc
@@ -32,7 +32,6 @@ int EhFrameWriter::RegisterToDwarfCode(Register name) {
return kR0DwarfCode;
default:
UNIMPLEMENTED();
- return -1;
}
}
@@ -47,7 +46,6 @@ const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) {
return "sp";
default:
UNIMPLEMENTED();
- return nullptr;
}
}
diff --git a/deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc b/deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc
index 2955612166..c3977cbf3e 100644
--- a/deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc
+++ b/deps/v8/src/diagnostics/riscv64/disasm-riscv64.cc
@@ -68,11 +68,15 @@ class Decoder {
// Printing of common values.
void PrintRegister(int reg);
void PrintFPURegister(int freg);
+ void PrintVRegister(int reg);
void PrintFPUStatusRegister(int freg);
void PrintRs1(Instruction* instr);
void PrintRs2(Instruction* instr);
void PrintRd(Instruction* instr);
+ void PrintUimm(Instruction* instr);
void PrintVs1(Instruction* instr);
+ void PrintVs2(Instruction* instr);
+ void PrintVd(Instruction* instr);
void PrintFRs1(Instruction* instr);
void PrintFRs2(Instruction* instr);
void PrintFRs3(Instruction* instr);
@@ -96,10 +100,15 @@ class Decoder {
void PrintRvcImm8Addi4spn(Instruction* instr);
void PrintRvcImm11CJ(Instruction* instr);
void PrintRvcImm8B(Instruction* instr);
+ void PrintRvvVm(Instruction* instr);
void PrintAcquireRelease(Instruction* instr);
void PrintBranchOffset(Instruction* instr);
void PrintStoreOffset(Instruction* instr);
void PrintCSRReg(Instruction* instr);
+ void PrintRvvSEW(Instruction* instr);
+ void PrintRvvLMUL(Instruction* instr);
+ void PrintRvvSimm5(Instruction* instr);
+ void PrintRvvUimm5(Instruction* instr);
void PrintRoundingMode(Instruction* instr);
void PrintMemoryOrder(Instruction* instr, bool is_pred);
@@ -123,6 +132,14 @@ class Decoder {
void DecodeCJType(Instruction* instr);
void DecodeCBType(Instruction* instr);
+ void DecodeVType(Instruction* instr);
+ void DecodeRvvIVV(Instruction* instr);
+ void DecodeRvvIVI(Instruction* instr);
+ void DecodeRvvIVX(Instruction* instr);
+ void DecodeRvvVL(Instruction* instr);
+ void DecodeRvvVS(Instruction* instr);
+ void DecodeRvvMVV(Instruction* instr);
+ void DecodeRvvMVX(Instruction* instr);
// Printing of instruction name.
void PrintInstructionName(Instruction* instr);
@@ -137,6 +154,8 @@ class Decoder {
void Format(Instruction* instr, const char* format);
void Unknown(Instruction* instr);
+ int switch_sew(Instruction* instr);
+ int switch_nf(Instruction* instr);
const disasm::NameConverter& converter_;
v8::base::Vector<char> out_buffer_;
int out_buffer_pos_;
@@ -164,6 +183,10 @@ void Decoder::PrintRegister(int reg) {
Print(converter_.NameOfCPURegister(reg));
}
+void Decoder::PrintVRegister(int reg) {
+ Print(v8::internal::VRegisters::Name(reg));
+}
+
void Decoder::PrintRs1(Instruction* instr) {
int reg = instr->Rs1Value();
PrintRegister(reg);
@@ -179,11 +202,26 @@ void Decoder::PrintRd(Instruction* instr) {
PrintRegister(reg);
}
-void Decoder::PrintVs1(Instruction* instr) {
+void Decoder::PrintUimm(Instruction* instr) {
int val = instr->Rs1Value();
out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", val);
}
+void Decoder::PrintVs1(Instruction* instr) {
+ int reg = instr->Vs1Value();
+ PrintVRegister(reg);
+}
+
+void Decoder::PrintVs2(Instruction* instr) {
+ int reg = instr->Vs2Value();
+ PrintVRegister(reg);
+}
+
+void Decoder::PrintVd(Instruction* instr) {
+ int reg = instr->VdValue();
+ PrintVRegister(reg);
+}
+
-// Print the FPUregister name according to the active name converter.
+// Print the FPU register name according to the active name converter.
void Decoder::PrintFPURegister(int freg) {
Print(converter_.NameOfXMMRegister(freg));
@@ -247,6 +285,26 @@ void Decoder::PrintStoreOffset(Instruction* instr) {
out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
}
+void Decoder::PrintRvvSEW(Instruction* instr) {
+ const char* sew = instr->RvvSEW();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%s", sew);
+}
+
+void Decoder::PrintRvvLMUL(Instruction* instr) {
+ const char* lmul = instr->RvvLMUL();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%s", lmul);
+}
+
+void Decoder::PrintRvvSimm5(Instruction* instr) {
+ const int simm5 = instr->RvvSimm5();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", simm5);
+}
+
+void Decoder::PrintRvvUimm5(Instruction* instr) {
+ const uint32_t uimm5 = instr->RvvUimm5();
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", uimm5);
+}
+
void Decoder::PrintImm20U(Instruction* instr) {
int32_t imm = instr->Imm20UValue();
out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
@@ -335,6 +393,13 @@ void Decoder::PrintRvcImm8B(Instruction* instr) {
out_buffer_pos_ += base::SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
}
+void Decoder::PrintRvvVm(Instruction* instr) {
+ uint8_t imm = instr->RvvVM();
+ if (imm == 0) {
+ out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, " vm");
+ }
+}
+
void Decoder::PrintAcquireRelease(Instruction* instr) {
bool aq = instr->AqValue();
bool rl = instr->RlValue();
@@ -724,13 +789,50 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
DCHECK(STRING_STARTS_WITH(format, "suc"));
PrintMemoryOrder(instr, false);
return 3;
+ } else if (format[1] == 'e') {
+ DCHECK(STRING_STARTS_WITH(format, "sew"));
+ PrintRvvSEW(instr);
+ return 3;
+ } else if (format[1] == 'i') {
+ DCHECK(STRING_STARTS_WITH(format, "simm5"));
+ PrintRvvSimm5(instr);
+ return 5;
}
UNREACHABLE();
}
-    case 'v': {  // 'vs1: Raw values from register fields
+    case 'v': {  // 'vd, 'vs1, 'vs2, 'vm: RVV vector register fields
- DCHECK(STRING_STARTS_WITH(format, "vs1"));
- PrintVs1(instr);
- return 3;
+ if (format[1] == 'd') {
+ DCHECK(STRING_STARTS_WITH(format, "vd"));
+ PrintVd(instr);
+ return 2;
+ } else if (format[2] == '1') {
+ DCHECK(STRING_STARTS_WITH(format, "vs1"));
+ PrintVs1(instr);
+ return 3;
+ } else if (format[2] == '2') {
+ DCHECK(STRING_STARTS_WITH(format, "vs2"));
+ PrintVs2(instr);
+ return 3;
+ } else {
+ DCHECK(STRING_STARTS_WITH(format, "vm"));
+ PrintRvvVm(instr);
+ return 2;
+ }
+ }
+ case 'l': {
+ DCHECK(STRING_STARTS_WITH(format, "lmul"));
+ PrintRvvLMUL(instr);
+ return 4;
+ }
+ case 'u': {
+ if (STRING_STARTS_WITH(format, "uimm5")) {
+ PrintRvvUimm5(instr);
+ return 5;
+ } else {
+ DCHECK(STRING_STARTS_WITH(format, "uimm"));
+ PrintUimm(instr);
+ return 4;
+ }
}
case 't': { // 'target: target of branch instructions'
DCHECK(STRING_STARTS_WITH(format, "target"));
@@ -1308,256 +1410,265 @@ void Decoder::DecodeR4Type(Instruction* instr) {
}
void Decoder::DecodeIType(Instruction* instr) {
- switch (instr->InstructionBits() & kITypeMask) {
- case RO_JALR:
- if (instr->RdValue() == zero_reg.code() &&
- instr->Rs1Value() == ra.code() && instr->Imm12Value() == 0)
- Format(instr, "ret");
- else if (instr->RdValue() == zero_reg.code() && instr->Imm12Value() == 0)
- Format(instr, "jr 'rs1");
- else if (instr->RdValue() == ra.code() && instr->Imm12Value() == 0)
- Format(instr, "jalr 'rs1");
- else
- Format(instr, "jalr 'rd, 'imm12('rs1)'target");
- break;
- case RO_LB:
- Format(instr, "lb 'rd, 'imm12('rs1)");
- break;
- case RO_LH:
- Format(instr, "lh 'rd, 'imm12('rs1)");
- break;
- case RO_LW:
- Format(instr, "lw 'rd, 'imm12('rs1)");
- break;
- case RO_LBU:
- Format(instr, "lbu 'rd, 'imm12('rs1)");
- break;
- case RO_LHU:
- Format(instr, "lhu 'rd, 'imm12('rs1)");
- break;
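+  // vl_vs_width() returns -1 for ordinary scalar encodings; any other
+  // width marks an RVV load (which shares the LOAD_FP base opcode) and is
+  // routed to the vector-load decoder.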
+ if (instr->vl_vs_width() != -1) {
+ DecodeRvvVL(instr);
+ } else {
+ switch (instr->InstructionBits() & kITypeMask) {
+ case RO_JALR:
+ if (instr->RdValue() == zero_reg.code() &&
+ instr->Rs1Value() == ra.code() && instr->Imm12Value() == 0)
+ Format(instr, "ret");
+ else if (instr->RdValue() == zero_reg.code() &&
+ instr->Imm12Value() == 0)
+ Format(instr, "jr 'rs1");
+ else if (instr->RdValue() == ra.code() && instr->Imm12Value() == 0)
+ Format(instr, "jalr 'rs1");
+ else
+ Format(instr, "jalr 'rd, 'imm12('rs1)");
+ break;
+ case RO_LB:
+ Format(instr, "lb 'rd, 'imm12('rs1)");
+ break;
+ case RO_LH:
+ Format(instr, "lh 'rd, 'imm12('rs1)");
+ break;
+ case RO_LW:
+ Format(instr, "lw 'rd, 'imm12('rs1)");
+ break;
+ case RO_LBU:
+ Format(instr, "lbu 'rd, 'imm12('rs1)");
+ break;
+ case RO_LHU:
+ Format(instr, "lhu 'rd, 'imm12('rs1)");
+ break;
#ifdef V8_TARGET_ARCH_64_BIT
- case RO_LWU:
- Format(instr, "lwu 'rd, 'imm12('rs1)");
- break;
- case RO_LD:
- Format(instr, "ld 'rd, 'imm12('rs1)");
- break;
+ case RO_LWU:
+ Format(instr, "lwu 'rd, 'imm12('rs1)");
+ break;
+ case RO_LD:
+ Format(instr, "ld 'rd, 'imm12('rs1)");
+ break;
#endif /*V8_TARGET_ARCH_64_BIT*/
- case RO_ADDI:
- if (instr->Imm12Value() == 0) {
- if (instr->RdValue() == zero_reg.code() &&
- instr->Rs1Value() == zero_reg.code())
- Format(instr, "nop");
+ case RO_ADDI:
+ if (instr->Imm12Value() == 0) {
+ if (instr->RdValue() == zero_reg.code() &&
+ instr->Rs1Value() == zero_reg.code())
+ Format(instr, "nop");
+ else
+ Format(instr, "mv 'rd, 'rs1");
+ } else if (instr->Rs1Value() == zero_reg.code()) {
+ Format(instr, "li 'rd, 'imm12");
+ } else {
+ Format(instr, "addi 'rd, 'rs1, 'imm12");
+ }
+ break;
+ case RO_SLTI:
+ Format(instr, "slti 'rd, 'rs1, 'imm12");
+ break;
+ case RO_SLTIU:
+ if (instr->Imm12Value() == 1)
+ Format(instr, "seqz 'rd, 'rs1");
else
- Format(instr, "mv 'rd, 'rs1");
- } else if (instr->Rs1Value() == zero_reg.code()) {
- Format(instr, "li 'rd, 'imm12");
- } else {
- Format(instr, "addi 'rd, 'rs1, 'imm12");
- }
- break;
- case RO_SLTI:
- Format(instr, "slti 'rd, 'rs1, 'imm12");
- break;
- case RO_SLTIU:
- if (instr->Imm12Value() == 1)
- Format(instr, "seqz 'rd, 'rs1");
- else
- Format(instr, "sltiu 'rd, 'rs1, 'imm12");
- break;
- case RO_XORI:
- if (instr->Imm12Value() == -1)
- Format(instr, "not 'rd, 'rs1");
- else
- Format(instr, "xori 'rd, 'rs1, 'imm12x");
- break;
- case RO_ORI:
- Format(instr, "ori 'rd, 'rs1, 'imm12x");
- break;
- case RO_ANDI:
- Format(instr, "andi 'rd, 'rs1, 'imm12x");
- break;
- case RO_SLLI:
- Format(instr, "slli 'rd, 'rs1, 's64");
- break;
- case RO_SRLI: { // RO_SRAI
- if (!instr->IsArithShift()) {
- Format(instr, "srli 'rd, 'rs1, 's64");
- } else {
- Format(instr, "srai 'rd, 'rs1, 's64");
+ Format(instr, "sltiu 'rd, 'rs1, 'imm12");
+ break;
+ case RO_XORI:
+ if (instr->Imm12Value() == -1)
+ Format(instr, "not 'rd, 'rs1");
+ else
+ Format(instr, "xori 'rd, 'rs1, 'imm12x");
+ break;
+ case RO_ORI:
+ Format(instr, "ori 'rd, 'rs1, 'imm12x");
+ break;
+ case RO_ANDI:
+ Format(instr, "andi 'rd, 'rs1, 'imm12x");
+ break;
+ case RO_SLLI:
+ Format(instr, "slli 'rd, 'rs1, 's64");
+ break;
+ case RO_SRLI: { // RO_SRAI
+ if (!instr->IsArithShift()) {
+ Format(instr, "srli 'rd, 'rs1, 's64");
+ } else {
+ Format(instr, "srai 'rd, 'rs1, 's64");
+ }
+ break;
}
- break;
- }
#ifdef V8_TARGET_ARCH_64_BIT
- case RO_ADDIW:
- if (instr->Imm12Value() == 0)
- Format(instr, "sext.w 'rd, 'rs1");
- else
- Format(instr, "addiw 'rd, 'rs1, 'imm12");
- break;
- case RO_SLLIW:
- Format(instr, "slliw 'rd, 'rs1, 's32");
- break;
- case RO_SRLIW: { // RO_SRAIW
- if (!instr->IsArithShift()) {
- Format(instr, "srliw 'rd, 'rs1, 's32");
- } else {
- Format(instr, "sraiw 'rd, 'rs1, 's32");
+ case RO_ADDIW:
+ if (instr->Imm12Value() == 0)
+ Format(instr, "sext.w 'rd, 'rs1");
+ else
+ Format(instr, "addiw 'rd, 'rs1, 'imm12");
+ break;
+ case RO_SLLIW:
+ Format(instr, "slliw 'rd, 'rs1, 's32");
+ break;
+ case RO_SRLIW: { // RO_SRAIW
+ if (!instr->IsArithShift()) {
+ Format(instr, "srliw 'rd, 'rs1, 's32");
+ } else {
+ Format(instr, "sraiw 'rd, 'rs1, 's32");
+ }
+ break;
}
- break;
- }
#endif /*V8_TARGET_ARCH_64_BIT*/
- case RO_FENCE:
- if (instr->MemoryOrder(true) == PSIORW &&
- instr->MemoryOrder(false) == PSIORW)
- Format(instr, "fence");
- else
- Format(instr, "fence 'pre, 'suc");
- break;
- case RO_ECALL: { // RO_EBREAK
- if (instr->Imm12Value() == 0) { // ECALL
- Format(instr, "ecall");
- } else if (instr->Imm12Value() == 1) { // EBREAK
- Format(instr, "ebreak");
- } else {
- UNSUPPORTED_RISCV();
+ case RO_FENCE:
+ if (instr->MemoryOrder(true) == PSIORW &&
+ instr->MemoryOrder(false) == PSIORW)
+ Format(instr, "fence");
+ else
+ Format(instr, "fence 'pre, 'suc");
+ break;
+ case RO_ECALL: { // RO_EBREAK
+ if (instr->Imm12Value() == 0) { // ECALL
+ Format(instr, "ecall");
+ } else if (instr->Imm12Value() == 1) { // EBREAK
+ Format(instr, "ebreak");
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
}
- break;
- }
- // TODO(riscv): use Zifencei Standard Extension macro block
- case RO_FENCE_I:
- Format(instr, "fence.i");
- break;
- // TODO(riscv): use Zicsr Standard Extension macro block
- case RO_CSRRW:
- if (instr->CsrValue() == csr_fcsr) {
+ // TODO(riscv): use Zifencei Standard Extension macro block
+ case RO_FENCE_I:
+ Format(instr, "fence.i");
+ break;
+ // TODO(riscv): use Zicsr Standard Extension macro block
+ // FIXME(RISC-V): Add special formatting for CSR registers
+ case RO_CSRRW:
+ if (instr->CsrValue() == csr_fcsr) {
+ if (instr->RdValue() == zero_reg.code())
+ Format(instr, "fscsr 'rs1");
+ else
+ Format(instr, "fscsr 'rd, 'rs1");
+ } else if (instr->CsrValue() == csr_frm) {
+ if (instr->RdValue() == zero_reg.code())
+ Format(instr, "fsrm 'rs1");
+ else
+ Format(instr, "fsrm 'rd, 'rs1");
+ } else if (instr->CsrValue() == csr_fflags) {
+ if (instr->RdValue() == zero_reg.code())
+ Format(instr, "fsflags 'rs1");
+ else
+ Format(instr, "fsflags 'rd, 'rs1");
+ } else if (instr->RdValue() == zero_reg.code()) {
+ Format(instr, "csrw 'csr, 'rs1");
+ } else {
+ Format(instr, "csrrw 'rd, 'csr, 'rs1");
+ }
+ break;
+ case RO_CSRRS:
+ if (instr->Rs1Value() == zero_reg.code()) {
+ switch (instr->CsrValue()) {
+ case csr_instret:
+ Format(instr, "rdinstret 'rd");
+ break;
+ case csr_instreth:
+ Format(instr, "rdinstreth 'rd");
+ break;
+ case csr_time:
+ Format(instr, "rdtime 'rd");
+ break;
+ case csr_timeh:
+ Format(instr, "rdtimeh 'rd");
+ break;
+ case csr_cycle:
+ Format(instr, "rdcycle 'rd");
+ break;
+ case csr_cycleh:
+ Format(instr, "rdcycleh 'rd");
+ break;
+ case csr_fflags:
+ Format(instr, "frflags 'rd");
+ break;
+ case csr_frm:
+ Format(instr, "frrm 'rd");
+ break;
+ case csr_fcsr:
+ Format(instr, "frcsr 'rd");
+ break;
+            default:
+              // Not a counter or FP-status alias; print the generic csrr
+              // pseudo-instruction.
+              Format(instr, "csrr 'rd, 'csr");
+          }
+        } else if (instr->RdValue() == zero_reg.code()) {
+ Format(instr, "csrs 'csr, 'rs1");
+ } else {
+ Format(instr, "csrrs 'rd, 'csr, 'rs1");
+ }
+ break;
+ case RO_CSRRC:
if (instr->RdValue() == zero_reg.code())
- Format(instr, "fscsr 'rs1");
+ Format(instr, "csrc 'csr, 'rs1");
else
- Format(instr, "fscsr 'rd, 'rs1");
- } else if (instr->CsrValue() == csr_frm) {
+ Format(instr, "csrrc 'rd, 'csr, 'rs1");
+ break;
+ case RO_CSRRWI:
if (instr->RdValue() == zero_reg.code())
- Format(instr, "fsrm 'rs1");
+ Format(instr, "csrwi 'csr, 'uimm");
else
- Format(instr, "fsrm 'rd, 'rs1");
- } else if (instr->CsrValue() == csr_fflags) {
+ Format(instr, "csrrwi 'rd, 'csr, 'uimm");
+ break;
+ case RO_CSRRSI:
if (instr->RdValue() == zero_reg.code())
- Format(instr, "fsflags 'rs1");
+ Format(instr, "csrsi 'csr, 'uimm");
else
- Format(instr, "fsflags 'rd, 'rs1");
- } else if (instr->RdValue() == zero_reg.code()) {
- Format(instr, "csrw 'csr, 'rs1");
- } else {
- Format(instr, "csrrw 'rd, 'csr, 'rs1");
- }
- break;
- case RO_CSRRS:
- if (instr->Rs1Value() == zero_reg.code()) {
- switch (instr->CsrValue()) {
- case csr_instret:
- Format(instr, "rdinstret 'rd");
- break;
- case csr_instreth:
- Format(instr, "rdinstreth 'rd");
- break;
- case csr_time:
- Format(instr, "rdtime 'rd");
- break;
- case csr_timeh:
- Format(instr, "rdtimeh 'rd");
- break;
- case csr_cycle:
- Format(instr, "rdcycle 'rd");
- break;
- case csr_cycleh:
- Format(instr, "rdcycleh 'rd");
- break;
- case csr_fflags:
- Format(instr, "frflags 'rd");
- break;
- case csr_frm:
- Format(instr, "frrm 'rd");
- break;
- case csr_fcsr:
- Format(instr, "frcsr 'rd");
- break;
- default:
- UNREACHABLE();
- }
- } else if (instr->Rs1Value() == zero_reg.code()) {
- Format(instr, "csrr 'rd, 'csr");
- } else if (instr->RdValue() == zero_reg.code()) {
- Format(instr, "csrs 'csr, 'rs1");
- } else {
- Format(instr, "csrrs 'rd, 'csr, 'rs1");
- }
- break;
- case RO_CSRRC:
- if (instr->RdValue() == zero_reg.code())
- Format(instr, "csrc 'csr, 'rs1");
- else
- Format(instr, "csrrc 'rd, 'csr, 'rs1");
- break;
- case RO_CSRRWI:
- if (instr->RdValue() == zero_reg.code())
- Format(instr, "csrwi 'csr, 'vs1");
- else
- Format(instr, "csrrwi 'rd, 'csr, 'vs1");
- break;
- case RO_CSRRSI:
- if (instr->RdValue() == zero_reg.code())
- Format(instr, "csrsi 'csr, 'vs1");
- else
- Format(instr, "csrrsi 'rd, 'csr, 'vs1");
- break;
- case RO_CSRRCI:
- if (instr->RdValue() == zero_reg.code())
- Format(instr, "csrci 'csr, 'vs1");
- else
- Format(instr, "csrrci 'rd, 'csr, 'vs1");
- break;
- // TODO(riscv): use F Extension macro block
- case RO_FLW:
- Format(instr, "flw 'fd, 'imm12('rs1)");
- break;
- // TODO(riscv): use D Extension macro block
- case RO_FLD:
- Format(instr, "fld 'fd, 'imm12('rs1)");
- break;
- default:
- UNSUPPORTED_RISCV();
+ Format(instr, "csrrsi 'rd, 'csr, 'uimm");
+ break;
+ case RO_CSRRCI:
+ if (instr->RdValue() == zero_reg.code())
+ Format(instr, "csrci 'csr, 'uimm");
+ else
+ Format(instr, "csrrci 'rd, 'csr, 'uimm");
+ break;
+ // TODO(riscv): use F Extension macro block
+ case RO_FLW:
+ Format(instr, "flw 'fd, 'imm12('rs1)");
+ break;
+ // TODO(riscv): use D Extension macro block
+ case RO_FLD:
+ Format(instr, "fld 'fd, 'imm12('rs1)");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
}
}
void Decoder::DecodeSType(Instruction* instr) {
- switch (instr->InstructionBits() & kSTypeMask) {
- case RO_SB:
- Format(instr, "sb 'rs2, 'offS('rs1)");
- break;
- case RO_SH:
- Format(instr, "sh 'rs2, 'offS('rs1)");
- break;
- case RO_SW:
- Format(instr, "sw 'rs2, 'offS('rs1)");
- break;
+ if (instr->vl_vs_width() != -1) {
+ DecodeRvvVS(instr);
+ } else {
+ switch (instr->InstructionBits() & kSTypeMask) {
+ case RO_SB:
+ Format(instr, "sb 'rs2, 'offS('rs1)");
+ break;
+ case RO_SH:
+ Format(instr, "sh 'rs2, 'offS('rs1)");
+ break;
+ case RO_SW:
+ Format(instr, "sw 'rs2, 'offS('rs1)");
+ break;
#ifdef V8_TARGET_ARCH_64_BIT
- case RO_SD:
- Format(instr, "sd 'rs2, 'offS('rs1)");
- break;
+ case RO_SD:
+ Format(instr, "sd 'rs2, 'offS('rs1)");
+ break;
#endif /*V8_TARGET_ARCH_64_BIT*/
- // TODO(riscv): use F Extension macro block
- case RO_FSW:
- Format(instr, "fsw 'fs2, 'offS('rs1)");
- break;
- // TODO(riscv): use D Extension macro block
- case RO_FSD:
- Format(instr, "fsd 'fs2, 'offS('rs1)");
- break;
- default:
- UNSUPPORTED_RISCV();
+ // TODO(riscv): use F Extension macro block
+ case RO_FSW:
+ Format(instr, "fsw 'fs2, 'offS('rs1)");
+ break;
+ // TODO(riscv): use D Extension macro block
+ case RO_FSD:
+ Format(instr, "fsd 'fs2, 'offS('rs1)");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ }
}
}
-
void Decoder::DecodeBType(Instruction* instr) {
switch (instr->InstructionBits() & kBTypeMask) {
case RO_BEQ:
@@ -1595,6 +1706,7 @@ void Decoder::DecodeUType(Instruction* instr) {
UNSUPPORTED_RISCV();
}
}
void Decoder::DecodeJType(Instruction* instr) {
// J Type doesn't have additional mask
switch (instr->BaseOpcodeValue()) {
@@ -1791,6 +1903,511 @@ void Decoder::DecodeCBType(Instruction* instr) {
}
}
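+
+// RVV (RISC-V "V" vector extension) decoders. The OP-V arithmetic groups
+// below are selected by funct3: IVV is vector-vector, IVI is
+// vector-immediate, and IVX is vector-scalar. The 'vm format specifier
+// appends " vm" when the instruction's mask bit is clear, i.e. when the
+// operation executes under the v0 mask.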
+void Decoder::DecodeRvvIVV(Instruction* instr) {
+ DCHECK_EQ(instr->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_IVV);
+ switch (instr->InstructionBits() & kVTypeMask) {
+ case RO_V_VADD_VV:
+ Format(instr, "vadd.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VSADD_VV:
+ Format(instr, "vsadd.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VSUB_VV:
+ Format(instr, "vsub.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VSSUB_VV:
+ Format(instr, "vssub.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMIN_VV:
+ Format(instr, "vmin.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMINU_VV:
+ Format(instr, "vminu.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMAX_VV:
+ Format(instr, "vmax.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMAXU_VV:
+ Format(instr, "vmaxu.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VAND_VV:
+ Format(instr, "vand.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VOR_VV:
+ Format(instr, "vor.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VXOR_VV:
+ Format(instr, "vxor.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VRGATHER_VV:
+ Format(instr, "vrgather.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMSEQ_VV:
+ Format(instr, "vmseq.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMSNE_VV:
+ Format(instr, "vmsne.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMSLT_VV:
+ Format(instr, "vmslt.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMSLTU_VV:
+ Format(instr, "vmsltu.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMSLE_VV:
+ Format(instr, "vmsle.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMSLEU_VV:
+ Format(instr, "vmsleu.vv 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VMV_VV:
+ if (instr->RvvVM()) {
+ Format(instr, "vmv.vv 'vd, 'vs1");
+ } else {
+ Format(instr, "vmerge.vvm 'vd, 'vs2, 'vs1, v0");
+ }
+ break;
+ case RO_V_VADC_VV:
+ if (!instr->RvvVM()) {
+ Format(instr, "vadc.vvm 'vd, 'vs2, 'vs1");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case RO_V_VMADC_VV:
+ if (!instr->RvvVM()) {
+ Format(instr, "vmadc.vvm 'vd, 'vs2, 'vs1");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
+
+void Decoder::DecodeRvvIVI(Instruction* instr) {
+ DCHECK_EQ(instr->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_IVI);
+ switch (instr->InstructionBits() & kVTypeMask) {
+ case RO_V_VADD_VI:
+ Format(instr, "vadd.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VSADD_VI:
+ Format(instr, "vsadd.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VRSUB_VI:
+ Format(instr, "vrsub.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VAND_VI:
+ Format(instr, "vand.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VOR_VI:
+ Format(instr, "vor.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VXOR_VI:
+ Format(instr, "vxor.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VRGATHER_VI:
+ Format(instr, "vrgather.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VMV_VI:
+ if (instr->RvvVM()) {
+ Format(instr, "vmv.vi 'vd, 'simm5");
+ } else {
+ Format(instr, "vmerge.vim 'vd, 'vs2, 'simm5, v0");
+ }
+ break;
+ case RO_V_VMSEQ_VI:
+ Format(instr, "vmseq.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VMSNE_VI:
+ Format(instr, "vmsne.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VMSLEU_VI:
+ Format(instr, "vmsleu.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VMSLE_VI:
+ Format(instr, "vmsle.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VMSGTU_VI:
+ Format(instr, "vmsgtu.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VMSGT_VI:
+ Format(instr, "vmsgt.vi 'vd, 'vs2, 'simm5'vm");
+ break;
+ case RO_V_VSLIDEDOWN_VI:
+ Format(instr, "vslidedown.vi 'vd, 'vs2, 'uimm5'vm");
+ break;
+ case RO_V_VSRL_VI:
+ Format(instr, "vsrl.vi 'vd, 'vs2, 'uimm5'vm");
+ break;
+ case RO_V_VSLL_VI:
+ Format(instr, "vsll.vi 'vd, 'vs2, 'uimm5'vm");
+ break;
+ case RO_V_VADC_VI:
+ if (!instr->RvvVM()) {
+ Format(instr, "vadc.vim 'vd, 'vs2, 'uimm5");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case RO_V_VMADC_VI:
+ if (!instr->RvvVM()) {
+ Format(instr, "vmadc.vim 'vd, 'vs2, 'uimm5");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
+
+void Decoder::DecodeRvvIVX(Instruction* instr) {
+ DCHECK_EQ(instr->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_IVX);
+ switch (instr->InstructionBits() & kVTypeMask) {
+ case RO_V_VADD_VX:
+ Format(instr, "vadd.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VSADD_VX:
+ Format(instr, "vsadd.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VSUB_VX:
+ Format(instr, "vsub.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VSSUB_VX:
+ Format(instr, "vssub.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VRSUB_VX:
+ Format(instr, "vrsub.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMIN_VX:
+ Format(instr, "vmin.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMINU_VX:
+ Format(instr, "vminu.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMAX_VX:
+ Format(instr, "vmax.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMAXU_VX:
+ Format(instr, "vmaxu.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VAND_VX:
+ Format(instr, "vand.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VOR_VX:
+ Format(instr, "vor.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VXOR_VX:
+ Format(instr, "vxor.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VRGATHER_VX:
+ Format(instr, "vrgather.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMV_VX:
+ if (instr->RvvVM()) {
+ Format(instr, "vmv.vx 'vd, 'rs1");
+ } else {
+ Format(instr, "vmerge.vxm 'vd, 'vs2, 'rs1, v0");
+ }
+ break;
+ case RO_V_VMSEQ_VX:
+ Format(instr, "vmseq.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMSNE_VX:
+ Format(instr, "vmsne.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMSLT_VX:
+ Format(instr, "vmslt.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMSLTU_VX:
+ Format(instr, "vmsltu.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMSLE_VX:
+ Format(instr, "vmsle.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMSLEU_VX:
+ Format(instr, "vmsleu.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMSGT_VX:
+ Format(instr, "vmsgt.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VMSGTU_VX:
+ Format(instr, "vmsgtu.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VSLIDEDOWN_VX:
+ Format(instr, "vslidedown.vx 'vd, 'vs2, 'rs1'vm");
+ break;
+ case RO_V_VADC_VX:
+ if (!instr->RvvVM()) {
+ Format(instr, "vadc.vxm 'vd, 'vs2, 'rs1");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case RO_V_VMADC_VX:
+ if (!instr->RvvVM()) {
+ Format(instr, "vmadc.vxm 'vd, 'vs2, 'rs1");
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
+
+void Decoder::DecodeRvvMVV(Instruction* instr) {
+ DCHECK_EQ(instr->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_MVV);
+ switch (instr->InstructionBits() & kVTypeMask) {
+ case RO_V_VWXUNARY0:
+ if (instr->Vs1Value() == 0x0) {
+ Format(instr, "vmv.x.s 'rd, 'vs2");
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ case RO_V_VREDMAXU:
+ Format(instr, "vredmaxu.vs 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VREDMAX:
+ Format(instr, "vredmax.vs 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VREDMIN:
+ Format(instr, "vredmin.vs 'vd, 'vs2, 'vs1'vm");
+ break;
+ case RO_V_VREDMINU:
+ Format(instr, "vredminu.vs 'vd, 'vs2, 'vs1'vm");
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
+
+void Decoder::DecodeRvvMVX(Instruction* instr) {
+ DCHECK_EQ(instr->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_MVX);
+ switch (instr->InstructionBits() & kVTypeMask) {
+ case RO_V_VRXUNARY0:
+ if (instr->Vs2Value() == 0x0) {
+ Format(instr, "vmv.s.x 'vd, 'rs1");
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
+
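+// Top-level OP-V dispatch: funct3 selects one of the operand categories
+// handled above; what remains are the configuration instructions, where
+// bit 31 separates vsetvli from vsetvl/vsetivli and bit 30 distinguishes
+// vsetivli from vsetvl.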
+void Decoder::DecodeVType(Instruction* instr) {
+ switch (instr->InstructionBits() & (kBaseOpcodeMask | kFunct3Mask)) {
+ case OP_IVV:
+ DecodeRvvIVV(instr);
+ return;
+ case OP_FVV:
+ UNSUPPORTED_RISCV();
+ return;
+ case OP_MVV:
+ DecodeRvvMVV(instr);
+ return;
+ case OP_IVI:
+ DecodeRvvIVI(instr);
+ return;
+ case OP_IVX:
+ DecodeRvvIVX(instr);
+ return;
+ case OP_FVF:
+ UNSUPPORTED_RISCV();
+ return;
+ case OP_MVX:
+ DecodeRvvMVX(instr);
+ return;
+ }
+ switch (instr->InstructionBits() &
+ (kBaseOpcodeMask | kFunct3Mask | 0x80000000)) {
+ case RO_V_VSETVLI:
+ Format(instr, "vsetvli 'rd, 'rs1, 'sew, 'lmul");
+ break;
+ case RO_V_VSETVL:
+ if (!(instr->InstructionBits() & 0x40000000)) {
+ Format(instr, "vsetvl 'rd, 'rs1, 'rs2");
+ } else {
+ Format(instr, "vsetivli 'rd, 'uimm, 'sew, 'lmul");
+ }
+ break;
+ default:
+ UNSUPPORTED_RISCV();
+ break;
+ }
+}
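+
+// Decodes the nf (fields-per-segment) encoding of segment loads/stores:
+// bits 31..29 hold nf - 1, so the helper yields 2..8. Callers only invoke
+// it for the vlseg/vsseg instruction groups.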
+int Decoder::switch_nf(Instruction* instr) {
+ int nf = 0;
+ switch (instr->InstructionBits() & kRvvNfMask) {
+ case 0x20000000:
+ nf = 2;
+ break;
+ case 0x40000000:
+ nf = 3;
+ break;
+ case 0x60000000:
+ nf = 4;
+ break;
+ case 0x80000000:
+ nf = 5;
+ break;
+ case 0xa0000000:
+ nf = 6;
+ break;
+ case 0xc0000000:
+ nf = 7;
+ break;
+ case 0xe0000000:
+ nf = 8;
+ break;
+ }
+ return nf;
+}
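+
+// Vector loads: unit-stride vle<w>, fault-only-first vle<w>ff, strided
+// vlse<w>, indexed vlxei<w>, and their segment variants. The element width
+// <w> comes from vl_vs_width() and is spliced into the mnemonic with
+// snprintf before Format() expands the operand specifiers.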
+void Decoder::DecodeRvvVL(Instruction* instr) {
+ char str[50];
+ uint32_t instr_temp =
+ instr->InstructionBits() & (kRvvMopMask | kRvvNfMask | kBaseOpcodeMask);
+ if (RO_V_VL == instr_temp) {
+ if (!(instr->InstructionBits() & (kRvvRs2Mask))) {
+ snprintf(str, sizeof(str), "vle%d.v 'vd, ('rs1)'vm",
+ instr->vl_vs_width());
+ Format(instr, str);
+ } else {
+ snprintf(str, sizeof(str), "vle%dff.v 'vd, ('rs1)'vm",
+ instr->vl_vs_width());
+ Format(instr, str);
+ }
+ } else if (RO_V_VLS == instr_temp) {
+ snprintf(str, sizeof(str), "vlse%d.v 'vd, ('rs1), 'rs2'vm",
+ instr->vl_vs_width());
+ Format(instr, str);
+
+ } else if (RO_V_VLX == instr_temp) {
+ snprintf(str, sizeof(str), "vlxei%d.v 'vd, ('rs1), 'vs2'vm",
+ instr->vl_vs_width());
+ Format(instr, str);
+ } else if (RO_V_VLSEG2 == instr_temp || RO_V_VLSEG3 == instr_temp ||
+ RO_V_VLSEG4 == instr_temp || RO_V_VLSEG5 == instr_temp ||
+ RO_V_VLSEG6 == instr_temp || RO_V_VLSEG7 == instr_temp ||
+ RO_V_VLSEG8 == instr_temp) {
+ if (!(instr->InstructionBits() & (kRvvRs2Mask))) {
+ snprintf(str, sizeof(str), "vlseg%de%d.v 'vd, ('rs1)'vm",
+ switch_nf(instr), instr->vl_vs_width());
+ } else {
+ snprintf(str, sizeof(str), "vlseg%de%dff.v 'vd, ('rs1)'vm",
+ switch_nf(instr), instr->vl_vs_width());
+ }
+ Format(instr, str);
+ } else if (RO_V_VLSSEG2 == instr_temp || RO_V_VLSSEG3 == instr_temp ||
+ RO_V_VLSSEG4 == instr_temp || RO_V_VLSSEG5 == instr_temp ||
+ RO_V_VLSSEG6 == instr_temp || RO_V_VLSSEG7 == instr_temp ||
+ RO_V_VLSSEG8 == instr_temp) {
+ snprintf(str, sizeof(str), "vlsseg%de%d.v 'vd, ('rs1), 'rs2'vm",
+ switch_nf(instr), instr->vl_vs_width());
+ Format(instr, str);
+ } else if (RO_V_VLXSEG2 == instr_temp || RO_V_VLXSEG3 == instr_temp ||
+ RO_V_VLXSEG4 == instr_temp || RO_V_VLXSEG5 == instr_temp ||
+ RO_V_VLXSEG6 == instr_temp || RO_V_VLXSEG7 == instr_temp ||
+ RO_V_VLXSEG8 == instr_temp) {
+ snprintf(str, sizeof(str), "vlxseg%dei%d.v 'vd, ('rs1), 'vs2'vm",
+ switch_nf(instr), instr->vl_vs_width());
+ Format(instr, str);
+ }
+}
+
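+// Maps the mew/width bits of a vector load/store to its element width in
+// bits (8 through 1024); returns -1 when the instruction is not a
+// LOAD_FP/STORE_FP encoding.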
+int Decoder::switch_sew(Instruction* instr) {
+ int width = 0;
+ if ((instr->InstructionBits() & kBaseOpcodeMask) != LOAD_FP &&
+ (instr->InstructionBits() & kBaseOpcodeMask) != STORE_FP)
+ return -1;
+ switch (instr->InstructionBits() & (kRvvWidthMask | kRvvMewMask)) {
+ case 0x0:
+ width = 8;
+ break;
+ case 0x00005000:
+ width = 16;
+ break;
+ case 0x00006000:
+ width = 32;
+ break;
+ case 0x00007000:
+ width = 64;
+ break;
+ case 0x10000000:
+ width = 128;
+ break;
+ case 0x10005000:
+ width = 256;
+ break;
+ case 0x10006000:
+ width = 512;
+ break;
+ case 0x10007000:
+ width = 1024;
+ break;
+ default:
+ width = -1;
+ break;
+ }
+ return width;
+}
+
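+// Vector stores mirror the load forms: unit-stride vse<w>, strided
+// vsse<w>, indexed-ordered vsxei<w>, indexed-unordered vsuxei<w>, and the
+// corresponding segment variants.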
+void Decoder::DecodeRvvVS(Instruction* instr) {
+ char str[50];
+ uint32_t instr_temp =
+ instr->InstructionBits() & (kRvvMopMask | kRvvNfMask | kBaseOpcodeMask);
+ if (RO_V_VS == instr_temp) {
+ snprintf(str, sizeof(str), "vse%d.v 'vd, ('rs1)'vm",
+ instr->vl_vs_width());
+ Format(instr, str);
+ } else if (RO_V_VSS == instr_temp) {
+ snprintf(str, sizeof(str), "vsse%d.v 'vd, ('rs1), 'rs2'vm",
+ instr->vl_vs_width());
+ Format(instr, str);
+ } else if (RO_V_VSX == instr_temp) {
+ snprintf(str, sizeof(str), "vsxei%d.v 'vd, ('rs1), 'vs2'vm",
+ instr->vl_vs_width());
+ Format(instr, str);
+ } else if (RO_V_VSU == instr_temp) {
+ snprintf(str, sizeof(str), "vsuxei%d.v 'vd, ('rs1), 'vs2'vm",
+ instr->vl_vs_width());
+ Format(instr, str);
+ } else if (RO_V_VSSEG2 == instr_temp || RO_V_VSSEG3 == instr_temp ||
+ RO_V_VSSEG4 == instr_temp || RO_V_VSSEG5 == instr_temp ||
+ RO_V_VSSEG6 == instr_temp || RO_V_VSSEG7 == instr_temp ||
+ RO_V_VSSEG8 == instr_temp) {
+ snprintf(str, sizeof(str), "vsseg%de%d.v 'vd, ('rs1)'vm",
+ switch_nf(instr), instr->vl_vs_width());
+ Format(instr, str);
+ } else if (RO_V_VSSSEG2 == instr_temp || RO_V_VSSSEG3 == instr_temp ||
+ RO_V_VSSSEG4 == instr_temp || RO_V_VSSSEG5 == instr_temp ||
+ RO_V_VSSSEG6 == instr_temp || RO_V_VSSSEG7 == instr_temp ||
+ RO_V_VSSSEG8 == instr_temp) {
+ snprintf(str, sizeof(str), "vssseg%de%d.v 'vd, ('rs1), 'rs2'vm",
+ switch_nf(instr), instr->vl_vs_width());
+ Format(instr, str);
+ } else if (RO_V_VSXSEG2 == instr_temp || RO_V_VSXSEG3 == instr_temp ||
+ RO_V_VSXSEG4 == instr_temp || RO_V_VSXSEG5 == instr_temp ||
+ RO_V_VSXSEG6 == instr_temp || RO_V_VSXSEG7 == instr_temp ||
+ RO_V_VSXSEG8 == instr_temp) {
+ snprintf(str, sizeof(str), "vsxseg%dei%d.v 'vd, ('rs1), 'vs2'vm",
+ switch_nf(instr), instr->vl_vs_width());
+ Format(instr, str);
+ }
+}
+
// Disassemble the instruction at *instr_ptr into the output buffer.
// All instructions are one word long, except for the simulator
// pseudo-instruction stop(msg). For that one special case, we return
@@ -1849,6 +2466,9 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
case Instruction::kCBType:
DecodeCBType(instr);
break;
+ case Instruction::kVType:
+ DecodeVType(instr);
+ break;
default:
Format(instr, "UNSUPPORTED");
UNSUPPORTED_RISCV();
@@ -1882,7 +2502,7 @@ const char* NameConverter::NameOfXMMRegister(int reg) const {
const char* NameConverter::NameOfByteCPURegister(int reg) const {
UNREACHABLE(); // RISC-V does not have the concept of a byte register.
- return "nobytereg";
}
const char* NameConverter::NameInCode(byte* addr) const {
diff --git a/deps/v8/src/diagnostics/s390/eh-frame-s390.cc b/deps/v8/src/diagnostics/s390/eh-frame-s390.cc
index 4f5994c8da..6da3095e86 100644
--- a/deps/v8/src/diagnostics/s390/eh-frame-s390.cc
+++ b/deps/v8/src/diagnostics/s390/eh-frame-s390.cc
@@ -38,7 +38,6 @@ int EhFrameWriter::RegisterToDwarfCode(Register name) {
return kR0DwarfCode;
default:
UNIMPLEMENTED();
- return -1;
}
}
@@ -55,7 +54,6 @@ const char* EhFrameDisassembler::DwarfRegisterCodeToString(int code) {
return "sp";
default:
UNIMPLEMENTED();
- return nullptr;
}
}
diff --git a/deps/v8/src/diagnostics/system-jit-win.cc b/deps/v8/src/diagnostics/system-jit-win.cc
index c77c223183..5ca36e67e6 100644
--- a/deps/v8/src/diagnostics/system-jit-win.cc
+++ b/deps/v8/src/diagnostics/system-jit-win.cc
@@ -4,7 +4,11 @@
#include "src/diagnostics/system-jit-win.h"
-#include "include/v8.h"
+#include "include/v8-callbacks.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-primitive.h"
+#include "include/v8-script.h"
#include "src/api/api-inl.h"
#include "src/base/lazy-instance.h"
#include "src/base/logging.h"
diff --git a/deps/v8/src/diagnostics/unwinder.cc b/deps/v8/src/diagnostics/unwinder.cc
index 68ff679595..00a5e7dbe6 100644
--- a/deps/v8/src/diagnostics/unwinder.cc
+++ b/deps/v8/src/diagnostics/unwinder.cc
@@ -6,7 +6,7 @@
#include <algorithm>
-#include "include/v8.h"
+#include "include/v8-unwinder.h"
#include "src/execution/frame-constants.h"
#include "src/execution/pointer-authentication.h"
diff --git a/deps/v8/src/diagnostics/unwinding-info-win64.cc b/deps/v8/src/diagnostics/unwinding-info-win64.cc
index 2a0cf4ff02..d50767421a 100644
--- a/deps/v8/src/diagnostics/unwinding-info-win64.cc
+++ b/deps/v8/src/diagnostics/unwinding-info-win64.cc
@@ -22,36 +22,6 @@
// This has to come after windows.h.
#include <versionhelpers.h> // For IsWindows8OrGreater().
-// Forward declaration to keep this independent of Win8
-NTSYSAPI
-DWORD
-NTAPI
-RtlAddGrowableFunctionTable(
- _Out_ PVOID* DynamicTable,
- _In_reads_(MaximumEntryCount) PRUNTIME_FUNCTION FunctionTable,
- _In_ DWORD EntryCount,
- _In_ DWORD MaximumEntryCount,
- _In_ ULONG_PTR RangeBase,
- _In_ ULONG_PTR RangeEnd
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlGrowFunctionTable(
- _Inout_ PVOID DynamicTable,
- _In_ DWORD NewEntryCount
- );
-
-
-NTSYSAPI
-void
-NTAPI
-RtlDeleteGrowableFunctionTable(
- _In_ PVOID DynamicTable
- );
-
namespace v8 {
namespace internal {
namespace win64_unwindinfo {
diff --git a/deps/v8/src/diagnostics/unwinding-info-win64.h b/deps/v8/src/diagnostics/unwinding-info-win64.h
index ca66437e00..bb32f49e5d 100644
--- a/deps/v8/src/diagnostics/unwinding-info-win64.h
+++ b/deps/v8/src/diagnostics/unwinding-info-win64.h
@@ -5,7 +5,9 @@
#ifndef V8_DIAGNOSTICS_UNWINDING_INFO_WIN64_H_
#define V8_DIAGNOSTICS_UNWINDING_INFO_WIN64_H_
-#include "include/v8.h"
+#include <vector>
+
+#include "include/v8-callbacks.h"
#include "include/v8config.h"
#include "src/common/globals.h"
diff --git a/deps/v8/src/diagnostics/x64/disasm-x64.cc b/deps/v8/src/diagnostics/x64/disasm-x64.cc
index 3ddb29e064..ce0a8a4b3f 100644
--- a/deps/v8/src/diagnostics/x64/disasm-x64.cc
+++ b/deps/v8/src/diagnostics/x64/disasm-x64.cc
@@ -244,8 +244,9 @@ static const InstructionDesc cmov_instructions[16] = {
{"cmovle", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false},
{"cmovg", TWO_OPERANDS_INSTR, REG_OPER_OP_ORDER, false}};
-static const char* const cmp_pseudo_op[8] = {"eq", "lt", "le", "unord",
- "neq", "nlt", "nle", "ord"};
+static const char* const cmp_pseudo_op[16] = {
+ "eq", "lt", "le", "unord", "neq", "nlt", "nle", "ord",
+ "eq_uq", "nge", "ngt", "false", "neq_oq", "ge", "gt", "true"};
namespace {
int8_t Imm8(const uint8_t* data) {
@@ -279,6 +280,10 @@ int64_t Imm64(const uint8_t* data) {
//------------------------------------------------------------------------------
// DisassemblerX64 implementation.
+// Forward-declare NameOfYMMRegister to keep its implementation with the
+ // NameConverter methods and register name arrays at the bottom of the file.
+const char* NameOfYMMRegister(int reg);
+
// A new DisassemblerX64 object is created to disassemble each instruction.
// The object can only disassemble a single instruction.
class DisassemblerX64 {
@@ -356,6 +361,12 @@ class DisassemblerX64 {
return (checked & 4) == 0;
}
+ bool vex_256() const {
+ DCHECK(vex_byte0_ == VEX3_PREFIX || vex_byte0_ == VEX2_PREFIX);
+ byte checked = vex_byte0_ == VEX3_PREFIX ? vex_byte2_ : vex_byte1_;
+ return (checked & 4) != 0;
+ }
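// Illustrative note (not part of this commit): bit 2 of the final VEX prefix
// byte is VEX.L, the vector-length selector: 0 selects 128-bit XMM operands
// and 1 selects 256-bit YMM operands. vex_128() and vex_256() read the same
// bit with opposite polarity, which is what lets NameOfAVXRegister() below
// pick the matching register file.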
+
bool vex_none() {
DCHECK(vex_byte0_ == VEX3_PREFIX || vex_byte0_ == VEX2_PREFIX);
byte checked = vex_byte0_ == VEX3_PREFIX ? vex_byte2_ : vex_byte1_;
@@ -424,6 +435,14 @@ class DisassemblerX64 {
return converter_.NameOfXMMRegister(reg);
}
+ const char* NameOfAVXRegister(int reg) const {
+ if (vex_256()) {
+ return NameOfYMMRegister(reg);
+ } else {
+ return converter_.NameOfXMMRegister(reg);
+ }
+ }
+
const char* NameOfAddress(byte* addr) const {
return converter_.NameOfAddress(addr);
}
@@ -448,6 +467,7 @@ class DisassemblerX64 {
int PrintRightOperand(byte* modrmp);
int PrintRightByteOperand(byte* modrmp);
int PrintRightXMMOperand(byte* modrmp);
+ int PrintRightAVXOperand(byte* modrmp);
int PrintOperands(const char* mnem, OperandType op_order, byte* data);
int PrintImmediate(byte* data, OperandSize size);
int PrintImmediateOp(byte* data);
@@ -606,6 +626,10 @@ int DisassemblerX64::PrintRightXMMOperand(byte* modrmp) {
return PrintRightOperandHelper(modrmp, &DisassemblerX64::NameOfXMMRegister);
}
+int DisassemblerX64::PrintRightAVXOperand(byte* modrmp) {
+ return PrintRightOperandHelper(modrmp, &DisassemblerX64::NameOfAVXRegister);
+}
+
// Returns number of bytes used including the current *data.
// Writes instruction's mnemonic, left and right operands to 'tmp_buffer_'.
int DisassemblerX64::PrintOperands(const char* mnem, OperandType op_order,
@@ -866,78 +890,98 @@ int DisassemblerX64::AVXInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
case 0x18:
- AppendToBuffer("vbroadcastss %s,", NameOfXMMRegister(regop));
+ AppendToBuffer("vbroadcastss %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
+ break;
+ case 0x98:
+ AppendToBuffer("vfmadd132p%c %s,%s,", float_size_code(),
+ NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
case 0x99:
AppendToBuffer("vfmadd132s%c %s,%s,", float_size_code(),
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
+ break;
+ case 0xA8:
+ AppendToBuffer("vfmadd213p%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
case 0xA9:
AppendToBuffer("vfmadd213s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xB8:
AppendToBuffer("vfmadd231p%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xB9:
AppendToBuffer("vfmadd231s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x9B:
AppendToBuffer("vfmsub132s%c %s,%s,", float_size_code(),
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
+ break;
+ case 0x9C:
+ AppendToBuffer("vfnmadd132p%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
case 0xAB:
AppendToBuffer("vfmsub213s%c %s,%s,", float_size_code(),
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
+ break;
+ case 0xAC:
+ AppendToBuffer("vfnmadd213p%c %s,%s,", float_size_code(),
NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
current += PrintRightXMMOperand(current);
break;
case 0xBB:
AppendToBuffer("vfmsub231s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xBC:
AppendToBuffer("vfnmadd231p%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x9D:
AppendToBuffer("vfnmadd132s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xAD:
AppendToBuffer("vfnmadd213s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xBD:
AppendToBuffer("vfnmadd231s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x9F:
AppendToBuffer("vfnmsub132s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xAF:
AppendToBuffer("vfnmsub213s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xBF:
AppendToBuffer("vfnmsub231s%c %s,%s,", float_size_code(),
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xF7:
AppendToBuffer("shlx%c %s,", operand_size_code(),
@@ -948,9 +992,9 @@ int DisassemblerX64::AVXInstruction(byte* data) {
#define DECLARE_SSE_AVX_DIS_CASE(instruction, notUsed1, notUsed2, notUsed3, \
opcode) \
case 0x##opcode: { \
- AppendToBuffer("v" #instruction " %s,%s,", NameOfXMMRegister(regop), \
- NameOfXMMRegister(vvvv)); \
- current += PrintRightXMMOperand(current); \
+ AppendToBuffer("v" #instruction " %s,%s,", NameOfAVXRegister(regop), \
+ NameOfAVXRegister(vvvv)); \
+ current += PrintRightAVXOperand(current); \
break; \
}
@@ -962,8 +1006,8 @@ int DisassemblerX64::AVXInstruction(byte* data) {
#define DECLARE_SSE_UNOP_AVX_DIS_CASE(instruction, notUsed1, notUsed2, \
notUsed3, opcode) \
case 0x##opcode: { \
- AppendToBuffer("v" #instruction " %s,", NameOfXMMRegister(regop)); \
- current += PrintRightXMMOperand(current); \
+ AppendToBuffer("v" #instruction " %s,", NameOfAVXRegister(regop)); \
+ current += PrintRightAVXOperand(current); \
break; \
}
SSSE3_UNOP_INSTRUCTION_LIST(DECLARE_SSE_UNOP_AVX_DIS_CASE)
@@ -972,8 +1016,8 @@ int DisassemblerX64::AVXInstruction(byte* data) {
#define DISASSEMBLE_AVX2_BROADCAST(instruction, _1, _2, _3, code) \
case 0x##code: \
- AppendToBuffer("" #instruction " %s,", NameOfXMMRegister(regop)); \
- current += PrintRightXMMOperand(current); \
+ AppendToBuffer("" #instruction " %s,", NameOfAVXRegister(regop)); \
+ current += PrintRightAVXOperand(current); \
break;
AVX2_BROADCAST_LIST(DISASSEMBLE_AVX2_BROADCAST)
#undef DISASSEMBLE_AVX2_BROADCAST
@@ -986,96 +1030,96 @@ int DisassemblerX64::AVXInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
case 0x08:
- AppendToBuffer("vroundps %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vroundps %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x09:
- AppendToBuffer("vroundpd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vroundpd %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x0A:
- AppendToBuffer("vroundss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vroundss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x0B:
- AppendToBuffer("vroundsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vroundsd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x0E:
- AppendToBuffer("vpblendw %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vpblendw %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x0F:
- AppendToBuffer("vpalignr %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vpalignr %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x14:
AppendToBuffer("vpextrb ");
current += PrintRightByteOperand(current);
- AppendToBuffer(",%s,0x%x,", NameOfXMMRegister(regop), *current++);
+ AppendToBuffer(",%s,0x%x,", NameOfAVXRegister(regop), *current++);
break;
case 0x15:
AppendToBuffer("vpextrw ");
current += PrintRightOperand(current);
- AppendToBuffer(",%s,0x%x,", NameOfXMMRegister(regop), *current++);
+ AppendToBuffer(",%s,0x%x,", NameOfAVXRegister(regop), *current++);
break;
case 0x16:
AppendToBuffer("vpextr%c ", rex_w() ? 'q' : 'd');
current += PrintRightOperand(current);
- AppendToBuffer(",%s,0x%x,", NameOfXMMRegister(regop), *current++);
+ AppendToBuffer(",%s,0x%x,", NameOfAVXRegister(regop), *current++);
break;
case 0x17:
AppendToBuffer("vextractps ");
current += PrintRightOperand(current);
- AppendToBuffer(",%s,0x%x,", NameOfXMMRegister(regop), *current++);
+ AppendToBuffer(",%s,0x%x,", NameOfAVXRegister(regop), *current++);
break;
case 0x20:
- AppendToBuffer("vpinsrb %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
+ AppendToBuffer("vpinsrb %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
current += PrintRightByteOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x21:
- AppendToBuffer("vinsertps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vinsertps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x22:
AppendToBuffer("vpinsr%c %s,%s,", rex_w() ? 'q' : 'd',
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
current += PrintRightOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x4A: {
- AppendToBuffer("vblendvps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister((*current++) >> 4));
+ AppendToBuffer("vblendvps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister((*current++) >> 4));
break;
}
case 0x4B: {
- AppendToBuffer("vblendvpd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister((*current++) >> 4));
+ AppendToBuffer("vblendvpd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister((*current++) >> 4));
break;
}
case 0x4C: {
- AppendToBuffer("vpblendvb %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister((*current++) >> 4));
+ AppendToBuffer("vpblendvb %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister((*current++) >> 4));
break;
}
default:
@@ -1086,95 +1130,95 @@ int DisassemblerX64::AVXInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
case 0x10:
- AppendToBuffer("vmovss %s,", NameOfXMMRegister(regop));
+ AppendToBuffer("vmovss %s,", NameOfAVXRegister(regop));
if (mod == 3) {
- AppendToBuffer("%s,", NameOfXMMRegister(vvvv));
+ AppendToBuffer("%s,", NameOfAVXRegister(vvvv));
}
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
break;
case 0x11:
AppendToBuffer("vmovss ");
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
if (mod == 3) {
- AppendToBuffer(",%s", NameOfXMMRegister(vvvv));
+ AppendToBuffer(",%s", NameOfAVXRegister(vvvv));
}
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0x16:
- AppendToBuffer("vmovshdup %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovshdup %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x2A:
AppendToBuffer("%s %s,%s,", vex_w() ? "vcvtqsi2ss" : "vcvtlsi2ss",
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
current += PrintRightOperand(current);
break;
case 0x2C:
AppendToBuffer("vcvttss2si%s %s,", vex_w() ? "q" : "",
NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
break;
case 0x51:
- AppendToBuffer("vsqrtss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vsqrtss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x58:
- AppendToBuffer("vaddss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vaddss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x59:
- AppendToBuffer("vmulss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmulss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x5A:
- AppendToBuffer("vcvtss2sd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vcvtss2sd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x5B:
- AppendToBuffer("vcvttps2dq %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vcvttps2dq %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x5C:
- AppendToBuffer("vsubss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vsubss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x5D:
- AppendToBuffer("vminss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vminss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x5E:
- AppendToBuffer("vdivss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vdivss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x5F:
- AppendToBuffer("vmaxss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmaxss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x6F:
- AppendToBuffer("vmovdqu %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovdqu %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x70:
- AppendToBuffer("vpshufhw %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vpshufhw %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x7F:
AppendToBuffer("vmovdqu ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0xE6:
- AppendToBuffer("vcvtdq2pd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vcvtdq2pd %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
default:
UnimplementedInstruction();
@@ -1184,92 +1228,92 @@ int DisassemblerX64::AVXInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
case 0x10:
- AppendToBuffer("vmovsd %s,", NameOfXMMRegister(regop));
+ AppendToBuffer("vmovsd %s,", NameOfAVXRegister(regop));
if (mod == 3) {
- AppendToBuffer("%s,", NameOfXMMRegister(vvvv));
+ AppendToBuffer("%s,", NameOfAVXRegister(vvvv));
}
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
break;
case 0x11:
AppendToBuffer("vmovsd ");
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
if (mod == 3) {
- AppendToBuffer(",%s", NameOfXMMRegister(vvvv));
+ AppendToBuffer(",%s", NameOfAVXRegister(vvvv));
}
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0x12:
- AppendToBuffer("vmovddup %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovddup %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x2A:
AppendToBuffer("%s %s,%s,", vex_w() ? "vcvtqsi2sd" : "vcvtlsi2sd",
- NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+ NameOfAVXRegister(regop), NameOfAVXRegister(vvvv));
current += PrintRightOperand(current);
break;
case 0x2C:
AppendToBuffer("vcvttsd2si%s %s,", vex_w() ? "q" : "",
NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
break;
case 0x2D:
AppendToBuffer("vcvtsd2si%s %s,", vex_w() ? "q" : "",
NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
break;
case 0x51:
- AppendToBuffer("vsqrtsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vsqrtsd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x58:
- AppendToBuffer("vaddsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vaddsd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x59:
- AppendToBuffer("vmulsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmulsd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x5A:
- AppendToBuffer("vcvtsd2ss %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vcvtsd2ss %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x5C:
- AppendToBuffer("vsubsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vsubsd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x5D:
- AppendToBuffer("vminsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vminsd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x5E:
- AppendToBuffer("vdivsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vdivsd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0x5F:
- AppendToBuffer("vmaxsd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmaxsd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
case 0xF0:
- AppendToBuffer("vlddqu %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vlddqu %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x70:
- AppendToBuffer("vpshuflw %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vpshuflw %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x7C:
- AppendToBuffer("vhaddps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vhaddps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
break;
default:
UnimplementedInstruction();
@@ -1387,90 +1431,90 @@ int DisassemblerX64::AVXInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
case 0x10:
- AppendToBuffer("vmovups %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovups %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x11:
AppendToBuffer("vmovups ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0x12:
if (mod == 0b11) {
- AppendToBuffer("vmovhlps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovhlps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
} else {
- AppendToBuffer("vmovlps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovlps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
}
break;
case 0x13:
AppendToBuffer("vmovlps ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0x16:
if (mod == 0b11) {
- AppendToBuffer("vmovlhps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovlhps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
} else {
- AppendToBuffer("vmovhps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovhps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
}
break;
case 0x17:
AppendToBuffer("vmovhps ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0x28:
- AppendToBuffer("vmovaps %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovaps %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x29:
AppendToBuffer("vmovaps ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0x2E:
- AppendToBuffer("vucomiss %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vucomiss %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x50:
AppendToBuffer("vmovmskps %s,", NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
break;
case 0xC2: {
- AppendToBuffer("vcmpps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vcmpps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(", (%s)", cmp_pseudo_op[*current]);
current += 1;
break;
}
case 0xC6: {
- AppendToBuffer("vshufps %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vshufps %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
}
#define SSE_UNOP_CASE(instruction, unused, code) \
case 0x##code: \
- AppendToBuffer("v" #instruction " %s,", NameOfXMMRegister(regop)); \
- current += PrintRightXMMOperand(current); \
+ AppendToBuffer("v" #instruction " %s,", NameOfAVXRegister(regop)); \
+ current += PrintRightAVXOperand(current); \
break;
SSE_UNOP_INSTRUCTION_LIST(SSE_UNOP_CASE)
#undef SSE_UNOP_CASE
#define SSE_BINOP_CASE(instruction, unused, code) \
case 0x##code: \
- AppendToBuffer("v" #instruction " %s,%s,", NameOfXMMRegister(regop), \
- NameOfXMMRegister(vvvv)); \
- current += PrintRightXMMOperand(current); \
+ AppendToBuffer("v" #instruction " %s,%s,", NameOfAVXRegister(regop), \
+ NameOfAVXRegister(vvvv)); \
+ current += PrintRightAVXOperand(current); \
break;
SSE_BINOP_INSTRUCTION_LIST(SSE_BINOP_CASE)
#undef SSE_BINOP_CASE
@@ -1482,92 +1526,92 @@ int DisassemblerX64::AVXInstruction(byte* data) {
get_modrm(*current, &mod, &regop, &rm);
switch (opcode) {
case 0x10:
- AppendToBuffer("vmovupd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovupd %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x11:
AppendToBuffer("vmovupd ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0x28:
- AppendToBuffer("vmovapd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovapd %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x29:
AppendToBuffer("vmovapd ");
- current += PrintRightXMMOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ current += PrintRightAVXOperand(current);
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0x50:
AppendToBuffer("vmovmskpd %s,", NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
break;
case 0x6E:
AppendToBuffer("vmov%c %s,", vex_w() ? 'q' : 'd',
- NameOfXMMRegister(regop));
+ NameOfAVXRegister(regop));
current += PrintRightOperand(current);
break;
case 0x6F:
- AppendToBuffer("vmovdqa %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vmovdqa %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
break;
case 0x70:
- AppendToBuffer("vpshufd %s,", NameOfXMMRegister(regop));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vpshufd %s,", NameOfAVXRegister(regop));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0x71:
AppendToBuffer("vps%sw %s,", sf_str[regop / 2],
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",%u", *current++);
break;
case 0x72:
AppendToBuffer("vps%sd %s,", sf_str[regop / 2],
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",%u", *current++);
break;
case 0x73:
AppendToBuffer("vps%sq %s,", sf_str[regop / 2],
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",%u", *current++);
break;
case 0x7E:
AppendToBuffer("vmov%c ", vex_w() ? 'q' : 'd');
current += PrintRightOperand(current);
- AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ AppendToBuffer(",%s", NameOfAVXRegister(regop));
break;
case 0xC2: {
- AppendToBuffer("vcmppd %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
- current += PrintRightXMMOperand(current);
+ AppendToBuffer("vcmppd %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
+ current += PrintRightAVXOperand(current);
AppendToBuffer(", (%s)", cmp_pseudo_op[*current]);
current += 1;
break;
}
case 0xC4:
- AppendToBuffer("vpinsrw %s,%s,", NameOfXMMRegister(regop),
- NameOfXMMRegister(vvvv));
+ AppendToBuffer("vpinsrw %s,%s,", NameOfAVXRegister(regop),
+ NameOfAVXRegister(vvvv));
current += PrintRightOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0xC5:
AppendToBuffer("vpextrw %s,", NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
AppendToBuffer(",0x%x", *current++);
break;
case 0xD7:
AppendToBuffer("vpmovmskb %s,", NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
+ current += PrintRightAVXOperand(current);
break;
#define DECLARE_SSE_AVX_DIS_CASE(instruction, notUsed1, notUsed2, opcode) \
case 0x##opcode: { \
- AppendToBuffer("v" #instruction " %s,%s,", NameOfXMMRegister(regop), \
- NameOfXMMRegister(vvvv)); \
- current += PrintRightXMMOperand(current); \
+ AppendToBuffer("v" #instruction " %s,%s,", NameOfAVXRegister(regop), \
+ NameOfAVXRegister(vvvv)); \
+ current += PrintRightAVXOperand(current); \
break; \
}
@@ -1575,8 +1619,8 @@ int DisassemblerX64::AVXInstruction(byte* data) {
#undef DECLARE_SSE_AVX_DIS_CASE
#define DECLARE_SSE_UNOP_AVX_DIS_CASE(instruction, notUsed1, notUsed2, opcode) \
case 0x##opcode: { \
- AppendToBuffer("v" #instruction " %s,", NameOfXMMRegister(regop)); \
- current += PrintRightXMMOperand(current); \
+ AppendToBuffer("v" #instruction " %s,", NameOfAVXRegister(regop)); \
+ current += PrintRightAVXOperand(current); \
break; \
}
@@ -2823,6 +2867,10 @@ static const char* const xmm_regs[16] = {
"xmm0", "xmm1", "xmm2", "xmm3", "xmm4", "xmm5", "xmm6", "xmm7",
"xmm8", "xmm9", "xmm10", "xmm11", "xmm12", "xmm13", "xmm14", "xmm15"};
+static const char* const ymm_regs[16] = {
+ "ymm0", "ymm1", "ymm2", "ymm3", "ymm4", "ymm5", "ymm6", "ymm7",
+ "ymm8", "ymm9", "ymm10", "ymm11", "ymm12", "ymm13", "ymm14", "ymm15"};
+
const char* NameConverter::NameOfAddress(byte* addr) const {
v8::base::SNPrintF(tmp_buffer_, "%p", static_cast<void*>(addr));
return tmp_buffer_.begin();
@@ -2847,6 +2895,11 @@ const char* NameConverter::NameOfXMMRegister(int reg) const {
return "noxmmreg";
}
+const char* NameOfYMMRegister(int reg) {
+ if (0 <= reg && reg < 16) return ymm_regs[reg];
+ return "noymmreg";
+}
+
const char* NameConverter::NameInCode(byte* addr) const {
// X64 does not embed debug strings at the moment.
UNREACHABLE();
diff --git a/deps/v8/src/execution/OWNERS b/deps/v8/src/execution/OWNERS
index 1a987f65e7..921f4f742a 100644
--- a/deps/v8/src/execution/OWNERS
+++ b/deps/v8/src/execution/OWNERS
@@ -1,7 +1,6 @@
ishell@chromium.org
jgruber@chromium.org
jkummerow@chromium.org
-mythria@chromium.org
delphick@chromium.org
verwaest@chromium.org
victorgomes@chromium.org
diff --git a/deps/v8/src/execution/arm/simulator-arm.cc b/deps/v8/src/execution/arm/simulator-arm.cc
index ec9c05af69..310ddab523 100644
--- a/deps/v8/src/execution/arm/simulator-arm.cc
+++ b/deps/v8/src/execution/arm/simulator-arm.cc
@@ -114,14 +114,10 @@ bool ArmDebugger::GetValue(const char* desc, int32_t* value) {
if (regnum != kNoRegister) {
*value = GetRegisterValue(regnum);
return true;
- } else {
- if (strncmp(desc, "0x", 2) == 0) {
- return SScanF(desc + 2, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
- } else {
- return SScanF(desc, "%u", reinterpret_cast<uint32_t*>(value)) == 1;
- }
}
- return false;
+ if (strncmp(desc, "0x", 2) == 0)
+ return SScanF(desc + 2, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
+ return SScanF(desc, "%u", reinterpret_cast<uint32_t*>(value)) == 1;
}
bool ArmDebugger::GetVFPSingleValue(const char* desc, float* value) {
@@ -1192,7 +1188,6 @@ bool Simulator::ConditionallyExecute(Instruction* instr) {
default:
UNREACHABLE();
}
- return false;
}
// Calculate and set the Negative and Zero flags.
@@ -1314,7 +1309,6 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
// by immediate
if ((shift == ROR) && (shift_amount == 0)) {
UNIMPLEMENTED();
- return result;
} else if (((shift == LSR) || (shift == ASR)) && (shift_amount == 0)) {
shift_amount = 32;
}
@@ -1373,7 +1367,6 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
default: {
UNREACHABLE();
- break;
}
}
} else {
@@ -1451,7 +1444,6 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
default: {
UNREACHABLE();
- break;
}
}
}
@@ -1486,7 +1478,6 @@ int32_t Simulator::ProcessPU(Instruction* instr, int num_regs, int reg_size,
switch (instr->PUField()) {
case da_x: {
UNIMPLEMENTED();
- break;
}
case ia_x: {
*start_address = rn_val;
@@ -1717,7 +1708,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
@@ -1769,7 +1759,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
switch (redirection->type()) {
@@ -1783,7 +1772,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
@@ -2121,7 +2109,6 @@ void Simulator::DecodeType01(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
} else {
// The instruction is documented as strex rd, rt, [rn], but the
@@ -2165,7 +2152,6 @@ void Simulator::DecodeType01(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
}
} else {
@@ -2219,7 +2205,6 @@ void Simulator::DecodeType01(Instruction* instr) {
default: {
// The PU field is a 2-bit field.
UNREACHABLE();
- break;
}
}
} else {
@@ -2262,7 +2247,6 @@ void Simulator::DecodeType01(Instruction* instr) {
default: {
// The PU field is a 2-bit field.
UNREACHABLE();
- break;
}
}
}
@@ -2600,7 +2584,6 @@ void Simulator::DecodeType01(Instruction* instr) {
default: {
UNREACHABLE();
- break;
}
}
}
@@ -2680,7 +2663,6 @@ void Simulator::DecodeType3(Instruction* instr) {
DCHECK(!instr->HasW());
Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
UNIMPLEMENTED();
- break;
}
case ia_x: {
if (instr->Bit(4) == 0) {
@@ -2714,10 +2696,8 @@ void Simulator::DecodeType3(Instruction* instr) {
break;
case 1:
UNIMPLEMENTED();
- break;
case 2:
UNIMPLEMENTED();
- break;
case 3: {
// Usat.
int32_t sat_pos = instr->Bits(20, 16);
@@ -2746,7 +2726,6 @@ void Simulator::DecodeType3(Instruction* instr) {
switch (instr->Bits(22, 21)) {
case 0:
UNIMPLEMENTED();
- break;
case 1:
if (instr->Bits(9, 6) == 1) {
if (instr->Bit(20) == 0) {
@@ -3442,7 +3421,6 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
set_neon_register(vd, q_data);
}
@@ -4433,7 +4411,6 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
break;
}
@@ -4469,13 +4446,11 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
break;
}
default:
UNREACHABLE();
- break;
}
} else if (opc1 == 0 && (opc2 == 0b0100 || opc2 == 0b0101)) {
DCHECK_EQ(1, instr->Bit(6)); // Only support Q regs.
@@ -4625,7 +4600,6 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
break;
default:
UNIMPLEMENTED();
- break;
}
}
} else if (opc1 == 0b01 && (opc2 & 0b0111) == 0b111) {
@@ -4654,7 +4628,6 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
break;
default:
UNIMPLEMENTED();
- break;
}
}
} else if (opc1 == 0b10 && opc2 == 0b0001) {
@@ -4674,7 +4647,6 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else {
int Vd = instr->VFPDRegValue(kDoublePrecision);
@@ -4692,7 +4664,6 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
} else if (opc1 == 0b10 && (opc2 & 0b1110) == 0b0010) {
@@ -4714,7 +4685,6 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else {
// vuzp.<size> Qd, Qm.
@@ -4730,7 +4700,6 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
} else {
@@ -4747,10 +4716,8 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
break;
case Neon32:
UNIMPLEMENTED();
- break;
default:
UNREACHABLE();
- break;
}
} else {
// vuzp.<size> Dd, Dm.
@@ -4763,10 +4730,8 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
break;
case Neon32:
UNIMPLEMENTED();
- break;
default:
UNREACHABLE();
- break;
}
}
}
@@ -4811,7 +4776,6 @@ void Simulator::DecodeAdvancedSIMDTwoOrThreeRegisters(Instruction* instr) {
}
case Neon64:
UNREACHABLE();
- break;
}
} else if (opc1 == 0b10 && instr->Bit(10) == 1) {
// vrint<q>.<dt> <Dd>, <Dm>
@@ -5078,7 +5042,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 1 && sz == 2 && q && op1) {
// vmov Qd, Qm.
@@ -5134,7 +5097,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 3) {
// vcge/vcgt.s<size> Qd, Qm, Qn.
@@ -5152,7 +5114,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 4 && !op1) {
// vshl s<size> Qd, Qm, Qn.
@@ -5172,7 +5133,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 6) {
// vmin/vmax.s<size> Qd, Qm, Qn.
@@ -5190,7 +5150,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 8 && op1) {
// vtst.i<size> Qd, Qm, Qn.
@@ -5207,7 +5166,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 8 && !op1) {
// vadd.i<size> Qd, Qm, Qn.
@@ -5241,7 +5199,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 0xA) {
// vpmin/vpmax.s<size> Dd, Dm, Dn.
@@ -5259,7 +5216,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 0xB) {
// vpadd.i<size> Dd, Dm, Dn.
@@ -5276,7 +5232,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (!u && opc == 0xD && !op1) {
float src1[4], src2[4];
@@ -5347,7 +5302,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && opc == 1 && sz == 1 && op1) {
// vbsl.size Qd, Qm, Qn.
@@ -5388,7 +5342,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && opc == 2 && op1) {
// vqsub.u<size> Qd, Qm, Qn.
@@ -5405,7 +5358,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && opc == 3) {
// vcge/vcgt.u<size> Qd, Qm, Qn.
@@ -5423,7 +5375,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && opc == 4 && !op1) {
// vshl u<size> Qd, Qm, Qn.
@@ -5443,7 +5394,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && opc == 6) {
// vmin/vmax.u<size> Qd, Qm, Qn.
@@ -5461,7 +5411,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && opc == 8 && !op1) {
// vsub.size Qd, Qm, Qn.
@@ -5495,7 +5444,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && opc == 0xA) {
// vpmin/vpmax.u<size> Dd, Dm, Dn.
@@ -5513,7 +5461,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && opc == 0xD && sz == 0 && q && op1) {
// vmul.f32 Qd, Qn, Qm
@@ -5658,7 +5605,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNIMPLEMENTED();
- break;
}
} else {
// vmovl signed
@@ -5677,7 +5623,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNIMPLEMENTED();
- break;
}
}
} else if (!u && imm3H_L != 0 && opc == 0b0101) {
@@ -5721,7 +5666,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
} else if (u && imm3H_L != 0 && opc == 0b0101) {
// vsli.<size> Dd, Dm, shift
@@ -5743,7 +5687,6 @@ void Simulator::DecodeAdvancedSIMDDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
}
@@ -5807,7 +5750,6 @@ void Simulator::DecodeAdvancedSIMDLoadStoreMultipleStructures(
break;
default:
UNIMPLEMENTED();
- break;
}
if (instr->Bit(21)) {
// vld1
@@ -5993,7 +5935,6 @@ void Simulator::DecodeFloatingPointDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE(); // Case analysis is exhaustive.
- break;
}
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
@@ -6019,7 +5960,6 @@ void Simulator::DecodeFloatingPointDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE(); // Case analysis is exhaustive.
- break;
}
sd_value = canonicalizeNaN(sd_value);
set_s_register_from_float(d, sd_value);
@@ -6111,7 +6051,6 @@ void Simulator::DecodeFloatingPointDataProcessing(Instruction* instr) {
break;
default:
UNREACHABLE(); // Case analysis is exhaustive.
- break;
}
if (instr->SzValue() == 0x1) {
int n = instr->VFPNRegValue(kDoublePrecision);
@@ -6132,7 +6071,6 @@ void Simulator::DecodeFloatingPointDataProcessing(Instruction* instr) {
break;
default:
UNIMPLEMENTED();
- break;
}
}
@@ -6201,7 +6139,6 @@ void Simulator::InstructionDecode(Instruction* instr) {
}
default: {
UNIMPLEMENTED();
- break;
}
}
}
diff --git a/deps/v8/src/execution/arm64/simulator-arm64.cc b/deps/v8/src/execution/arm64/simulator-arm64.cc
index 324bdd99a8..5669838006 100644
--- a/deps/v8/src/execution/arm64/simulator-arm64.cc
+++ b/deps/v8/src/execution/arm64/simulator-arm64.cc
@@ -1517,7 +1517,6 @@ void Simulator::VisitPCRelAddressing(Instruction* instr) {
break;
case ADRP: // Not implemented in the assembler.
UNIMPLEMENTED();
- break;
default:
UNREACHABLE();
}
@@ -2212,7 +2211,6 @@ Simulator::TransactionSize Simulator::get_transaction_size(unsigned size) {
default:
UNREACHABLE();
}
- return TransactionSize::None;
}
void Simulator::VisitLoadStoreAcquireRelease(Instruction* instr) {
@@ -5210,7 +5208,6 @@ void Simulator::VisitNEONScalar2RegMisc(Instruction* instr) {
break;
default:
UNIMPLEMENTED();
- break;
}
} else {
VectorFormat fpf = nfd.GetVectorFormat(nfd.FPScalarFormatMap());
diff --git a/deps/v8/src/execution/execution.cc b/deps/v8/src/execution/execution.cc
index 4b7b50bb0e..689d99057e 100644
--- a/deps/v8/src/execution/execution.cc
+++ b/deps/v8/src/execution/execution.cc
@@ -346,7 +346,6 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(Isolate* isolate,
// Placeholder for return value.
Object value;
-
Handle<Code> code =
JSEntry(isolate, params.execution_target, params.is_construct);
{
@@ -374,7 +373,8 @@ V8_WARN_UNUSED_RESULT MaybeHandle<Object> Invoke(Isolate* isolate,
Address** argv = reinterpret_cast<Address**>(params.argv);
RCS_SCOPE(isolate, RuntimeCallCounterId::kJS_Execution);
value = Object(stub_entry.Call(isolate->isolate_data()->isolate_root(),
- orig_func, func, recv, params.argc, argv));
+ orig_func, func, recv,
+ JSParameterCount(params.argc), argv));
} else {
DCHECK_EQ(Execution::Target::kRunMicrotasks, params.execution_target);
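// Illustrative note (not part of this commit): JSParameterCount() widens the
// caller-supplied argc by the receiver slot, matching the new convention that
// the count handed to the stub includes the receiver; frame readers such as
// JavaScriptFrame::GetActualArgumentCount() subtract kJSArgcReceiverSlots to
// undo it.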
diff --git a/deps/v8/src/execution/frame-constants.h b/deps/v8/src/execution/frame-constants.h
index 1148a94212..d353a7092d 100644
--- a/deps/v8/src/execution/frame-constants.h
+++ b/deps/v8/src/execution/frame-constants.h
@@ -283,7 +283,9 @@ class BuiltinExitFrameConstants : public ExitFrameConstants {
static constexpr int kPaddingOffset = kArgcOffset + 1 * kSystemPointerSize;
static constexpr int kFirstArgumentOffset =
kPaddingOffset + 1 * kSystemPointerSize;
- static constexpr int kNumExtraArgsWithReceiver = 5;
+ static constexpr int kNumExtraArgsWithoutReceiver = 4;
+ static constexpr int kNumExtraArgsWithReceiver =
+ kNumExtraArgsWithoutReceiver + 1;
};
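// Illustrative note (not part of this commit): splitting the old constant 5
// into 4 + 1 makes the receiver's slot explicit. The four receiver-less
// extras correspond to the fixed builtin exit frame slots (new.target,
// target, argc and padding in this layout), so kNumExtraArgsWithReceiver
// still evaluates to 5.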
// Unoptimized frames are used for interpreted and baseline-compiled JavaScript
@@ -403,6 +405,8 @@ inline static int FrameSlotToFPOffset(int slot) {
#include "src/execution/mips/frame-constants-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/execution/mips64/frame-constants-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/execution/loong64/frame-constants-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/execution/s390/frame-constants-s390.h"
#elif V8_TARGET_ARCH_RISCV64
diff --git a/deps/v8/src/execution/frames.cc b/deps/v8/src/execution/frames.cc
index f24f183706..a388130ee3 100644
--- a/deps/v8/src/execution/frames.cc
+++ b/deps/v8/src/execution/frames.cc
@@ -206,19 +206,42 @@ int StackTraceFrameIterator::FrameFunctionCount() const {
return static_cast<int>(infos.size());
}
-bool StackTraceFrameIterator::IsValidFrame(StackFrame* frame) const {
+FrameSummary StackTraceFrameIterator::GetTopValidFrame() const {
+ DCHECK(!done());
+ // Like FrameSummary::GetTop, but additionally observes
+ // StackTraceFrameIterator filtering semantics.
+ std::vector<FrameSummary> frames;
+ frame()->Summarize(&frames);
+ if (is_javascript()) {
+ for (int i = static_cast<int>(frames.size()) - 1; i >= 0; i--) {
+ if (!IsValidJSFunction(*frames[i].AsJavaScript().function())) continue;
+ return frames[i];
+ }
+ UNREACHABLE();
+ }
+#if V8_ENABLE_WEBASSEMBLY
+ if (is_wasm()) return frames.back();
+#endif // V8_ENABLE_WEBASSEMBLY
+ UNREACHABLE();
+}
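// Illustrative sketch (not part of this commit; PrintTopCodeOffset is a
// hypothetical helper): prefer the filtered accessor over
// FrameSummary::GetTop() so that inlined frames of non-debuggable functions
// are skipped the same way whole frames are.
void PrintTopCodeOffset(Isolate* isolate) {
  StackTraceFrameIterator it(isolate);
  if (it.done()) return;  // no JS or Wasm frame on the stack
  FrameSummary summary = it.GetTopValidFrame();
  PrintF("top frame code offset: %d\n", summary.code_offset());
}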
+
+// static
+bool StackTraceFrameIterator::IsValidFrame(StackFrame* frame) {
if (frame->is_java_script()) {
- JavaScriptFrame* js_frame = static_cast<JavaScriptFrame*>(frame);
- if (!js_frame->function().IsJSFunction()) return false;
- return js_frame->function().shared().IsSubjectToDebugging();
+ return IsValidJSFunction(static_cast<JavaScriptFrame*>(frame)->function());
}
- // Apart from JavaScript frames, only Wasm frames are valid.
#if V8_ENABLE_WEBASSEMBLY
if (frame->is_wasm()) return true;
#endif // V8_ENABLE_WEBASSEMBLY
return false;
}
+// static
+bool StackTraceFrameIterator::IsValidJSFunction(JSFunction f) {
+ if (!f.IsJSFunction()) return false;
+ return f.shared().IsSubjectToDebugging();
+}
+
// -------------------------------------------------------------------------
namespace {
@@ -1154,7 +1177,8 @@ int OptimizedFrame::ComputeParametersCount() const {
Code code = LookupCode();
if (code.kind() == CodeKind::BUILTIN) {
return static_cast<int>(
- Memory<intptr_t>(fp() + StandardFrameConstants::kArgCOffset));
+ Memory<intptr_t>(fp() + StandardFrameConstants::kArgCOffset)) -
+ kJSArgcReceiverSlots;
} else {
return JavaScriptFrame::ComputeParametersCount();
}
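// Illustrative note (not part of this commit): the raw slot at kArgCOffset
// now stores a count that includes the receiver, so readers subtract
// kJSArgcReceiverSlots (one slot) to report the JS-visible argument count;
// GetActualArgumentCount() below applies the same correction.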
@@ -1327,12 +1351,13 @@ Object CommonFrameWithJSLinkage::GetParameter(int index) const {
int CommonFrameWithJSLinkage::ComputeParametersCount() const {
DCHECK(can_access_heap_objects() &&
isolate()->heap()->gc_state() == Heap::NOT_IN_GC);
- return function().shared().internal_formal_parameter_count();
+ return function().shared().internal_formal_parameter_count_without_receiver();
}
int JavaScriptFrame::GetActualArgumentCount() const {
return static_cast<int>(
- Memory<intptr_t>(fp() + StandardFrameConstants::kArgCOffset));
+ Memory<intptr_t>(fp() + StandardFrameConstants::kArgCOffset)) -
+ kJSArgcReceiverSlots;
}
Handle<FixedArray> CommonFrameWithJSLinkage::GetParameters() const {
diff --git a/deps/v8/src/execution/frames.h b/deps/v8/src/execution/frames.h
index 8d9dadd76d..d81a9dd878 100644
--- a/deps/v8/src/execution/frames.h
+++ b/deps/v8/src/execution/frames.h
@@ -5,6 +5,7 @@
#ifndef V8_EXECUTION_FRAMES_H_
#define V8_EXECUTION_FRAMES_H_
+#include "include/v8-initialization.h"
#include "src/base/bounds.h"
#include "src/codegen/safepoint-table.h"
#include "src/common/globals.h"
@@ -175,7 +176,9 @@ class StackFrame {
intptr_t type = marker >> kSmiTagSize;
// TODO(petermarshall): There is a bug in the arm simulators that causes
// invalid frame markers.
-#if defined(USE_SIMULATOR) && (V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM)
+#if (defined(USE_SIMULATOR) && \
+ (V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM)) || \
+ V8_TARGET_ARCH_RISCV64
if (static_cast<uintptr_t>(type) >= Type::NUMBER_OF_TYPES) {
// Appease UBSan.
return Type::NUMBER_OF_TYPES;
@@ -1273,9 +1276,14 @@ class V8_EXPORT_PRIVATE StackTraceFrameIterator {
#endif // V8_ENABLE_WEBASSEMBLY
inline JavaScriptFrame* javascript_frame() const;
+ // Use this instead of FrameSummary::GetTop(javascript_frame) to keep
+ // filtering behavior consistent with the rest of StackTraceFrameIterator.
+ FrameSummary GetTopValidFrame() const;
+
private:
StackFrameIterator iterator_;
- bool IsValidFrame(StackFrame* frame) const;
+ static bool IsValidFrame(StackFrame* frame);
+ static bool IsValidJSFunction(JSFunction f);
};
class SafeStackFrameIterator : public StackFrameIteratorBase {
diff --git a/deps/v8/src/execution/futex-emulation.h b/deps/v8/src/execution/futex-emulation.h
index cf8a9fd079..2ab84295e0 100644
--- a/deps/v8/src/execution/futex-emulation.h
+++ b/deps/v8/src/execution/futex-emulation.h
@@ -9,7 +9,7 @@
#include <map>
-#include "include/v8.h"
+#include "include/v8-persistent-handle.h"
#include "src/base/atomicops.h"
#include "src/base/lazy-instance.h"
#include "src/base/macros.h"
@@ -29,6 +29,8 @@
namespace v8 {
+class Promise;
+
namespace base {
class TimeDelta;
} // namespace base
diff --git a/deps/v8/src/execution/isolate.cc b/deps/v8/src/execution/isolate.cc
index 8363c52c49..c630cb73fa 100644
--- a/deps/v8/src/execution/isolate.cc
+++ b/deps/v8/src/execution/isolate.cc
@@ -14,6 +14,7 @@
#include <unordered_map>
#include <utility>
+#include "include/v8-template.h"
#include "src/api/api-inl.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/scopes.h"
@@ -151,26 +152,6 @@ uint32_t DefaultEmbeddedBlobDataSize() {
return v8_Default_embedded_blob_data_size_;
}
-#ifdef V8_MULTI_SNAPSHOTS
-extern "C" const uint8_t* v8_Trusted_embedded_blob_code_;
-extern "C" uint32_t v8_Trusted_embedded_blob_code_size_;
-extern "C" const uint8_t* v8_Trusted_embedded_blob_data_;
-extern "C" uint32_t v8_Trusted_embedded_blob_data_size_;
-
-const uint8_t* TrustedEmbeddedBlobCode() {
- return v8_Trusted_embedded_blob_code_;
-}
-uint32_t TrustedEmbeddedBlobCodeSize() {
- return v8_Trusted_embedded_blob_code_size_;
-}
-const uint8_t* TrustedEmbeddedBlobData() {
- return v8_Trusted_embedded_blob_data_;
-}
-uint32_t TrustedEmbeddedBlobDataSize() {
- return v8_Trusted_embedded_blob_data_size_;
-}
-#endif
-
namespace {
// These variables provide access to the current embedded blob without requiring
// an isolate instance. This is needed e.g. by Code::InstructionStart, which may
@@ -282,9 +263,6 @@ bool Isolate::CurrentEmbeddedBlobIsBinaryEmbedded() {
const uint8_t* code =
current_embedded_blob_code_.load(std::memory_order::memory_order_relaxed);
if (code == nullptr) return false;
-#ifdef V8_MULTI_SNAPSHOTS
- if (code == TrustedEmbeddedBlobCode()) return true;
-#endif
return code == DefaultEmbeddedBlobCode();
}
@@ -660,7 +638,8 @@ class StackTraceBuilder {
if (V8_UNLIKELY(FLAG_detailed_error_stack_trace)) {
parameters = isolate_->factory()->CopyFixedArrayUpTo(
handle(generator_object->parameters_and_registers(), isolate_),
- function->shared().internal_formal_parameter_count());
+ function->shared()
+ .internal_formal_parameter_count_without_receiver());
}
AppendFrame(receiver, function, code, offset, flags, parameters);
@@ -2171,20 +2150,16 @@ void Isolate::PrintCurrentStackTrace(FILE* out) {
bool Isolate::ComputeLocation(MessageLocation* target) {
StackTraceFrameIterator it(this);
if (it.done()) return false;
- CommonFrame* frame = it.frame();
// Compute the location from the function and the relocation info of the
// baseline code. For optimized code this will use the deoptimization
// information to get canonical location information.
- std::vector<FrameSummary> frames;
#if V8_ENABLE_WEBASSEMBLY
wasm::WasmCodeRefScope code_ref_scope;
#endif // V8_ENABLE_WEBASSEMBLY
- frame->Summarize(&frames);
- FrameSummary& summary = frames.back();
+ FrameSummary summary = it.GetTopValidFrame();
Handle<SharedFunctionInfo> shared;
Handle<Object> script = summary.script();
- if (!script->IsScript() ||
- (Script::cast(*script).source().IsUndefined(this))) {
+ if (!script->IsScript() || Script::cast(*script).source().IsUndefined(this)) {
return false;
}
@@ -2648,7 +2623,7 @@ Handle<Context> Isolate::GetIncumbentContext() {
// NOTE: This code assumes that the stack grows downward.
Address top_backup_incumbent =
top_backup_incumbent_scope()
- ? top_backup_incumbent_scope()->JSStackComparableAddress()
+ ? top_backup_incumbent_scope()->JSStackComparableAddressPrivate()
: 0;
if (!it.done() &&
(!top_backup_incumbent || it.frame()->sp() < top_backup_incumbent)) {
@@ -3412,15 +3387,6 @@ void Isolate::InitializeDefaultEmbeddedBlob() {
const uint8_t* data = DefaultEmbeddedBlobData();
uint32_t data_size = DefaultEmbeddedBlobDataSize();
-#ifdef V8_MULTI_SNAPSHOTS
- if (!FLAG_untrusted_code_mitigations) {
- code = TrustedEmbeddedBlobCode();
- code_size = TrustedEmbeddedBlobCodeSize();
- data = TrustedEmbeddedBlobData();
- data_size = TrustedEmbeddedBlobDataSize();
- }
-#endif
-
if (StickyEmbeddedBlobCode() != nullptr) {
base::MutexGuard guard(current_embedded_blob_refcount_mutex_.Pointer());
// Check again now that we hold the lock.
@@ -4295,7 +4261,6 @@ MaybeHandle<JSPromise> Isolate::RunHostImportModuleDynamicallyCallback(
DCHECK(host_import_module_dynamically_callback_ == nullptr ||
host_import_module_dynamically_with_import_assertions_callback_ ==
nullptr);
-
if (host_import_module_dynamically_callback_ == nullptr &&
host_import_module_dynamically_with_import_assertions_callback_ ==
nullptr) {
@@ -4309,7 +4274,6 @@ MaybeHandle<JSPromise> Isolate::RunHostImportModuleDynamicallyCallback(
if (!maybe_specifier.ToHandle(&specifier_str)) {
Handle<Object> exception(pending_exception(), this);
clear_pending_exception();
-
return NewRejectedPromise(this, api_context, exception);
}
DCHECK(!has_pending_exception());
@@ -4331,7 +4295,6 @@ MaybeHandle<JSPromise> Isolate::RunHostImportModuleDynamicallyCallback(
} else {
Handle<Object> exception(pending_exception(), this);
clear_pending_exception();
-
return NewRejectedPromise(this, api_context, exception);
}
diff --git a/deps/v8/src/execution/isolate.h b/deps/v8/src/execution/isolate.h
index e543c72718..e7908eac6a 100644
--- a/deps/v8/src/execution/isolate.h
+++ b/deps/v8/src/execution/isolate.h
@@ -13,9 +13,11 @@
#include <unordered_map>
#include <vector>
+#include "include/v8-context.h"
#include "include/v8-internal.h"
+#include "include/v8-isolate.h"
#include "include/v8-metrics.h"
-#include "include/v8.h"
+#include "include/v8-snapshot.h"
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
#include "src/builtins/builtins.h"
@@ -33,6 +35,7 @@
#include "src/heap/heap.h"
#include "src/heap/read-only-heap.h"
#include "src/init/isolate-allocator.h"
+#include "src/init/vm-cage.h"
#include "src/objects/code.h"
#include "src/objects/contexts.h"
#include "src/objects/debug-objects.h"
@@ -91,6 +94,7 @@ class EternalHandles;
class HandleScopeImplementer;
class HeapObjectToIndexHashMap;
class HeapProfiler;
+class GlobalHandles;
class InnerPointerToCodeCache;
class LazyCompileDispatcher;
class LocalIsolate;
diff --git a/deps/v8/src/execution/loong64/frame-constants-loong64.cc b/deps/v8/src/execution/loong64/frame-constants-loong64.cc
new file mode 100644
index 0000000000..4bd809266c
--- /dev/null
+++ b/deps/v8/src/execution/loong64/frame-constants-loong64.cc
@@ -0,0 +1,32 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/execution/loong64/frame-constants-loong64.h"
+
+#include "src/codegen/loong64/assembler-loong64-inl.h"
+#include "src/execution/frame-constants.h"
+#include "src/execution/frames.h"
+
+namespace v8 {
+namespace internal {
+
+Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
+Register JavaScriptFrame::context_register() { return cp; }
+Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); }
+
+int UnoptimizedFrameConstants::RegisterStackSlotCount(int register_count) {
+ return register_count;
+}
+
+int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
+ USE(register_count);
+ return 0;
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_LOONG64
diff --git a/deps/v8/src/execution/loong64/frame-constants-loong64.h b/deps/v8/src/execution/loong64/frame-constants-loong64.h
new file mode 100644
index 0000000000..1395f47a7b
--- /dev/null
+++ b/deps/v8/src/execution/loong64/frame-constants-loong64.h
@@ -0,0 +1,76 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_EXECUTION_LOONG64_FRAME_CONSTANTS_LOONG64_H_
+#define V8_EXECUTION_LOONG64_FRAME_CONSTANTS_LOONG64_H_
+
+#include "src/base/bits.h"
+#include "src/base/macros.h"
+#include "src/execution/frame-constants.h"
+
+namespace v8 {
+namespace internal {
+
+class EntryFrameConstants : public AllStatic {
+ public:
+ // This is the offset to where JSEntry pushes the current value of
+ // Isolate::c_entry_fp onto the stack.
+ static constexpr int kCallerFPOffset = -3 * kSystemPointerSize;
+};
+
+class WasmCompileLazyFrameConstants : public TypedFrameConstants {
+ public:
+ static constexpr int kNumberOfSavedGpParamRegs = 7;
+ static constexpr int kNumberOfSavedFpParamRegs = 8;
+ static constexpr int kNumberOfSavedAllParamRegs = 15;
+
+ // FP-relative.
+ // See Generate_WasmCompileLazy in builtins-loong64.cc.
+ static constexpr int kWasmInstanceOffset = TYPED_FRAME_PUSHED_VALUE_OFFSET(6);
+ static constexpr int kFixedFrameSizeFromFp =
+ TypedFrameConstants::kFixedFrameSizeFromFp +
+ kNumberOfSavedGpParamRegs * kPointerSize +
+ kNumberOfSavedFpParamRegs * kDoubleSize;
+};
+
+// Frame constructed by the {WasmDebugBreak} builtin.
+// After pushing the frame type marker, the builtin pushes all Liftoff cache
+// registers (see liftoff-assembler-defs.h).
+class WasmDebugBreakFrameConstants : public TypedFrameConstants {
+ public:
+ // {a0 ... a7, t0 ... t5, s0, s1, s2, s5, s7, s8}
+ static constexpr uint32_t kPushedGpRegs = 0b11010011100000111111111111110000;
+ // {f0, f1, f2, ... f27, f28}
+ static constexpr uint32_t kPushedFpRegs = 0x1fffffff;
+
+ static constexpr int kNumPushedGpRegisters =
+ base::bits::CountPopulation(kPushedGpRegs);
+ static constexpr int kNumPushedFpRegisters =
+ base::bits::CountPopulation(kPushedFpRegs);
+
+ static constexpr int kLastPushedGpRegisterOffset =
+ -kFixedFrameSizeFromFp - kNumPushedGpRegisters * kSystemPointerSize;
+ static constexpr int kLastPushedFpRegisterOffset =
+ kLastPushedGpRegisterOffset - kNumPushedFpRegisters * kDoubleSize;
+
+ // Offsets are fp-relative.
+ static int GetPushedGpRegisterOffset(int reg_code) {
+ DCHECK_NE(0, kPushedGpRegs & (1 << reg_code));
+ uint32_t lower_regs = kPushedGpRegs & ((uint32_t{1} << reg_code) - 1);
+ return kLastPushedGpRegisterOffset +
+ base::bits::CountPopulation(lower_regs) * kSystemPointerSize;
+ }
+
+ static int GetPushedFpRegisterOffset(int reg_code) {
+ DCHECK_NE(0, kPushedFpRegs & (1 << reg_code));
+ uint32_t lower_regs = kPushedFpRegs & ((uint32_t{1} << reg_code) - 1);
+ return kLastPushedFpRegisterOffset +
+ base::bits::CountPopulation(lower_regs) * kDoubleSize;
+ }
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_EXECUTION_LOONG64_FRAME_CONSTANTS_LOONG64_H_
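
The GetPushedGpRegisterOffset/GetPushedFpRegisterOffset helpers above locate a register's stack slot by counting how many lower-numbered registers were pushed beneath it. A self-contained sketch of the same popcount technique, with the bitmask and slot size invented for illustration:

#include <bitset>
#include <cassert>
#include <cstdint>

constexpr uint32_t kPushedRegs = 0b1011;           // regs 0, 1 and 3 pushed
constexpr int kSlotSize = 8;                       // 8-byte slots
constexpr int kLastPushedOffset = -3 * kSlotSize;  // deepest pushed slot

int PushedRegisterOffset(int reg_code) {
  assert(kPushedRegs & (uint32_t{1} << reg_code));
  // Every pushed register with a smaller code sits one slot below us.
  uint32_t lower_regs = kPushedRegs & ((uint32_t{1} << reg_code) - 1);
  int popcount = static_cast<int>(std::bitset<32>(lower_regs).count());
  return kLastPushedOffset + popcount * kSlotSize;
}

// PushedRegisterOffset(0) == -24, (1) == -16, (3) == -8; reg 2 is not pushed.
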
diff --git a/deps/v8/src/execution/loong64/simulator-loong64.cc b/deps/v8/src/execution/loong64/simulator-loong64.cc
new file mode 100644
index 0000000000..33f10304f6
--- /dev/null
+++ b/deps/v8/src/execution/loong64/simulator-loong64.cc
@@ -0,0 +1,5538 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/execution/loong64/simulator-loong64.h"
+
+// Only build the simulator if not compiling for real LOONG64 hardware.
+#if defined(USE_SIMULATOR)
+
+#include <limits.h>
+#include <stdarg.h>
+#include <stdlib.h>
+
+#include <cmath>
+
+#include "src/base/bits.h"
+#include "src/base/platform/platform.h"
+#include "src/base/platform/wrappers.h"
+#include "src/base/strings.h"
+#include "src/base/vector.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/diagnostics/disasm.h"
+#include "src/heap/combined-heap.h"
+#include "src/runtime/runtime-utils.h"
+#include "src/utils/ostreams.h"
+
+namespace v8 {
+namespace internal {
+
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(Simulator::GlobalMonitor,
+ Simulator::GlobalMonitor::Get)
+
+// #define PRINT_SIM_LOG
+
+// Util functions.
+inline bool HaveSameSign(int64_t a, int64_t b) { return ((a ^ b) >= 0); }
+
+uint32_t get_fcsr_condition_bit(uint32_t cc) {
+ if (cc == 0) {
+ return 23;
+ } else {
+ return 24 + cc;
+ }
+}
+
+static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
+ uint64_t u0, v0, w0;
+ int64_t u1, v1, w1, w2, t;
+
+ u0 = u & 0xFFFFFFFFL;
+ u1 = u >> 32;
+ v0 = v & 0xFFFFFFFFL;
+ v1 = v >> 32;
+
+ w0 = u0 * v0;
+ t = u1 * v0 + (w0 >> 32);
+ w1 = t & 0xFFFFFFFFL;
+ w2 = t >> 32;
+ w1 = u0 * v1 + w1;
+
+ return u1 * v1 + w2 + (w1 >> 32);
+}
+
+static uint64_t MultiplyHighUnsigned(uint64_t u, uint64_t v) {
+ uint64_t u0, v0, w0;
+ uint64_t u1, v1, w1, w2, t;
+
+ u0 = u & 0xFFFFFFFFL;
+ u1 = u >> 32;
+ v0 = v & 0xFFFFFFFFL;
+ v1 = v >> 32;
+
+ w0 = u0 * v0;
+ t = u1 * v0 + (w0 >> 32);
+ w1 = t & 0xFFFFFFFFL;
+ w2 = t >> 32;
+ w1 = u0 * v1 + w1;
+
+ return u1 * v1 + w2 + (w1 >> 32);
+}
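
An editorial cross-check for the two helpers above: both compute the high 64 bits of a 128-bit product by splitting the operands into 32-bit halves and propagating the carries through the partial products. On compilers with a 128-bit integer type the same value can be obtained directly; a sketch, assuming __int128 support:

#include <cstdint>

uint64_t MulHighU64Reference(uint64_t u, uint64_t v) {
  // One 128-bit multiply, then keep the upper half.
  return static_cast<uint64_t>((static_cast<unsigned __int128>(u) * v) >> 64);
}

// Expected to agree with MultiplyHighUnsigned(u, v) for all inputs, e.g.
//   MulHighU64Reference(UINT64_MAX, UINT64_MAX) == UINT64_MAX - 1.
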
+
+#ifdef PRINT_SIM_LOG
+inline void printf_instr(const char* _Format, ...) {
+ va_list varList;
+ va_start(varList, _Format);
+ vprintf(_Format, varList);
+ va_end(varList);
+}
+#else
+#define printf_instr(...)
+#endif
+
+// This macro provides a platform independent use of sscanf. The reason for
+// SScanF not being implemented in a platform independent way through
+// ::v8::internal::OS in the same way as base::SNPrintF is that the Windows C
+// Run-Time Library does not provide vsscanf.
+#define SScanF sscanf
+
+// The Loong64Debugger class is used by the simulator while debugging simulated
+// code.
+class Loong64Debugger {
+ public:
+ explicit Loong64Debugger(Simulator* sim) : sim_(sim) {}
+
+ void Stop(Instruction* instr);
+ void Debug();
+ // Print all registers with a nice formatting.
+ void PrintAllRegs();
+ void PrintAllRegsIncludingFPU();
+
+ private:
+ // We set the breakpoint code to 0xFFFF to easily recognize it.
+ static const Instr kBreakpointInstr = BREAK | 0xFFFF;
+ static const Instr kNopInstr = 0x0;
+
+ Simulator* sim_;
+
+ int64_t GetRegisterValue(int regnum);
+ int64_t GetFPURegisterValue(int regnum);
+ float GetFPURegisterValueFloat(int regnum);
+ double GetFPURegisterValueDouble(int regnum);
+ bool GetValue(const char* desc, int64_t* value);
+
+ // Set or delete a breakpoint. Returns true if successful.
+ bool SetBreakpoint(Instruction* breakpc);
+ bool DeleteBreakpoint(Instruction* breakpc);
+
+ // Undo and redo all breakpoints. This is needed to bracket disassembly and
+ // execution to skip past breakpoints when run from the debugger.
+ void UndoBreakpoints();
+ void RedoBreakpoints();
+};
+
+inline void UNSUPPORTED() { printf("Sim: Unsupported instruction.\n"); }
+
+void Loong64Debugger::Stop(Instruction* instr) {
+ // Get the stop code.
+ uint32_t code = instr->Bits(25, 6);
+ PrintF("Simulator hit (%u)\n", code);
+ Debug();
+}
+
+int64_t Loong64Debugger::GetRegisterValue(int regnum) {
+ if (regnum == kNumSimuRegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_register(regnum);
+ }
+}
+
+int64_t Loong64Debugger::GetFPURegisterValue(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register(regnum);
+ }
+}
+
+float Loong64Debugger::GetFPURegisterValueFloat(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register_float(regnum);
+ }
+}
+
+double Loong64Debugger::GetFPURegisterValueDouble(int regnum) {
+ if (regnum == kNumFPURegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_fpu_register_double(regnum);
+ }
+}
+
+bool Loong64Debugger::GetValue(const char* desc, int64_t* value) {
+ int regnum = Registers::Number(desc);
+ int fpuregnum = FPURegisters::Number(desc);
+
+ if (regnum != kInvalidRegister) {
+ *value = GetRegisterValue(regnum);
+ return true;
+ } else if (fpuregnum != kInvalidFPURegister) {
+ *value = GetFPURegisterValue(fpuregnum);
+ return true;
+ } else if (strncmp(desc, "0x", 2) == 0) {
+ return SScanF(desc + 2, "%" SCNx64, reinterpret_cast<uint64_t*>(value)) ==
+ 1;
+ } else {
+ return SScanF(desc, "%" SCNu64, reinterpret_cast<uint64_t*>(value)) == 1;
+ }
+}
+
+bool Loong64Debugger::SetBreakpoint(Instruction* breakpc) {
+ // Check if a breakpoint can be set. If not return without any side-effects.
+ if (sim_->break_pc_ != nullptr) {
+ return false;
+ }
+
+ // Set the breakpoint.
+ sim_->break_pc_ = breakpc;
+ sim_->break_instr_ = breakpc->InstructionBits();
+ // Not setting the breakpoint instruction in the code itself. It will be set
+ // when the debugger shell continues.
+ return true;
+}
+
+bool Loong64Debugger::DeleteBreakpoint(Instruction* breakpc) {
+ if (sim_->break_pc_ != nullptr) {
+ sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+ }
+
+ sim_->break_pc_ = nullptr;
+ sim_->break_instr_ = 0;
+ return true;
+}
+
+void Loong64Debugger::UndoBreakpoints() {
+ if (sim_->break_pc_ != nullptr) {
+ sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+ }
+}
+
+void Loong64Debugger::RedoBreakpoints() {
+ if (sim_->break_pc_ != nullptr) {
+ sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
+ }
+}
+
+void Loong64Debugger::PrintAllRegs() {
+#define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n)
+
+ PrintF("\n");
+ // at, v0, a0.
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 "\t%3s: 0x%016" PRIx64 " %14" PRId64
+ "\t%3s: 0x%016" PRIx64 " %14" PRId64 "\n",
+ REG_INFO(1), REG_INFO(2), REG_INFO(4));
+ // v1, a1.
+ PrintF("%34s\t%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64
+ " %14" PRId64 " \n",
+ "", REG_INFO(3), REG_INFO(5));
+ // a2.
+ PrintF("%34s\t%34s\t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", "", "",
+ REG_INFO(6));
+ // a3.
+ PrintF("%34s\t%34s\t%3s: 0x%016" PRIx64 " %14" PRId64 " \n", "", "",
+ REG_INFO(7));
+ PrintF("\n");
+ // a4-t3, s0-s7
+ for (int i = 0; i < 8; i++) {
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64
+ " %14" PRId64 " \n",
+ REG_INFO(8 + i), REG_INFO(16 + i));
+ }
+ PrintF("\n");
+ // t8, k0, LO.
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64
+ " %14" PRId64 " \t%3s: 0x%016" PRIx64 " %14" PRId64 " \n",
+ REG_INFO(24), REG_INFO(26), REG_INFO(32));
+ // t9, k1, HI.
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64
+ " %14" PRId64 " \t%3s: 0x%016" PRIx64 " %14" PRId64 " \n",
+ REG_INFO(25), REG_INFO(27), REG_INFO(33));
+ // sp, fp, gp.
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64
+ " %14" PRId64 " \t%3s: 0x%016" PRIx64 " %14" PRId64 " \n",
+ REG_INFO(29), REG_INFO(30), REG_INFO(28));
+ // pc.
+ PrintF("%3s: 0x%016" PRIx64 " %14" PRId64 " \t%3s: 0x%016" PRIx64
+ " %14" PRId64 " \n",
+ REG_INFO(31), REG_INFO(34));
+
+#undef REG_INFO
+}
+
+void Loong64Debugger::PrintAllRegsIncludingFPU() {
+#define FPU_REG_INFO(n) \
+ FPURegisters::Name(n), GetFPURegisterValue(n), GetFPURegisterValueDouble(n)
+
+ PrintAllRegs();
+
+ PrintF("\n\n");
+ // f0, f1, f2, ... f31.
+ // TODO(plind): consider printing 2 columns for space efficiency.
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(0));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(1));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(2));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(3));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(4));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(5));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(6));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(7));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(8));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(9));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(10));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(11));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(12));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(13));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(14));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(15));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(16));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(17));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(18));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(19));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(20));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(21));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(22));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(23));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(24));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(25));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(26));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(27));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(28));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(29));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(30));
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n", FPU_REG_INFO(31));
+
+#undef FPU_REG_INFO
+}
+
+void Loong64Debugger::Debug() {
+ intptr_t last_pc = -1;
+ bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+ char cmd[COMMAND_SIZE + 1];
+ char arg1[ARG_SIZE + 1];
+ char arg2[ARG_SIZE + 1];
+ char* argv[3] = {cmd, arg1, arg2};
+
+ // Make sure to have a proper terminating character if reaching the limit.
+ cmd[COMMAND_SIZE] = 0;
+ arg1[ARG_SIZE] = 0;
+ arg2[ARG_SIZE] = 0;
+
+ // Undo all set breakpoints while running in the debugger shell. This will
+ // make them invisible to all commands.
+ UndoBreakpoints();
+
+ while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) {
+ if (last_pc != sim_->get_pc()) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ v8::base::EmbeddedVector<char, 256> buffer;
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(sim_->get_pc()));
+ PrintF(" 0x%016" PRIx64 " %s\n", sim_->get_pc(), buffer.begin());
+ last_pc = sim_->get_pc();
+ }
+ char* line = ReadLine("sim> ");
+ if (line == nullptr) {
+ break;
+ } else {
+ char* last_input = sim_->last_debugger_input();
+ if (strcmp(line, "\n") == 0 && last_input != nullptr) {
+ line = last_input;
+ } else {
+ // Ownership is transferred to sim_.
+ sim_->set_last_debugger_input(line);
+ }
+ // Use sscanf to parse the individual parts of the command line. At the
+ // moment no command expects more than two parameters.
+ int argc = SScanF(line,
+ "%" XSTR(COMMAND_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s "
+ "%" XSTR(ARG_SIZE) "s",
+ cmd, arg1, arg2);
+ if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+ Instruction* instr = reinterpret_cast<Instruction*>(sim_->get_pc());
+ if (!(instr->IsTrap()) ||
+ instr->InstructionBits() == rtCallRedirInstr) {
+ sim_->InstructionDecode(
+ reinterpret_cast<Instruction*>(sim_->get_pc()));
+ } else {
+ // Allow si to jump over generated breakpoints.
+ PrintF("/!\\ Jumping over generated breakpoint.\n");
+ sim_->set_pc(sim_->get_pc() + kInstrSize);
+ }
+ } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+ // Execute the one instruction we broke at with breakpoints disabled.
+ sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
+ // Leave the debugger shell.
+ done = true;
+ } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+ if (argc == 2) {
+ int64_t value;
+ double dvalue;
+ if (strcmp(arg1, "all") == 0) {
+ PrintAllRegs();
+ } else if (strcmp(arg1, "allf") == 0) {
+ PrintAllRegsIncludingFPU();
+ } else {
+ int regnum = Registers::Number(arg1);
+ int fpuregnum = FPURegisters::Number(arg1);
+
+ if (regnum != kInvalidRegister) {
+ value = GetRegisterValue(regnum);
+ PrintF("%s: 0x%08" PRIx64 " %" PRId64 " \n", arg1, value,
+ value);
+ } else if (fpuregnum != kInvalidFPURegister) {
+ value = GetFPURegisterValue(fpuregnum);
+ dvalue = GetFPURegisterValueDouble(fpuregnum);
+ PrintF("%3s: 0x%016" PRIx64 " %16.4e\n",
+ FPURegisters::Name(fpuregnum), value, dvalue);
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ }
+ } else {
+ if (argc == 3) {
+ if (strcmp(arg2, "single") == 0) {
+ int64_t value;
+ float fvalue;
+ int fpuregnum = FPURegisters::Number(arg1);
+
+ if (fpuregnum != kInvalidFPURegister) {
+ value = GetFPURegisterValue(fpuregnum);
+ value &= 0xFFFFFFFFUL;
+ fvalue = GetFPURegisterValueFloat(fpuregnum);
+ PrintF("%s: 0x%08" PRIx64 " %11.4e\n", arg1, value, fvalue);
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ PrintF("print <fpu register> single\n");
+ }
+ } else {
+ PrintF("print <register> or print <fpu register> single\n");
+ }
+ }
+ } else if ((strcmp(cmd, "po") == 0) ||
+ (strcmp(cmd, "printobject") == 0)) {
+ if (argc == 2) {
+ int64_t value;
+ StdoutStream os;
+ if (GetValue(arg1, &value)) {
+ Object obj(value);
+ os << arg1 << ": \n";
+#ifdef DEBUG
+ obj.Print(os);
+ os << "\n";
+#else
+ os << Brief(obj) << "\n";
+#endif
+ } else {
+ os << arg1 << " unrecognized\n";
+ }
+ } else {
+ PrintF("printobject <value>\n");
+ }
+ } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0 ||
+ strcmp(cmd, "dump") == 0) {
+ int64_t* cur = nullptr;
+ int64_t* end = nullptr;
+ int next_arg = 1;
+
+ if (strcmp(cmd, "stack") == 0) {
+ cur = reinterpret_cast<int64_t*>(sim_->get_register(Simulator::sp));
+ } else { // Command "mem" or "dump".
+ int64_t value;
+ if (!GetValue(arg1, &value)) {
+ PrintF("%s unrecognized\n", arg1);
+ continue;
+ }
+ cur = reinterpret_cast<int64_t*>(value);
+ next_arg++;
+ }
+
+ int64_t words;
+ if (argc == next_arg) {
+ words = 10;
+ } else {
+ if (!GetValue(argv[next_arg], &words)) {
+ words = 10;
+ }
+ }
+ end = cur + words;
+
+ bool skip_obj_print = (strcmp(cmd, "dump") == 0);
+ while (cur < end) {
+ PrintF(" 0x%012" PRIxPTR " : 0x%016" PRIx64 " %14" PRId64 " ",
+ reinterpret_cast<intptr_t>(cur), *cur, *cur);
+ Object obj(*cur);
+ Heap* current_heap = sim_->isolate_->heap();
+ if (!skip_obj_print) {
+ if (obj.IsSmi() ||
+ IsValidHeapObject(current_heap, HeapObject::cast(obj))) {
+ PrintF(" (");
+ if (obj.IsSmi()) {
+ PrintF("smi %d", Smi::ToInt(obj));
+ } else {
+ obj.ShortPrint();
+ }
+ PrintF(")");
+ }
+ }
+ PrintF("\n");
+ cur++;
+ }
+
+ } else if ((strcmp(cmd, "disasm") == 0) || (strcmp(cmd, "dpc") == 0) ||
+ (strcmp(cmd, "di") == 0)) {
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ v8::base::EmbeddedVector<char, 256> buffer;
+
+ byte* cur = nullptr;
+ byte* end = nullptr;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ end = cur + (10 * kInstrSize);
+ } else if (argc == 2) {
+ int regnum = Registers::Number(arg1);
+ if (regnum != kInvalidRegister || strncmp(arg1, "0x", 2) == 0) {
+ // The argument is an address or a register name.
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(value);
+ // Disassemble 10 instructions at <arg1>.
+ end = cur + (10 * kInstrSize);
+ }
+ } else {
+ // The argument is the number of instructions.
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ // Disassemble <arg1> instructions.
+ end = cur + (value * kInstrSize);
+ }
+ }
+ } else {
+ int64_t value1;
+ int64_t value2;
+ if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+ cur = reinterpret_cast<byte*>(value1);
+ end = cur + (value2 * kInstrSize);
+ }
+ }
+
+ while (cur < end) {
+ dasm.InstructionDecode(buffer, cur);
+ PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
+ buffer.begin());
+ cur += kInstrSize;
+ }
+ } else if (strcmp(cmd, "gdb") == 0) {
+ PrintF("relinquishing control to gdb\n");
+ v8::base::OS::DebugBreak();
+ PrintF("regaining control from gdb\n");
+ } else if (strcmp(cmd, "break") == 0) {
+ if (argc == 2) {
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
+ PrintF("setting breakpoint failed\n");
+ }
+ } else {
+ PrintF("%s unrecognized\n", arg1);
+ }
+ } else {
+ PrintF("break <address>\n");
+ }
+ } else if (strcmp(cmd, "del") == 0) {
+ if (!DeleteBreakpoint(nullptr)) {
+ PrintF("deleting breakpoint failed\n");
+ }
+ } else if (strcmp(cmd, "flags") == 0) {
+ PrintF("No flags on LOONG64 !\n");
+ } else if (strcmp(cmd, "stop") == 0) {
+ int64_t value;
+ intptr_t stop_pc = sim_->get_pc() - 2 * kInstrSize;
+ Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
+ Instruction* msg_address =
+ reinterpret_cast<Instruction*>(stop_pc + kInstrSize);
+ if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
+ // Remove the current stop.
+ if (sim_->IsStopInstruction(stop_instr)) {
+ stop_instr->SetInstructionBits(kNopInstr);
+ msg_address->SetInstructionBits(kNopInstr);
+ } else {
+ PrintF("Not at debugger stop.\n");
+ }
+ } else if (argc == 3) {
+ // Print information about all/the specified breakpoint(s).
+ if (strcmp(arg1, "info") == 0) {
+ if (strcmp(arg2, "all") == 0) {
+ PrintF("Stop information:\n");
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->PrintStopInfo(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->PrintStopInfo(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "enable") == 0) {
+ // Enable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->EnableStop(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->EnableStop(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ } else if (strcmp(arg1, "disable") == 0) {
+ // Disable all/the specified breakpoint(s).
+ if (strcmp(arg2, "all") == 0) {
+ for (uint32_t i = kMaxWatchpointCode + 1; i <= kMaxStopCode;
+ i++) {
+ sim_->DisableStop(i);
+ }
+ } else if (GetValue(arg2, &value)) {
+ sim_->DisableStop(value);
+ } else {
+ PrintF("Unrecognized argument.\n");
+ }
+ }
+ } else {
+ PrintF("Wrong usage. Use help command for more information.\n");
+ }
+ } else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) {
+ // Print registers and disassemble.
+ PrintAllRegs();
+ PrintF("\n");
+
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ v8::base::EmbeddedVector<char, 256> buffer;
+
+ byte* cur = nullptr;
+ byte* end = nullptr;
+
+ if (argc == 1) {
+ cur = reinterpret_cast<byte*>(sim_->get_pc());
+ end = cur + (10 * kInstrSize);
+ } else if (argc == 2) {
+ int64_t value;
+ if (GetValue(arg1, &value)) {
+ cur = reinterpret_cast<byte*>(value);
+ // No length parameter passed; assume 10 instructions.
+ end = cur + (10 * kInstrSize);
+ }
+ } else {
+ int64_t value1;
+ int64_t value2;
+ if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+ cur = reinterpret_cast<byte*>(value1);
+ end = cur + (value2 * kInstrSize);
+ }
+ }
+
+ while (cur < end) {
+ dasm.InstructionDecode(buffer, cur);
+ PrintF(" 0x%08" PRIxPTR " %s\n", reinterpret_cast<intptr_t>(cur),
+ buffer.begin());
+ cur += kInstrSize;
+ }
+ } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+ PrintF("cont\n");
+ PrintF(" continue execution (alias 'c')\n");
+ PrintF("stepi\n");
+ PrintF(" step one instruction (alias 'si')\n");
+ PrintF("print <register>\n");
+ PrintF(" print register content (alias 'p')\n");
+ PrintF(" use register name 'all' to print all registers\n");
+ PrintF("printobject <register>\n");
+ PrintF(" print an object from a register (alias 'po')\n");
+ PrintF("stack [<words>]\n");
+ PrintF(" dump stack content, default dump 10 words)\n");
+ PrintF("mem <address> [<words>]\n");
+ PrintF(" dump memory content, default dump 10 words)\n");
+ PrintF("dump [<words>]\n");
+ PrintF(
+ " dump memory content without pretty printing JS objects, default "
+ "dump 10 words)\n");
+ PrintF("flags\n");
+ PrintF(" print flags\n");
+ PrintF("disasm [<instructions>]\n");
+ PrintF("disasm [<address/register>]\n");
+ PrintF("disasm [[<address/register>] <instructions>]\n");
+ PrintF(" disassemble code, default is 10 instructions\n");
+ PrintF(" from pc (alias 'di')\n");
+ PrintF("gdb\n");
+ PrintF(" enter gdb\n");
+ PrintF("break <address>\n");
+ PrintF(" set a break point on the address\n");
+ PrintF("del\n");
+ PrintF(" delete the breakpoint\n");
+ PrintF("stop feature:\n");
+ PrintF(" Description:\n");
+ PrintF(" Stops are debug instructions inserted by\n");
+ PrintF(" the Assembler::stop() function.\n");
+ PrintF(" When hitting a stop, the Simulator will\n");
+ PrintF(" stop and give control to the Debugger.\n");
+ PrintF(" All stop codes are watched:\n");
+ PrintF(" - They can be enabled / disabled: the Simulator\n");
+ PrintF(" will / won't stop when hitting them.\n");
+ PrintF(" - The Simulator keeps track of how many times they \n");
+ PrintF(" are met. (See the info command.) Going over a\n");
+ PrintF(" disabled stop still increases its counter. \n");
+ PrintF(" Commands:\n");
+ PrintF(" stop info all/<code> : print infos about number <code>\n");
+ PrintF(" or all stop(s).\n");
+ PrintF(" stop enable/disable all/<code> : enables / disables\n");
+ PrintF(" all or number <code> stop(s)\n");
+ PrintF(" stop unstop\n");
+ PrintF(" ignore the stop instruction at the current location\n");
+ PrintF(" from now on\n");
+ } else {
+ PrintF("Unknown command: %s\n", cmd);
+ }
+ }
+ }
+
+ // Add all the breakpoints back to stop execution and enter the debugger
+ // shell when hit.
+ RedoBreakpoints();
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
+bool Simulator::ICacheMatch(void* one, void* two) {
+ DCHECK_EQ(reinterpret_cast<intptr_t>(one) & CachePage::kPageMask, 0);
+ DCHECK_EQ(reinterpret_cast<intptr_t>(two) & CachePage::kPageMask, 0);
+ return one == two;
+}
+
+static uint32_t ICacheHash(void* key) {
+ return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
+}
+
+static bool AllOnOnePage(uintptr_t start, size_t size) {
+ intptr_t start_page = (start & ~CachePage::kPageMask);
+ intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+ return start_page == end_page;
+}
+
+void Simulator::set_last_debugger_input(char* input) {
+ DeleteArray(last_debugger_input_);
+ last_debugger_input_ = input;
+}
+
+void Simulator::SetRedirectInstruction(Instruction* instruction) {
+ instruction->SetInstructionBits(rtCallRedirInstr);
+}
+
+void Simulator::FlushICache(base::CustomMatcherHashMap* i_cache,
+ void* start_addr, size_t size) {
+ int64_t start = reinterpret_cast<int64_t>(start_addr);
+ int64_t intra_line = (start & CachePage::kLineMask);
+ start -= intra_line;
+ size += intra_line;
+ size = ((size - 1) | CachePage::kLineMask) + 1;
+ int offset = (start & CachePage::kPageMask);
+ while (!AllOnOnePage(start, size - 1)) {
+ int bytes_to_flush = CachePage::kPageSize - offset;
+ FlushOnePage(i_cache, start, bytes_to_flush);
+ start += bytes_to_flush;
+ size -= bytes_to_flush;
+ DCHECK_EQ((int64_t)0, start & CachePage::kPageMask);
+ offset = 0;
+ }
+ if (size != 0) {
+ FlushOnePage(i_cache, start, size);
+ }
+}
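
FlushICache above first rounds the start address down to a cache-line boundary and grows the size to a whole number of lines, then peels off page-sized chunks. A small standalone sketch of the alignment arithmetic, with an assumed 32-byte line:

#include <cstddef>
#include <cstdint>

constexpr int64_t kLineSize = 32;  // assumed line size
constexpr int64_t kLineMask = kLineSize - 1;

void AlignToLines(int64_t* start, size_t* size) {
  int64_t intra_line = *start & kLineMask;  // offset within the first line
  *start -= intra_line;                     // round start down to the line
  *size += intra_line;                      // still cover the same bytes
  *size = ((*size - 1) | kLineMask) + 1;    // round size up to whole lines
}

// start = 0x105, size = 3  ->  start = 0x100, size = 32 (one full line).
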
+
+CachePage* Simulator::GetCachePage(base::CustomMatcherHashMap* i_cache,
+ void* page) {
+ base::HashMap::Entry* entry = i_cache->LookupOrInsert(page, ICacheHash(page));
+ if (entry->value == nullptr) {
+ CachePage* new_page = new CachePage();
+ entry->value = new_page;
+ }
+ return reinterpret_cast<CachePage*>(entry->value);
+}
+
+// Flush from start up to and not including start + size.
+void Simulator::FlushOnePage(base::CustomMatcherHashMap* i_cache,
+ intptr_t start, size_t size) {
+ DCHECK_LE(size, CachePage::kPageSize);
+ DCHECK(AllOnOnePage(start, size - 1));
+ DCHECK_EQ(start & CachePage::kLineMask, 0);
+ DCHECK_EQ(size & CachePage::kLineMask, 0);
+ void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+ int offset = (start & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePage(i_cache, page);
+ char* valid_bytemap = cache_page->ValidityByte(offset);
+ memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
+void Simulator::CheckICache(base::CustomMatcherHashMap* i_cache,
+ Instruction* instr) {
+ int64_t address = reinterpret_cast<int64_t>(instr);
+ void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+ void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+ int offset = (address & CachePage::kPageMask);
+ CachePage* cache_page = GetCachePage(i_cache, page);
+ char* cache_valid_byte = cache_page->ValidityByte(offset);
+ bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+ char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
+ if (cache_hit) {
+ // Check that the data in memory matches the contents of the I-cache.
+ CHECK_EQ(0, memcmp(reinterpret_cast<void*>(instr),
+ cache_page->CachedData(offset), kInstrSize));
+ } else {
+ // Cache miss. Load memory into the cache.
+ memcpy(cached_line, line, CachePage::kLineLength);
+ *cache_valid_byte = CachePage::LINE_VALID;
+ }
+}
+
+Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
+ // Set up simulator support first. Some of this information is needed to
+ // set up the architecture state.
+ stack_size_ = FLAG_sim_stack_size * KB;
+ stack_ = reinterpret_cast<char*>(base::Malloc(stack_size_));
+ pc_modified_ = false;
+ icount_ = 0;
+ break_count_ = 0;
+ break_pc_ = nullptr;
+ break_instr_ = 0;
+
+ // Set up architecture state.
+ // All registers are initialized to zero to start with.
+ for (int i = 0; i < kNumSimuRegisters; i++) {
+ registers_[i] = 0;
+ }
+ for (int i = 0; i < kNumFPURegisters; i++) {
+ FPUregisters_[i] = 0;
+ }
+ for (int i = 0; i < kNumCFRegisters; i++) {
+ CFregisters_[i] = 0;
+ }
+
+ FCSR_ = 0;
+
+ // The sp is initialized to point to the bottom (high address) of the
+ // allocated stack area. To be safe in potential stack underflows we leave
+ // some buffer below.
+ registers_[sp] = reinterpret_cast<int64_t>(stack_) + stack_size_ - 64;
+ // The ra and pc are initialized to a known bad value that will cause an
+ // access violation if the simulator ever tries to execute it.
+ registers_[pc] = bad_ra;
+ registers_[ra] = bad_ra;
+
+ last_debugger_input_ = nullptr;
+}
+
+Simulator::~Simulator() {
+ GlobalMonitor::Get()->RemoveLinkedAddress(&global_monitor_thread_);
+ base::Free(stack_);
+}
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::current(Isolate* isolate) {
+ v8::internal::Isolate::PerIsolateThreadData* isolate_data =
+ isolate->FindOrAllocatePerThreadDataForThisThread();
+ DCHECK_NOT_NULL(isolate_data);
+
+ Simulator* sim = isolate_data->simulator();
+ if (sim == nullptr) {
+ // TODO(146): delete the simulator object when a thread/isolate goes away.
+ sim = new Simulator(isolate);
+ isolate_data->set_simulator(sim);
+ }
+ return sim;
+}
+
+// Sets the register in the architecture state. It will also deal with updating
+// Simulator internal state for special registers such as PC.
+void Simulator::set_register(int reg, int64_t value) {
+ DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
+ if (reg == pc) {
+ pc_modified_ = true;
+ }
+
+ // Zero register always holds 0.
+ registers_[reg] = (reg == 0) ? 0 : value;
+}
+
+void Simulator::set_dw_register(int reg, const int* dbl) {
+ DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
+ registers_[reg] = dbl[1];
+ registers_[reg] = registers_[reg] << 32;
+ registers_[reg] += dbl[0];
+}
+
+void Simulator::set_fpu_register(int fpureg, int64_t value) {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ FPUregisters_[fpureg] = value;
+}
+
+void Simulator::set_fpu_register_word(int fpureg, int32_t value) {
+ // Set ONLY lower 32-bits, leaving upper bits untouched.
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ int32_t* pword;
+ pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]);
+
+ *pword = value;
+}
+
+void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) {
+ // Set ONLY upper 32-bits, leaving lower bits untouched.
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ int32_t* phiword;
+ phiword = (reinterpret_cast<int32_t*>(&FPUregisters_[fpureg])) + 1;
+
+ *phiword = value;
+}
+
+void Simulator::set_fpu_register_float(int fpureg, float value) {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ *bit_cast<float*>(&FPUregisters_[fpureg]) = value;
+}
+
+void Simulator::set_fpu_register_double(int fpureg, double value) {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ *bit_cast<double*>(&FPUregisters_[fpureg]) = value;
+}
+
+void Simulator::set_cf_register(int cfreg, bool value) {
+ DCHECK((cfreg >= 0) && (cfreg < kNumCFRegisters));
+ CFregisters_[cfreg] = value;
+}
+
+// Get the register from the architecture state. This function does handle
+// the special case of accessing the PC register.
+int64_t Simulator::get_register(int reg) const {
+ DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
+ if (reg == 0)
+ return 0;
+ else
+ return registers_[reg];
+}
+
+double Simulator::get_double_from_register_pair(int reg) {
+ // TODO(plind): bad ABI stuff, refactor or remove.
+ DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
+
+ double dm_val = 0.0;
+ // Read the bits from the unsigned integer register_[] array
+ // into the double precision floating point value and return it.
+ char buffer[sizeof(registers_[0])];
+ memcpy(buffer, &registers_[reg], sizeof(registers_[0]));
+ memcpy(&dm_val, buffer, sizeof(registers_[0]));
+ return (dm_val);
+}
+
+int64_t Simulator::get_fpu_register(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return FPUregisters_[fpureg];
+}
+
+int32_t Simulator::get_fpu_register_word(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return static_cast<int32_t>(FPUregisters_[fpureg] & 0xFFFFFFFF);
+}
+
+int32_t Simulator::get_fpu_register_signed_word(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return static_cast<int32_t>(FPUregisters_[fpureg] & 0xFFFFFFFF);
+}
+
+int32_t Simulator::get_fpu_register_hi_word(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return static_cast<int32_t>((FPUregisters_[fpureg] >> 32) & 0xFFFFFFFF);
+}
+
+float Simulator::get_fpu_register_float(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return *bit_cast<float*>(const_cast<int64_t*>(&FPUregisters_[fpureg]));
+}
+
+double Simulator::get_fpu_register_double(int fpureg) const {
+ DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+ return *bit_cast<double*>(&FPUregisters_[fpureg]);
+}
+
+bool Simulator::get_cf_register(int cfreg) const {
+ DCHECK((cfreg >= 0) && (cfreg < kNumCFRegisters));
+ return CFregisters_[cfreg];
+}
+
+// Runtime FP routines take up to two double arguments and zero
+// or one integer argument. All are constructed here,
+// from a0-a3 or fa0 and fa1 (n64).
+void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
+ const int fparg2 = f1;
+ *x = get_fpu_register_double(f0);
+ *y = get_fpu_register_double(fparg2);
+ *z = static_cast<int32_t>(get_register(a2));
+}
+
+// The return value is either in v0/v1 or f0.
+void Simulator::SetFpResult(const double& result) {
+ set_fpu_register_double(0, result);
+}
+
+// Helper functions for setting and testing the FCSR register's bits.
+void Simulator::set_fcsr_bit(uint32_t cc, bool value) {
+ if (value) {
+ FCSR_ |= (1 << cc);
+ } else {
+ FCSR_ &= ~(1 << cc);
+ }
+}
+
+bool Simulator::test_fcsr_bit(uint32_t cc) { return FCSR_ & (1 << cc); }
+
+void Simulator::set_fcsr_rounding_mode(FPURoundingMode mode) {
+ FCSR_ |= mode & kFPURoundingModeMask;
+}
+
+unsigned int Simulator::get_fcsr_rounding_mode() {
+ return FCSR_ & kFPURoundingModeMask;
+}
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round_error(double original, double rounded) {
+ bool ret = false;
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, false);
+ set_fcsr_bit(kFCSRUnderflowCauseBit, false);
+ set_fcsr_bit(kFCSROverflowCauseBit, false);
+ set_fcsr_bit(kFCSRInexactCauseBit, false);
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ set_fcsr_bit(kFCSRInexactCauseBit, true);
+ }
+
+ if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
+ set_fcsr_bit(kFCSRUnderflowCauseBit, true);
+ ret = true;
+ }
+
+ if (rounded > max_int32 || rounded < min_int32) {
+ set_fcsr_bit(kFCSROverflowCauseBit, true);
+ // The reference is not really clear but it seems this is required:
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
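
To make the cause-bit logic above concrete, a few illustrative double-to-int32 conversions and the bits they set (derived by tracing the function, not from a spec):

// original   rounded   effect
// 1.5        2.0       Inexact only; returns false (in int32 range)
// 3e9        3e9       Overflow + InvalidOp; returns true (> INT32_MAX)
// 1e-310     1e-310    Underflow (denormal, |x| < DBL_MIN); returns true
// NaN        NaN       InvalidOp (and Inexact, since NaN != NaN); returns true
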
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round64_error(double original, double rounded) {
+ bool ret = false;
+ // The value of INT64_MAX (2^63-1) can't be represented exactly as a double,
+ // so we load the closest representable value, 2^63, into max_int64.
+ double max_int64 = static_cast<double>(std::numeric_limits<int64_t>::max());
+ double min_int64 = std::numeric_limits<int64_t>::min();
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, false);
+ set_fcsr_bit(kFCSRUnderflowCauseBit, false);
+ set_fcsr_bit(kFCSROverflowCauseBit, false);
+ set_fcsr_bit(kFCSRInexactCauseBit, false);
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ set_fcsr_bit(kFCSRInexactCauseBit, true);
+ }
+
+ if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
+ set_fcsr_bit(kFCSRUnderflowCauseBit, true);
+ ret = true;
+ }
+
+ if (rounded >= max_int64 || rounded < min_int64) {
+ set_fcsr_bit(kFCSROverflowCauseBit, true);
+ // The reference is not really clear but it seems this is required:
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round_error(float original, float rounded) {
+ bool ret = false;
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, false);
+ set_fcsr_bit(kFCSRUnderflowCauseBit, false);
+ set_fcsr_bit(kFCSROverflowCauseBit, false);
+ set_fcsr_bit(kFCSRInexactCauseBit, false);
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ set_fcsr_bit(kFCSRInexactCauseBit, true);
+ }
+
+ if (rounded < FLT_MIN && rounded > -FLT_MIN && rounded != 0) {
+ set_fcsr_bit(kFCSRUnderflowCauseBit, true);
+ ret = true;
+ }
+
+ if (rounded > max_int32 || rounded < min_int32) {
+ set_fcsr_bit(kFCSROverflowCauseBit, true);
+ // The reference is not really clear but it seems this is required:
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
+
+void Simulator::set_fpu_register_word_invalid_result(float original,
+ float rounded) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register_word(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::set_fpu_register_invalid_result(float original, float rounded) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::set_fpu_register_invalid_result64(float original,
+ float rounded) {
+ // The value of INT64_MAX (2^63-1) can't be represented exactly as a double,
+ // so we load the closest representable value, 2^63, into max_int64.
+ double max_int64 = static_cast<double>(std::numeric_limits<int64_t>::max());
+ double min_int64 = std::numeric_limits<int64_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded >= max_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ } else if (rounded < min_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::set_fpu_register_word_invalid_result(double original,
+ double rounded) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register_word(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register_word(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::set_fpu_register_invalid_result(double original,
+ double rounded) {
+ double max_int32 = std::numeric_limits<int32_t>::max();
+ double min_int32 = std::numeric_limits<int32_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded > max_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResult);
+ } else if (rounded < min_int32) {
+ set_fpu_register(fd_reg(), kFPUInvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void Simulator::set_fpu_register_invalid_result64(double original,
+ double rounded) {
+ // The value of INT64_MAX (2^63-1) can't be represented exactly as a double,
+ // so we load the closest representable value, 2^63, into max_int64.
+ double max_int64 = static_cast<double>(std::numeric_limits<int64_t>::max());
+ double min_int64 = std::numeric_limits<int64_t>::min();
+ if (std::isnan(original)) {
+ set_fpu_register(fd_reg(), 0);
+ } else if (rounded >= max_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResult);
+ } else if (rounded < min_int64) {
+ set_fpu_register(fd_reg(), kFPU64InvalidResultNegative);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round64_error(float original, float rounded) {
+ bool ret = false;
+ // The value of INT64_MAX (2^63-1) can't be represented exactly as a double,
+ // so we load the closest representable value, 2^63, into max_int64.
+ double max_int64 = static_cast<double>(std::numeric_limits<int64_t>::max());
+ double min_int64 = std::numeric_limits<int64_t>::min();
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, false);
+ set_fcsr_bit(kFCSRUnderflowCauseBit, false);
+ set_fcsr_bit(kFCSROverflowCauseBit, false);
+ set_fcsr_bit(kFCSRInexactCauseBit, false);
+
+ if (!std::isfinite(original) || !std::isfinite(rounded)) {
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ if (original != rounded) {
+ set_fcsr_bit(kFCSRInexactCauseBit, true);
+ }
+
+ if (rounded < FLT_MIN && rounded > -FLT_MIN && rounded != 0) {
+ set_fcsr_bit(kFCSRUnderflowCauseBit, true);
+ ret = true;
+ }
+
+ if (rounded >= max_int64 || rounded < min_int64) {
+ set_fcsr_bit(kFCSROverflowCauseBit, true);
+ // The reference is not really clear but it seems this is required:
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ ret = true;
+ }
+
+ return ret;
+}
+
+// For ftint instructions only
+void Simulator::round_according_to_fcsr(double toRound, double* rounded,
+ int32_t* rounded_int) {
+ // 0 RN (round to nearest): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, round to the even one.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result.
+
+ // 2 RP (round up, or toward +infinity): Round a result to the
+ // next representable value up.
+
+ // 3 RM (round down, or toward −infinity): Round a result to
+ // the next representable value down.
+ // switch ((FCSR_ >> 8) & 3) {
+ switch (FCSR_ & kFPURoundingModeMask) {
+ case kRoundToNearest:
+ *rounded = std::floor(toRound + 0.5);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ *rounded_int -= 1;
+ *rounded -= 1.;
+ }
+ break;
+ case kRoundToZero:
+ *rounded = trunc(toRound);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ break;
+ case kRoundToPlusInf:
+ *rounded = std::ceil(toRound);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ break;
+ case kRoundToMinusInf:
+ *rounded = std::floor(toRound);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ break;
+ }
+}
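
The kRoundToNearest branch above implements ties-to-even by rounding half-up first and then stepping back when the result is odd and the input was exactly halfway. Worked examples, tracing the code:

// toRound = 2.5 -> floor(3.0) = 3; odd and exactly halfway -> 2 (even)
// toRound = 3.5 -> floor(4.0) = 4; even, no adjustment     -> 4
// toRound = 2.3 -> floor(2.8) = 2; not halfway             -> 2
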
+
+void Simulator::round64_according_to_fcsr(double toRound, double* rounded,
+ int64_t* rounded_int) {
+ // 0 RN (round to nearest): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, round to the even one.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result.
+
+ // 2 RP (round up, or toward +infinity): Round a result to the
+ // next representable value up.
+
+ // 3 RM (round down, or toward −infinity): Round a result to
+ // the next representable value down.
+ switch (FCSR_ & kFPURoundingModeMask) {
+ case kRoundToNearest:
+ *rounded = std::floor(toRound + 0.5);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ *rounded_int -= 1;
+ *rounded -= 1.;
+ }
+ break;
+ case kRoundToZero:
+ *rounded = std::trunc(toRound);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ break;
+ case kRoundToPlusInf:
+ *rounded = std::ceil(toRound);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ break;
+ case kRoundToMinusInf:
+ *rounded = std::floor(toRound);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ break;
+ }
+}
+
+void Simulator::round_according_to_fcsr(float toRound, float* rounded,
+ int32_t* rounded_int) {
+ // 0 RN (round to nearest): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, round to the even one.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result.
+
+ // 2 RP (round up, or toward +infinity): Round a result to the
+ // next representable value up.
+
+ // 3 RM (round down, or toward −infinity): Round a result to
+ // the next representable value down.
+ switch (FCSR_ & kFPURoundingModeMask) {
+ case kRoundToNearest:
+ *rounded = std::floor(toRound + 0.5);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ *rounded_int -= 1;
+ *rounded -= 1.f;
+ }
+ break;
+ case kRoundToZero:
+ *rounded = std::trunc(toRound);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ break;
+ case kRoundToPlusInf:
+ *rounded = std::ceil(toRound);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ break;
+ case kRoundToMinusInf:
+ *rounded = std::floor(toRound);
+ *rounded_int = static_cast<int32_t>(*rounded);
+ break;
+ }
+}
+
+void Simulator::round64_according_to_fcsr(float toRound, float* rounded,
+ int64_t* rounded_int) {
+ // 0 RN (round to nearest): Round a result to the nearest
+ // representable value; if the result is exactly halfway between
+ // two representable values, round to the even one.
+
+ // 1 RZ (round toward zero): Round a result to the closest
+ // representable value whose absolute value is less than or
+ // equal to the infinitely accurate result.
+
+ // 2 RP (round up, or toward +infinity): Round a result to the
+ // next representable value up.
+
+ // 3 RM (round down, or toward −infinity): Round a result to
+ // the next representable value down.
+ switch (FCSR_ & kFPURoundingModeMask) {
+ case kRoundToNearest:
+ *rounded = std::floor(toRound + 0.5);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ if ((*rounded_int & 1) != 0 && *rounded_int - toRound == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ *rounded_int -= 1;
+ *rounded -= 1.f;
+ }
+ break;
+ case kRoundToZero:
+      *rounded = std::trunc(toRound);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ break;
+ case kRoundToPlusInf:
+ *rounded = std::ceil(toRound);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ break;
+ case kRoundToMinusInf:
+ *rounded = std::floor(toRound);
+ *rounded_int = static_cast<int64_t>(*rounded);
+ break;
+ }
+}
+
+// Raw access to the PC register.
+void Simulator::set_pc(int64_t value) {
+ pc_modified_ = true;
+ registers_[pc] = value;
+}
+
+bool Simulator::has_bad_pc() const {
+ return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
+}
+
+// Raw access to the PC register without the special adjustment when reading.
+int64_t Simulator::get_pc() const { return registers_[pc]; }
+
+// TODO(plind): refactor this messy debug code when we do unaligned access.
+void Simulator::DieOrDebug() {
+  if (true) {  // The flag that controlled this was removed.
+ Loong64Debugger dbg(this);
+ dbg.Debug();
+ } else {
+ base::OS::Abort();
+ }
+}
+
+void Simulator::TraceRegWr(int64_t value, TraceType t) {
+ if (::v8::internal::FLAG_trace_sim) {
+ union {
+ int64_t fmt_int64;
+ int32_t fmt_int32[2];
+ float fmt_float[2];
+ double fmt_double;
+ } v;
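+    // The union lets the same 64 raw register bits be reinterpreted below as
+    // int32, float, or double, depending on the TraceType being printed.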
+ v.fmt_int64 = value;
+
+ switch (t) {
+ case WORD:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64 ") int32:%" PRId32
+ " uint32:%" PRIu32,
+ v.fmt_int64, icount_, v.fmt_int32[0], v.fmt_int32[0]);
+ break;
+ case DWORD:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64 ") int64:%" PRId64
+ " uint64:%" PRIu64,
+ value, icount_, value, value);
+ break;
+ case FLOAT:
+ base::SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") flt:%e",
+ v.fmt_int64, icount_, v.fmt_float[0]);
+ break;
+ case DOUBLE:
+ base::SNPrintF(trace_buf_, "%016" PRIx64 " (%" PRId64 ") dbl:%e",
+ v.fmt_int64, icount_, v.fmt_double);
+ break;
+ case FLOAT_DOUBLE:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64 ") flt:%e dbl:%e",
+ v.fmt_int64, icount_, v.fmt_float[0], v.fmt_double);
+ break;
+ case WORD_DWORD:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " (%" PRId64 ") int32:%" PRId32
+ " uint32:%" PRIu32 " int64:%" PRId64 " uint64:%" PRIu64,
+ v.fmt_int64, icount_, v.fmt_int32[0], v.fmt_int32[0],
+ v.fmt_int64, v.fmt_int64);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+// TODO(plind): consider making icount_ printing a flag option.
+void Simulator::TraceMemRd(int64_t addr, int64_t value, TraceType t) {
+ if (::v8::internal::FLAG_trace_sim) {
+ union {
+ int64_t fmt_int64;
+ int32_t fmt_int32[2];
+ float fmt_float[2];
+ double fmt_double;
+ } v;
+ v.fmt_int64 = value;
+
+ switch (t) {
+ case WORD:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
+ ") int32:%" PRId32 " uint32:%" PRIu32,
+ v.fmt_int64, addr, icount_, v.fmt_int32[0],
+ v.fmt_int32[0]);
+ break;
+ case DWORD:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
+ ") int64:%" PRId64 " uint64:%" PRIu64,
+ value, addr, icount_, value, value);
+ break;
+ case FLOAT:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
+ ") flt:%e",
+ v.fmt_int64, addr, icount_, v.fmt_float[0]);
+ break;
+ case DOUBLE:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
+ ") dbl:%e",
+ v.fmt_int64, addr, icount_, v.fmt_double);
+ break;
+ case FLOAT_DOUBLE:
+ base::SNPrintF(trace_buf_,
+ "%016" PRIx64 " <-- [%016" PRIx64 "] (%" PRId64
+ ") flt:%e dbl:%e",
+ v.fmt_int64, addr, icount_, v.fmt_float[0],
+ v.fmt_double);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+void Simulator::TraceMemWr(int64_t addr, int64_t value, TraceType t) {
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (t) {
+ case BYTE:
+ base::SNPrintF(trace_buf_,
+ " %02" PRIx8 " --> [%016" PRIx64
+ "] (%" PRId64 ")",
+ static_cast<uint8_t>(value), addr, icount_);
+ break;
+ case HALF:
+ base::SNPrintF(trace_buf_,
+ " %04" PRIx16 " --> [%016" PRIx64
+ "] (%" PRId64 ")",
+ static_cast<uint16_t>(value), addr, icount_);
+ break;
+ case WORD:
+ base::SNPrintF(trace_buf_,
+ " %08" PRIx32 " --> [%016" PRIx64 "] (%" PRId64
+ ")",
+ static_cast<uint32_t>(value), addr, icount_);
+ break;
+ case DWORD:
+ base::SNPrintF(trace_buf_,
+                     "%016" PRIx64 " --> [%016" PRIx64 "] (%" PRId64 ")",
+ value, addr, icount_);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+template <typename T>
+void Simulator::TraceMemRd(int64_t addr, T value) {
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (sizeof(T)) {
+ case 1:
+ base::SNPrintF(trace_buf_,
+ "%08" PRIx8 " <-- [%08" PRIx64 "] (%" PRIu64
+ ") int8:%" PRId8 " uint8:%" PRIu8,
+ static_cast<uint8_t>(value), addr, icount_,
+ static_cast<int8_t>(value), static_cast<uint8_t>(value));
+ break;
+ case 2:
+ base::SNPrintF(trace_buf_,
+ "%08" PRIx16 " <-- [%08" PRIx64 "] (%" PRIu64
+ ") int16:%" PRId16 " uint16:%" PRIu16,
+ static_cast<uint16_t>(value), addr, icount_,
+ static_cast<int16_t>(value),
+ static_cast<uint16_t>(value));
+ break;
+ case 4:
+ base::SNPrintF(trace_buf_,
+ "%08" PRIx32 " <-- [%08" PRIx64 "] (%" PRIu64
+ ") int32:%" PRId32 " uint32:%" PRIu32,
+ static_cast<uint32_t>(value), addr, icount_,
+ static_cast<int32_t>(value),
+ static_cast<uint32_t>(value));
+ break;
+ case 8:
+ base::SNPrintF(trace_buf_,
+ "%08" PRIx64 " <-- [%08" PRIx64 "] (%" PRIu64
+ ") int64:%" PRId64 " uint64:%" PRIu64,
+ static_cast<uint64_t>(value), addr, icount_,
+ static_cast<int64_t>(value),
+ static_cast<uint64_t>(value));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+template <typename T>
+void Simulator::TraceMemWr(int64_t addr, T value) {
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (sizeof(T)) {
+ case 1:
+ base::SNPrintF(trace_buf_,
+ " %02" PRIx8 " --> [%08" PRIx64 "] (%" PRIu64
+ ")",
+ static_cast<uint8_t>(value), addr, icount_);
+ break;
+ case 2:
+ base::SNPrintF(trace_buf_,
+ " %04" PRIx16 " --> [%08" PRIx64 "] (%" PRIu64 ")",
+ static_cast<uint16_t>(value), addr, icount_);
+ break;
+ case 4:
+ base::SNPrintF(trace_buf_,
+ "%08" PRIx32 " --> [%08" PRIx64 "] (%" PRIu64 ")",
+ static_cast<uint32_t>(value), addr, icount_);
+ break;
+ case 8:
+ base::SNPrintF(trace_buf_,
+                     "%016" PRIx64 " --> [%08" PRIx64 "] (%" PRIu64 ")",
+ static_cast<uint64_t>(value), addr, icount_);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+}
+
+// TODO(plind): sign-extend and zero-extend are not implemented properly
+// on all the ReadXX functions; reinterpret_cast does not do it.
+int32_t Simulator::ReadW(int64_t addr, Instruction* instr, TraceType t) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ " \n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ /* if ((addr & 0x3) == 0)*/ {
+ local_monitor_.NotifyLoad();
+ int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+ TraceMemRd(addr, static_cast<int64_t>(*ptr), t);
+ return *ptr;
+ }
+ // PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n",
+ // addr,
+ // reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+ // return 0;
+}
+
+uint32_t Simulator::ReadWU(int64_t addr, Instruction* instr) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ " \n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ // if ((addr & 0x3) == 0) {
+ local_monitor_.NotifyLoad();
+ uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+ TraceMemRd(addr, static_cast<int64_t>(*ptr), WORD);
+ return *ptr;
+ // }
+ // PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
+ // reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+ // return 0;
+}
+
+void Simulator::WriteW(int64_t addr, int32_t value, Instruction* instr) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ " \n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ /*if ((addr & 0x3) == 0)*/ {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, WORD);
+ int* ptr = reinterpret_cast<int*>(addr);
+ *ptr = value;
+ return;
+ }
+ // PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n",
+ // addr,
+ // reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+}
+
+void Simulator::WriteConditionalW(int64_t addr, int32_t value,
+ Instruction* instr, int32_t rk_reg) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ " \n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
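+  // The conditional store succeeds (rk_reg set to 1) only while both the
+  // local and the global monitor still hold the reservation established by
+  // the matching linked load; otherwise rk_reg is set to 0.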
+ if ((addr & 0x3) == 0) {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ if (local_monitor_.NotifyStoreConditional(addr, TransactionSize::Word) &&
+ GlobalMonitor::Get()->NotifyStoreConditional_Locked(
+ addr, &global_monitor_thread_)) {
+ local_monitor_.NotifyStore();
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, WORD);
+ int* ptr = reinterpret_cast<int*>(addr);
+ *ptr = value;
+ set_register(rk_reg, 1);
+ } else {
+ set_register(rk_reg, 0);
+ }
+ return;
+ }
+ PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+}
+
+int64_t Simulator::Read2W(int64_t addr, Instruction* instr) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory read from bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ " \n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ /* if ((addr & kPointerAlignmentMask) == 0)*/ {
+ local_monitor_.NotifyLoad();
+ int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+ TraceMemRd(addr, *ptr);
+ return *ptr;
+ }
+ // PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n",
+ // addr,
+ // reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+ // return 0;
+}
+
+void Simulator::Write2W(int64_t addr, int64_t value, Instruction* instr) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ /*if ((addr & kPointerAlignmentMask) == 0)*/ {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, DWORD);
+ int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+ *ptr = value;
+ return;
+ }
+ // PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n",
+ // addr,
+ // reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+}
+
+void Simulator::WriteConditional2W(int64_t addr, int64_t value,
+ Instruction* instr, int32_t rk_reg) {
+ if (addr >= 0 && addr < 0x400) {
+ // This has to be a nullptr-dereference, drop into debugger.
+ PrintF("Memory write to bad address: 0x%08" PRIx64 " , pc=0x%08" PRIxPTR
+ "\n",
+ addr, reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+ }
+ if ((addr & kPointerAlignmentMask) == 0) {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ if (local_monitor_.NotifyStoreConditional(addr,
+ TransactionSize::DoubleWord) &&
+ GlobalMonitor::Get()->NotifyStoreConditional_Locked(
+ addr, &global_monitor_thread_)) {
+ local_monitor_.NotifyStore();
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, DWORD);
+ int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+ *ptr = value;
+ set_register(rk_reg, 1);
+ } else {
+ set_register(rk_reg, 0);
+ }
+ return;
+ }
+ PrintF("Unaligned write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n", addr,
+ reinterpret_cast<intptr_t>(instr));
+ DieOrDebug();
+}
+
+double Simulator::ReadD(int64_t addr, Instruction* instr) {
+ /*if ((addr & kDoubleAlignmentMask) == 0)*/ {
+ local_monitor_.NotifyLoad();
+ double* ptr = reinterpret_cast<double*>(addr);
+ return *ptr;
+ }
+ // PrintF("Unaligned (double) read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR
+ // "\n",
+ // addr, reinterpret_cast<intptr_t>(instr));
+ // base::OS::Abort();
+ // return 0;
+}
+
+void Simulator::WriteD(int64_t addr, double value, Instruction* instr) {
+ /*if ((addr & kDoubleAlignmentMask) == 0)*/ {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ double* ptr = reinterpret_cast<double*>(addr);
+ *ptr = value;
+ return;
+ }
+ // PrintF("Unaligned (double) write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR
+ // "\n",
+ // addr, reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+}
+
+uint16_t Simulator::ReadHU(int64_t addr, Instruction* instr) {
+ // if ((addr & 1) == 0) {
+ local_monitor_.NotifyLoad();
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ TraceMemRd(addr, static_cast<int64_t>(*ptr));
+ return *ptr;
+ // }
+ // PrintF("Unaligned unsigned halfword read at 0x%08" PRIx64
+ // " , pc=0x%08" V8PRIxPTR "\n",
+ // addr, reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+ // return 0;
+}
+
+int16_t Simulator::ReadH(int64_t addr, Instruction* instr) {
+ // if ((addr & 1) == 0) {
+ local_monitor_.NotifyLoad();
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ TraceMemRd(addr, static_cast<int64_t>(*ptr));
+ return *ptr;
+ // }
+ // PrintF("Unaligned signed halfword read at 0x%08" PRIx64
+ // " , pc=0x%08" V8PRIxPTR "\n",
+ // addr, reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+ // return 0;
+}
+
+void Simulator::WriteH(int64_t addr, uint16_t value, Instruction* instr) {
+ // if ((addr & 1) == 0) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, HALF);
+ uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+ *ptr = value;
+ return;
+ // }
+ // PrintF("Unaligned unsigned halfword write at 0x%08" PRIx64
+ // " , pc=0x%08" V8PRIxPTR "\n",
+ // addr, reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+}
+
+void Simulator::WriteH(int64_t addr, int16_t value, Instruction* instr) {
+ // if ((addr & 1) == 0) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, HALF);
+ int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+ *ptr = value;
+ return;
+ // }
+ // PrintF("Unaligned halfword write at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR
+ // "\n",
+ // addr, reinterpret_cast<intptr_t>(instr));
+ // DieOrDebug();
+}
+
+uint32_t Simulator::ReadBU(int64_t addr) {
+ local_monitor_.NotifyLoad();
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ TraceMemRd(addr, static_cast<int64_t>(*ptr));
+ return *ptr & 0xFF;
+}
+
+int32_t Simulator::ReadB(int64_t addr) {
+ local_monitor_.NotifyLoad();
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ TraceMemRd(addr, static_cast<int64_t>(*ptr));
+ return *ptr;
+}
+
+void Simulator::WriteB(int64_t addr, uint8_t value) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, BYTE);
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+ *ptr = value;
+}
+
+void Simulator::WriteB(int64_t addr, int8_t value) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ TraceMemWr(addr, value, BYTE);
+ int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+ *ptr = value;
+}
+
+template <typename T>
+T Simulator::ReadMem(int64_t addr, Instruction* instr) {
+  int alignment_mask = static_cast<int>(sizeof(T)) - 1;
+ if ((addr & alignment_mask) == 0) {
+ local_monitor_.NotifyLoad();
+ T* ptr = reinterpret_cast<T*>(addr);
+ TraceMemRd(addr, *ptr);
+ return *ptr;
+ }
+ PrintF("Unaligned read of type sizeof(%ld) at 0x%08lx, pc=0x%08" V8PRIxPTR
+ "\n",
+ sizeof(T), addr, reinterpret_cast<intptr_t>(instr));
+ base::OS::Abort();
+ return 0;
+}
+
+template <typename T>
+void Simulator::WriteMem(int64_t addr, T value, Instruction* instr) {
+  int alignment_mask = static_cast<int>(sizeof(T)) - 1;
+ if ((addr & alignment_mask) == 0) {
+ local_monitor_.NotifyStore();
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ GlobalMonitor::Get()->NotifyStore_Locked(&global_monitor_thread_);
+ T* ptr = reinterpret_cast<T*>(addr);
+ *ptr = value;
+ TraceMemWr(addr, value);
+ return;
+ }
+ PrintF("Unaligned write of type sizeof(%ld) at 0x%08lx, pc=0x%08" V8PRIxPTR
+ "\n",
+ sizeof(T), addr, reinterpret_cast<intptr_t>(instr));
+ base::OS::Abort();
+}
+
+// Returns the limit of the stack area to enable checking for stack overflows.
+uintptr_t Simulator::StackLimit(uintptr_t c_limit) const {
+ // The simulator uses a separate JS stack. If we have exhausted the C stack,
+ // we also drop down the JS limit to reflect the exhaustion on the JS stack.
+ if (base::Stack::GetCurrentStackPosition() < c_limit) {
+ return reinterpret_cast<uintptr_t>(get_sp());
+ }
+
+ // Otherwise the limit is the JS stack. Leave a safety margin of 1024 bytes
+ // to prevent overrunning the stack when pushing values.
+ return reinterpret_cast<uintptr_t>(stack_) + 1024;
+}
+
+// Unsupported instructions use Format to print an error and stop execution.
+void Simulator::Format(Instruction* instr, const char* format) {
+ PrintF("Simulator found unsupported instruction:\n 0x%08" PRIxPTR " : %s\n",
+ reinterpret_cast<intptr_t>(instr), format);
+ UNIMPLEMENTED();
+}
+
+// Calls into the V8 runtime are based on this very simple interface.
+// Note: To be able to return two values from some calls, the code in
+// runtime.cc uses the ObjectPair, which is essentially two pointer-sized
+// values returned in a pair of registers. With the code below we assume that
+// all runtime calls return a full ObjectPair. If they don't, the v1 result
+// register contains a bogus value, which is fine because it is caller-saved.
+
+using SimulatorRuntimeCall = ObjectPair (*)(int64_t arg0, int64_t arg1,
+ int64_t arg2, int64_t arg3,
+ int64_t arg4, int64_t arg5,
+ int64_t arg6, int64_t arg7,
+ int64_t arg8, int64_t arg9);
+
+// These prototypes handle the four types of FP calls.
+using SimulatorRuntimeCompareCall = int64_t (*)(double darg0, double darg1);
+using SimulatorRuntimeFPFPCall = double (*)(double darg0, double darg1);
+using SimulatorRuntimeFPCall = double (*)(double darg0);
+using SimulatorRuntimeFPIntCall = double (*)(double darg0, int32_t arg0);
+
+// This signature supports direct calls into an API function's native
+// callback (see FunctionCallback in v8-function-callback.h).
+using SimulatorRuntimeDirectApiCall = void (*)(int64_t arg0);
+using SimulatorRuntimeProfilingApiCall = void (*)(int64_t arg0, void* arg1);
+
+// This signature supports direct calls to accessor getter callbacks.
+using SimulatorRuntimeDirectGetterCall = void (*)(int64_t arg0, int64_t arg1);
+using SimulatorRuntimeProfilingGetterCall = void (*)(int64_t arg0, int64_t arg1,
+ void* arg2);
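+// For a plain BUILTIN_CALL redirection, SoftwareInterrupt() below in essence
+// performs:
+//   SimulatorRuntimeCall target = reinterpret_cast<SimulatorRuntimeCall>(ext);
+//   ObjectPair result = target(arg0, ..., arg9);  // a0-a7 plus 2 stack slots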
+
+// Software interrupt instructions are used by the simulator to call into the
+// C-based V8 runtime. They are also used for debugging with the simulator.
+void Simulator::SoftwareInterrupt() {
+ int32_t opcode_hi15 = instr_.Bits(31, 17);
+ CHECK_EQ(opcode_hi15, 0x15);
+ uint32_t code = instr_.Bits(14, 0);
+  // First, check whether this is a redirected runtime call
+  // (call_rt_redirected).
+ if (instr_.InstructionBits() == rtCallRedirInstr) {
+ Redirection* redirection = Redirection::FromInstruction(instr_.instr());
+
+ int64_t* stack_pointer = reinterpret_cast<int64_t*>(get_register(sp));
+
+ int64_t arg0 = get_register(a0);
+ int64_t arg1 = get_register(a1);
+ int64_t arg2 = get_register(a2);
+ int64_t arg3 = get_register(a3);
+ int64_t arg4 = get_register(a4);
+ int64_t arg5 = get_register(a5);
+ int64_t arg6 = get_register(a6);
+ int64_t arg7 = get_register(a7);
+ int64_t arg8 = stack_pointer[0];
+ int64_t arg9 = stack_pointer[1];
+ STATIC_ASSERT(kMaxCParameters == 10);
+
+ bool fp_call =
+ (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
+ (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
+
+ {
+ // With the hard floating point calling convention, double
+ // arguments are passed in FPU registers. Fetch the arguments
+ // from there and call the builtin using soft floating point
+ // convention.
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ arg0 = get_fpu_register(f0);
+ arg1 = get_fpu_register(f1);
+ arg2 = get_fpu_register(f2);
+ arg3 = get_fpu_register(f3);
+ break;
+ case ExternalReference::BUILTIN_FP_CALL:
+ arg0 = get_fpu_register(f0);
+ arg1 = get_fpu_register(f1);
+ break;
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ arg0 = get_fpu_register(f0);
+ arg1 = get_fpu_register(f1);
+ arg2 = get_register(a2);
+ break;
+ default:
+ break;
+ }
+ }
+
+ // This is dodgy but it works because the C entry stubs are never moved.
+ // See comment in codegen-arm.cc and bug 1242173.
+ int64_t saved_ra = get_register(ra);
+
+ intptr_t external =
+ reinterpret_cast<intptr_t>(redirection->external_function());
+
+    // Based on CpuFeatures::IsSupported(FPU), Loong64 uses either the
+    // hardware FPU or gcc's soft-float routines. The hardware FPU is
+    // simulated here. Soft-float adds an extra level of ExternalReference
+    // abstraction to support serialization.
+ if (fp_call) {
+ double dval0, dval1; // one or two double parameters
+ int32_t ival; // zero or one integer parameters
+ int64_t iresult = 0; // integer return value
+ double dresult = 0; // double return value
+ GetFpArgs(&dval0, &dval1, &ival);
+ SimulatorRuntimeCall generic_target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ PrintF("Call to host function at %p with args %f, %f",
+ reinterpret_cast<void*>(FUNCTION_ADDR(generic_target)),
+ dval0, dval1);
+ break;
+ case ExternalReference::BUILTIN_FP_CALL:
+ PrintF("Call to host function at %p with arg %f",
+ reinterpret_cast<void*>(FUNCTION_ADDR(generic_target)),
+ dval0);
+ break;
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ PrintF("Call to host function at %p with args %f, %d",
+ reinterpret_cast<void*>(FUNCTION_ADDR(generic_target)),
+ dval0, ival);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL: {
+ SimulatorRuntimeCompareCall target =
+ reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+ iresult = target(dval0, dval1);
+ set_register(v0, static_cast<int64_t>(iresult));
+ // set_register(v1, static_cast<int64_t>(iresult >> 32));
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_FP_CALL: {
+ SimulatorRuntimeFPFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+ dresult = target(dval0, dval1);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_CALL: {
+ SimulatorRuntimeFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ dresult = target(dval0);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_INT_CALL: {
+ SimulatorRuntimeFPIntCall target =
+ reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+ dresult = target(dval0, ival);
+ SetFpResult(dresult);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ PrintF("Returned %08x\n", static_cast<int32_t>(iresult));
+ break;
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_FP_CALL:
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ PrintF("Returned %f\n", dresult);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08" PRIx64 " \n",
+ reinterpret_cast<void*>(external), arg0);
+ }
+ SimulatorRuntimeDirectApiCall target =
+ reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+ target(arg0);
+ } else if (redirection->type() == ExternalReference::PROFILING_API_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64
+ " \n",
+ reinterpret_cast<void*>(external), arg0, arg1);
+ }
+ SimulatorRuntimeProfilingApiCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
+ target(arg0, Redirection::ReverseRedirection(arg1));
+ } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64
+ " \n",
+ reinterpret_cast<void*>(external), arg0, arg1);
+ }
+ SimulatorRuntimeDirectGetterCall target =
+ reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+ target(arg0, arg1);
+ } else if (redirection->type() ==
+ ExternalReference::PROFILING_GETTER_CALL) {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Call to host function at %p args %08" PRIx64 " %08" PRIx64
+ " %08" PRIx64 " \n",
+ reinterpret_cast<void*>(external), arg0, arg1, arg2);
+ }
+ SimulatorRuntimeProfilingGetterCall target =
+ reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
+ target(arg0, arg1, Redirection::ReverseRedirection(arg2));
+ } else {
+ DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL ||
+ redirection->type() == ExternalReference::BUILTIN_CALL_PAIR);
+ SimulatorRuntimeCall target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF(
+ "Call to host function at %p "
+ "args %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64
+ " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64 " , %08" PRIx64
+ " , %08" PRIx64 " , %08" PRIx64 " \n",
+ reinterpret_cast<void*>(FUNCTION_ADDR(target)), arg0, arg1, arg2,
+ arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+ }
+ ObjectPair result =
+ target(arg0, arg1, arg2, arg3, arg4, arg5, arg6, arg7, arg8, arg9);
+ set_register(v0, (int64_t)(result.x));
+ set_register(v1, (int64_t)(result.y));
+ }
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("Returned %08" PRIx64 " : %08" PRIx64 " \n", get_register(v1),
+ get_register(v0));
+ }
+ set_register(ra, saved_ra);
+ set_pc(get_register(ra));
+
+ } else if (code <= kMaxStopCode) {
+ if (IsWatchpoint(code)) {
+ PrintWatchpoint(code);
+ } else {
+ IncreaseStopCounter(code);
+ HandleStop(code, instr_.instr());
+ }
+ } else {
+ // All remaining break_ codes, and all traps are handled here.
+ Loong64Debugger dbg(this);
+ dbg.Debug();
+ }
+}
+
+// Stop helper functions.
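+// Stop codes form two ranges: codes up to kMaxWatchpointCode behave as
+// watchpoints (print registers and keep running), while codes up to
+// kMaxStopCode can be individually enabled, disabled, and counted.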
+bool Simulator::IsWatchpoint(uint64_t code) {
+ return (code <= kMaxWatchpointCode);
+}
+
+void Simulator::PrintWatchpoint(uint64_t code) {
+ Loong64Debugger dbg(this);
+ ++break_count_;
+ PrintF("\n---- break %" PRId64 " marker: %3d (instr count: %8" PRId64
+ " ) ----------"
+ "----------------------------------",
+ code, break_count_, icount_);
+ dbg.PrintAllRegs(); // Print registers and continue running.
+}
+
+void Simulator::HandleStop(uint64_t code, Instruction* instr) {
+ // Stop if it is enabled, otherwise go on jumping over the stop
+ // and the message address.
+ if (IsEnabledStop(code)) {
+ Loong64Debugger dbg(this);
+ dbg.Stop(instr);
+ }
+}
+
+bool Simulator::IsStopInstruction(Instruction* instr) {
+ int32_t opcode_hi15 = instr->Bits(31, 17);
+ uint32_t code = static_cast<uint32_t>(instr->Bits(14, 0));
+ return (opcode_hi15 == 0x15) && code > kMaxWatchpointCode &&
+ code <= kMaxStopCode;
+}
+
+bool Simulator::IsEnabledStop(uint64_t code) {
+ DCHECK_LE(code, kMaxStopCode);
+ DCHECK_GT(code, kMaxWatchpointCode);
+ return !(watched_stops_[code].count & kStopDisabledBit);
+}
+
+void Simulator::EnableStop(uint64_t code) {
+ if (!IsEnabledStop(code)) {
+ watched_stops_[code].count &= ~kStopDisabledBit;
+ }
+}
+
+void Simulator::DisableStop(uint64_t code) {
+ if (IsEnabledStop(code)) {
+ watched_stops_[code].count |= kStopDisabledBit;
+ }
+}
+
+void Simulator::IncreaseStopCounter(uint64_t code) {
+ DCHECK_LE(code, kMaxStopCode);
+  if ((watched_stops_[code].count & ~(1U << 31)) == 0x7FFFFFFF) {
+ PrintF("Stop counter for code %" PRId64
+ " has overflowed.\n"
+           "Enabling this code and resetting the counter to 0.\n",
+ code);
+ watched_stops_[code].count = 0;
+ EnableStop(code);
+ } else {
+ watched_stops_[code].count++;
+ }
+}
+
+// Print a stop status.
+void Simulator::PrintStopInfo(uint64_t code) {
+ if (code <= kMaxWatchpointCode) {
+ PrintF("That is a watchpoint, not a stop.\n");
+ return;
+ } else if (code > kMaxStopCode) {
+ PrintF("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
+ return;
+ }
+ const char* state = IsEnabledStop(code) ? "Enabled" : "Disabled";
+ int32_t count = watched_stops_[code].count & ~kStopDisabledBit;
+ // Don't print the state of unused breakpoints.
+ if (count != 0) {
+ if (watched_stops_[code].desc) {
+ PrintF("stop %" PRId64 " - 0x%" PRIx64 " : \t%s, \tcounter = %i, \t%s\n",
+ code, code, state, count, watched_stops_[code].desc);
+ } else {
+ PrintF("stop %" PRId64 " - 0x%" PRIx64 " : \t%s, \tcounter = %i\n", code,
+ code, state, count);
+ }
+ }
+}
+
+void Simulator::SignalException(Exception e) {
+ FATAL("Error: Exception %i raised.", static_cast<int>(e));
+}
+
+template <typename T>
+static T FPAbs(T a);
+
+template <>
+double FPAbs<double>(double a) {
+ return fabs(a);
+}
+
+template <>
+float FPAbs<float>(float a) {
+ return fabsf(a);
+}
+
+template <typename T>
+static bool FPUProcessNaNsAndZeros(T a, T b, MaxMinKind kind, T* result) {
+ if (std::isnan(a) && std::isnan(b)) {
+ *result = a;
+ } else if (std::isnan(a)) {
+ *result = b;
+ } else if (std::isnan(b)) {
+ *result = a;
+ } else if (b == a) {
+    // Handle the -0.0 == 0.0 case.
+    // std::signbit() yields 0 or 1, so subtracting static_cast<int>(kind)
+    // (0 for kMin, 1 for kMax) is nonzero exactly when b is the zero we
+    // want: the negative zero for kMin, the positive zero for kMax.
+    *result = (std::signbit(b) - static_cast<int>(kind)) ? b : a;
+ } else {
+ return false;
+ }
+ return true;
+}
+
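+// e.g. FPUMin(0.0, -0.0) yields -0.0 and FPUMax(0.0, -0.0) yields +0.0 via
+// the signbit arithmetic above; if exactly one operand is NaN, the other
+// operand is returned.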
+template <typename T>
+static T FPUMin(T a, T b) {
+ T result;
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
+ return result;
+ } else {
+ return b < a ? b : a;
+ }
+}
+
+template <typename T>
+static T FPUMax(T a, T b) {
+ T result;
+ if (FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, &result)) {
+ return result;
+ } else {
+ return b > a ? b : a;
+ }
+}
+
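+// The 'A' variants below compare magnitudes first: e.g. FPUMinA(-3.0, 2.0)
+// returns 2.0 because |2.0| < |-3.0|; equal magnitudes fall back to the
+// signed comparison.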
+template <typename T>
+static T FPUMinA(T a, T b) {
+ T result;
+ if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMin, &result)) {
+ if (FPAbs(a) < FPAbs(b)) {
+ result = a;
+ } else if (FPAbs(b) < FPAbs(a)) {
+ result = b;
+ } else {
+ result = a < b ? a : b;
+ }
+ }
+ return result;
+}
+
+template <typename T>
+static T FPUMaxA(T a, T b) {
+ T result;
+  if (!FPUProcessNaNsAndZeros(a, b, MaxMinKind::kMax, &result)) {
+ if (FPAbs(a) > FPAbs(b)) {
+ result = a;
+ } else if (FPAbs(b) > FPAbs(a)) {
+ result = b;
+ } else {
+ result = a > b ? a : b;
+ }
+ }
+ return result;
+}
+
+enum class KeepSign : bool { no = false, yes };
+
+template <typename T, typename std::enable_if<std::is_floating_point<T>::value,
+ int>::type = 0>
+T FPUCanonalizeNaNArg(T result, T arg, KeepSign keepSign = KeepSign::no) {
+ DCHECK(std::isnan(arg));
+ T qNaN = std::numeric_limits<T>::quiet_NaN();
+ if (keepSign == KeepSign::yes) {
+ return std::copysign(qNaN, result);
+ }
+ return qNaN;
+}
+
+template <typename T>
+T FPUCanonalizeNaNArgs(T result, KeepSign keepSign, T first) {
+ if (std::isnan(first)) {
+ return FPUCanonalizeNaNArg(result, first, keepSign);
+ }
+ return result;
+}
+
+template <typename T, typename... Args>
+T FPUCanonalizeNaNArgs(T result, KeepSign keepSign, T first, Args... args) {
+ if (std::isnan(first)) {
+ return FPUCanonalizeNaNArg(result, first, keepSign);
+ }
+ return FPUCanonalizeNaNArgs(result, keepSign, args...);
+}
+
+template <typename Func, typename T, typename... Args>
+T FPUCanonalizeOperation(Func f, T first, Args... args) {
+ return FPUCanonalizeOperation(f, KeepSign::no, first, args...);
+}
+
+template <typename Func, typename T, typename... Args>
+T FPUCanonalizeOperation(Func f, KeepSign keepSign, T first, Args... args) {
+ T result = f(first, args...);
+ if (std::isnan(result)) {
+ result = FPUCanonalizeNaNArgs(result, keepSign, first, args...);
+ }
+ return result;
+}
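+// e.g. FPUCanonalizeOperation([](double a, double b) { return a + b; },
+// 1.0, q) with q a NaN evaluates the sum, sees the NaN result, and returns
+// the canonical quiet NaN instead of propagating q's payload.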
+
+// Handle execution based on instruction types.
+void Simulator::DecodeTypeOp6() {
+ int64_t alu_out;
+ // Next pc.
+ int64_t next_pc = bad_ra;
+
+ // Branch instructions common part.
+ auto BranchAndLinkHelper = [this, &next_pc]() {
+ int64_t current_pc = get_pc();
+ set_register(ra, current_pc + kInstrSize);
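+    // The 26-bit offset is split across the encoding: instr bits 25..10 hold
+    // its low 16 bits and bits 9..0 hold its high 10 bits (sign-extended).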
+ int32_t offs26_low16 =
+ static_cast<uint32_t>(instr_.Bits(25, 10) << 16) >> 16;
+ int32_t offs26_high10 = static_cast<int32_t>(instr_.Bits(9, 0) << 22) >> 6;
+ int32_t offs26 = offs26_low16 | offs26_high10;
+ next_pc = current_pc + (offs26 << 2);
+ printf_instr("Offs26: %08x\n", offs26);
+ set_pc(next_pc);
+ };
+
+ auto BranchOff16Helper = [this, &next_pc](bool do_branch) {
+ int64_t current_pc = get_pc();
+ int32_t offs16 = static_cast<int32_t>(instr_.Bits(25, 10) << 16) >> 16;
+ printf_instr("Offs16: %08x\n", offs16);
+ int32_t offs = do_branch ? (offs16 << 2) : kInstrSize;
+ next_pc = current_pc + offs;
+ set_pc(next_pc);
+ };
+
+ auto BranchOff21Helper = [this, &next_pc](bool do_branch) {
+ int64_t current_pc = get_pc();
+ int32_t offs21_low16 =
+ static_cast<uint32_t>(instr_.Bits(25, 10) << 16) >> 16;
+ int32_t offs21_high5 = static_cast<int32_t>(instr_.Bits(4, 0) << 27) >> 11;
+ int32_t offs = offs21_low16 | offs21_high5;
+ printf_instr("Offs21: %08x\n", offs);
+ offs = do_branch ? (offs << 2) : kInstrSize;
+ next_pc = current_pc + offs;
+ set_pc(next_pc);
+ };
+
+ auto BranchOff26Helper = [this, &next_pc]() {
+ int64_t current_pc = get_pc();
+ int32_t offs26_low16 =
+ static_cast<uint32_t>(instr_.Bits(25, 10) << 16) >> 16;
+ int32_t offs26_high10 = static_cast<int32_t>(instr_.Bits(9, 0) << 22) >> 6;
+ int32_t offs26 = offs26_low16 | offs26_high10;
+ next_pc = current_pc + (offs26 << 2);
+ printf_instr("Offs26: %08x\n", offs26);
+ set_pc(next_pc);
+ };
+
+ auto JumpOff16Helper = [this, &next_pc]() {
+ int32_t offs16 = static_cast<int32_t>(instr_.Bits(25, 10) << 16) >> 16;
+ printf_instr("JIRL\t %s: %016lx, %s: %016lx, offs16: %x\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), offs16);
+ set_register(rd_reg(), get_pc() + kInstrSize);
+ next_pc = rj() + (offs16 << 2);
+ set_pc(next_pc);
+ };
+
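+  // Branch offsets are encoded in instruction units, so each decoded offset
+  // is shifted left by 2 before being added to the pc (see the helpers
+  // above).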
+ switch (instr_.Bits(31, 26) << 26) {
+ case ADDU16I_D: {
+ printf_instr("ADDU16I_D\t %s: %016lx, %s: %016lx, si16: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si16());
+ int32_t si16_upper = static_cast<int32_t>(si16()) << 16;
+ alu_out = static_cast<int64_t>(si16_upper) + rj();
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case BEQZ:
+ printf_instr("BEQZ\t %s: %016lx, ", Registers::Name(rj_reg()), rj());
+ BranchOff21Helper(rj() == 0);
+ break;
+ case BNEZ:
+ printf_instr("BNEZ\t %s: %016lx, ", Registers::Name(rj_reg()), rj());
+ BranchOff21Helper(rj() != 0);
+ break;
+ case BCZ: {
+ if (instr_.Bits(9, 8) == 0b00) {
+ // BCEQZ
+ printf_instr("BCEQZ\t fcc%d: %s, ", cj_reg(), cj() ? "True" : "False");
+ BranchOff21Helper(cj() == false);
+ } else if (instr_.Bits(9, 8) == 0b01) {
+ // BCNEZ
+ printf_instr("BCNEZ\t fcc%d: %s, ", cj_reg(), cj() ? "True" : "False");
+ BranchOff21Helper(cj() == true);
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ }
+ case JIRL:
+ JumpOff16Helper();
+ break;
+ case B:
+ printf_instr("B\t ");
+ BranchOff26Helper();
+ break;
+ case BL:
+ printf_instr("BL\t ");
+ BranchAndLinkHelper();
+ break;
+    case BEQ:
+      printf_instr("BEQ\t %s: %016lx, %s: %016lx, ", Registers::Name(rj_reg()),
+                   rj(), Registers::Name(rd_reg()), rd());
+      BranchOff16Helper(rj() == rd());
+      break;
+    case BNE:
+      printf_instr("BNE\t %s: %016lx, %s: %016lx, ", Registers::Name(rj_reg()),
+                   rj(), Registers::Name(rd_reg()), rd());
+      BranchOff16Helper(rj() != rd());
+      break;
+    case BLT:
+      printf_instr("BLT\t %s: %016lx, %s: %016lx, ", Registers::Name(rj_reg()),
+                   rj(), Registers::Name(rd_reg()), rd());
+      BranchOff16Helper(rj() < rd());
+      break;
+    case BGE:
+      printf_instr("BGE\t %s: %016lx, %s: %016lx, ", Registers::Name(rj_reg()),
+                   rj(), Registers::Name(rd_reg()), rd());
+      BranchOff16Helper(rj() >= rd());
+      break;
+    case BLTU:
+      printf_instr("BLTU\t %s: %016lx, %s: %016lx, ",
+                   Registers::Name(rj_reg()), rj(), Registers::Name(rd_reg()),
+                   rd());
+      BranchOff16Helper(rj_u() < rd_u());
+      break;
+    case BGEU:
+      printf_instr("BGEU\t %s: %016lx, %s: %016lx, ",
+                   Registers::Name(rj_reg()), rj(), Registers::Name(rd_reg()),
+                   rd());
+      BranchOff16Helper(rj_u() >= rd_u());
+      break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeOp7() {
+ int64_t alu_out;
+
+ switch (instr_.Bits(31, 25) << 25) {
+ case LU12I_W: {
+ printf_instr("LU12I_W\t %s: %016lx, si20: %d\n",
+ Registers::Name(rd_reg()), rd(), si20());
+ int32_t si20_upper = static_cast<int32_t>(si20() << 12);
+ SetResult(rd_reg(), static_cast<int64_t>(si20_upper));
+ break;
+ }
+ case LU32I_D: {
+ printf_instr("LU32I_D\t %s: %016lx, si20: %d\n",
+ Registers::Name(rd_reg()), rd(), si20());
+ int32_t si20_signExtend = static_cast<int32_t>(si20() << 12) >> 12;
+ int64_t lower_32bit_mask = 0xFFFFFFFF;
+ alu_out = (static_cast<int64_t>(si20_signExtend) << 32) |
+ (rd() & lower_32bit_mask);
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case PCADDI: {
+ printf_instr("PCADDI\t %s: %016lx, si20: %d\n", Registers::Name(rd_reg()),
+ rd(), si20());
+ int32_t si20_signExtend = static_cast<int32_t>(si20() << 12) >> 10;
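+      // (si20() << 12) >> 10 sign-extends the 20-bit immediate and scales it
+      // by 4, since PCADDI encodes a word-aligned pc-relative offset.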
+ int64_t current_pc = get_pc();
+ alu_out = static_cast<int64_t>(si20_signExtend) + current_pc;
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case PCALAU12I: {
+ printf_instr("PCALAU12I\t %s: %016lx, si20: %d\n",
+ Registers::Name(rd_reg()), rd(), si20());
+ int32_t si20_signExtend = static_cast<int32_t>(si20() << 12);
+ int64_t current_pc = get_pc();
+ int64_t clear_lower12bit_mask = 0xFFFFFFFFFFFFF000;
+ alu_out = static_cast<int64_t>(si20_signExtend) + current_pc;
+ SetResult(rd_reg(), alu_out & clear_lower12bit_mask);
+ break;
+ }
+ case PCADDU12I: {
+ printf_instr("PCADDU12I\t %s: %016lx, si20: %d\n",
+ Registers::Name(rd_reg()), rd(), si20());
+ int32_t si20_signExtend = static_cast<int32_t>(si20() << 12);
+ int64_t current_pc = get_pc();
+ alu_out = static_cast<int64_t>(si20_signExtend) + current_pc;
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case PCADDU18I: {
+ printf_instr("PCADDU18I\t %s: %016lx, si20: %d\n",
+ Registers::Name(rd_reg()), rd(), si20());
+ int64_t si20_signExtend = (static_cast<int64_t>(si20()) << 44) >> 26;
+ int64_t current_pc = get_pc();
+ alu_out = si20_signExtend + current_pc;
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeOp8() {
+ int64_t addr = 0x0;
+ int64_t si14_se = (static_cast<int64_t>(si14()) << 50) >> 48;
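+  // (si14() << 50) >> 48 sign-extends the 14-bit immediate and scales it by
+  // 4: the LDPTR/STPTR/LL/SC offsets are encoded in 4-byte units.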
+
+ switch (instr_.Bits(31, 24) << 24) {
+ case LDPTR_W:
+ printf_instr("LDPTR_W\t %s: %016lx, %s: %016lx, si14: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si14_se);
+ set_register(rd_reg(), ReadW(rj() + si14_se, instr_.instr()));
+ break;
+ case STPTR_W:
+ printf_instr("STPTR_W\t %s: %016lx, %s: %016lx, si14: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si14_se);
+ WriteW(rj() + si14_se, static_cast<int32_t>(rd()), instr_.instr());
+ break;
+ case LDPTR_D:
+ printf_instr("LDPTR_D\t %s: %016lx, %s: %016lx, si14: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si14_se);
+ set_register(rd_reg(), Read2W(rj() + si14_se, instr_.instr()));
+ break;
+ case STPTR_D:
+ printf_instr("STPTR_D\t %s: %016lx, %s: %016lx, si14: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si14_se);
+ Write2W(rj() + si14_se, rd(), instr_.instr());
+ break;
+ case LL_W: {
+ printf_instr("LL_W\t %s: %016lx, %s: %016lx, si14: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si14_se);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ addr = si14_se + rj();
+ set_register(rd_reg(), ReadW(addr, instr_.instr()));
+ local_monitor_.NotifyLoadLinked(addr, TransactionSize::Word);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(addr,
+ &global_monitor_thread_);
+ break;
+ }
+ case SC_W: {
+ printf_instr("SC_W\t %s: %016lx, %s: %016lx, si14: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si14_se);
+ addr = si14_se + rj();
+ WriteConditionalW(addr, static_cast<int32_t>(rd()), instr_.instr(),
+ rd_reg());
+ break;
+ }
+ case LL_D: {
+ printf_instr("LL_D\t %s: %016lx, %s: %016lx, si14: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si14_se);
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ addr = si14_se + rj();
+ set_register(rd_reg(), Read2W(addr, instr_.instr()));
+ local_monitor_.NotifyLoadLinked(addr, TransactionSize::DoubleWord);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(addr,
+ &global_monitor_thread_);
+ break;
+ }
+ case SC_D: {
+ printf_instr("SC_D\t %s: %016lx, %s: %016lx, si14: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si14_se);
+ addr = si14_se + rj();
+ WriteConditional2W(addr, rd(), instr_.instr(), rd_reg());
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeOp10() {
+ int64_t alu_out = 0x0;
+ int64_t si12_se = (static_cast<int64_t>(si12()) << 52) >> 52;
+ uint64_t si12_ze = (static_cast<uint64_t>(ui12()) << 52) >> 52;
+
+ switch (instr_.Bits(31, 22) << 22) {
+ case BSTR_W: {
+ CHECK_EQ(instr_.Bit(21), 1);
+ uint8_t lsbw_ = lsbw();
+ uint8_t msbw_ = msbw();
+ CHECK_LE(lsbw_, msbw_);
+ uint8_t size = msbw_ - lsbw_ + 1;
+ uint64_t mask = (1ULL << size) - 1;
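+      // e.g. msbw == 7, lsbw == 4: size == 4 and mask == 0xF, so BSTRINS_W
+      // below writes rj's low 4 bits into rd[7:4], while BSTRPICK_W extracts
+      // rj[7:4] into the low bits of the result.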
+ if (instr_.Bit(15) == 0) {
+ // BSTRINS_W
+ printf_instr(
+ "BSTRINS_W\t %s: %016lx, %s: %016lx, msbw: %02x, lsbw: %02x\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), rj(),
+ msbw_, lsbw_);
+ alu_out = static_cast<int32_t>((rd_u() & ~(mask << lsbw_)) |
+ ((rj_u() & mask) << lsbw_));
+ } else {
+ // BSTRPICK_W
+ printf_instr(
+ "BSTRPICK_W\t %s: %016lx, %s: %016lx, msbw: %02x, lsbw: %02x\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), rj(),
+ msbw_, lsbw_);
+ alu_out = static_cast<int32_t>((rj_u() & (mask << lsbw_)) >> lsbw_);
+ }
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case BSTRINS_D: {
+ uint8_t lsbd_ = lsbd();
+ uint8_t msbd_ = msbd();
+ CHECK_LE(lsbd_, msbd_);
+ printf_instr(
+ "BSTRINS_D\t %s: %016lx, %s: %016lx, msbw: %02x, lsbw: %02x\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), rj(),
+ msbd_, lsbd_);
+ uint8_t size = msbd_ - lsbd_ + 1;
+ if (size < 64) {
+ uint64_t mask = (1ULL << size) - 1;
+ alu_out = (rd_u() & ~(mask << lsbd_)) | ((rj_u() & mask) << lsbd_);
+ SetResult(rd_reg(), alu_out);
+ } else if (size == 64) {
+ SetResult(rd_reg(), rj());
+ }
+ break;
+ }
+ case BSTRPICK_D: {
+ uint8_t lsbd_ = lsbd();
+ uint8_t msbd_ = msbd();
+ CHECK_LE(lsbd_, msbd_);
+ printf_instr(
+ "BSTRPICK_D\t %s: %016lx, %s: %016lx, msbw: %02x, lsbw: %02x\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()), rj(),
+ msbd_, lsbd_);
+ uint8_t size = msbd_ - lsbd_ + 1;
+ if (size < 64) {
+ uint64_t mask = (1ULL << size) - 1;
+ alu_out = (rj_u() & (mask << lsbd_)) >> lsbd_;
+ SetResult(rd_reg(), alu_out);
+ } else if (size == 64) {
+ SetResult(rd_reg(), rj());
+ }
+ break;
+ }
+ case SLTI:
+ printf_instr("SLTI\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_se);
+ SetResult(rd_reg(), rj() < si12_se ? 1 : 0);
+ break;
+ case SLTUI:
+ printf_instr("SLTUI\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_se);
+ SetResult(rd_reg(), rj_u() < static_cast<uint64_t>(si12_se) ? 1 : 0);
+ break;
+ case ADDI_W: {
+ printf_instr("ADDI_W\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_se);
+ int32_t alu32_out =
+ static_cast<int32_t>(rj()) + static_cast<int32_t>(si12_se);
+ SetResult(rd_reg(), alu32_out);
+ break;
+ }
+ case ADDI_D:
+ printf_instr("ADDI_D\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_se);
+ SetResult(rd_reg(), rj() + si12_se);
+ break;
+ case LU52I_D: {
+ printf_instr("LU52I_D\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_se);
+      int64_t si12_upper = static_cast<int64_t>(si12()) << 52;
+      uint64_t mask = (1ULL << 52) - 1;
+      alu_out = si12_upper + (rj() & mask);
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case ANDI:
+ printf_instr("ANDI\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ SetResult(rd_reg(), rj() & si12_ze);
+ break;
+ case ORI:
+ printf_instr("ORI\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ SetResult(rd_reg(), rj_u() | si12_ze);
+ break;
+ case XORI:
+ printf_instr("XORI\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ SetResult(rd_reg(), rj_u() ^ si12_ze);
+ break;
+ case LD_B:
+ printf_instr("LD_B\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ set_register(rd_reg(), ReadB(rj() + si12_se));
+ break;
+ case LD_H:
+ printf_instr("LD_H\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ set_register(rd_reg(), ReadH(rj() + si12_se, instr_.instr()));
+ break;
+ case LD_W:
+ printf_instr("LD_W\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ set_register(rd_reg(), ReadW(rj() + si12_se, instr_.instr()));
+ break;
+ case LD_D:
+ printf_instr("LD_D\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ set_register(rd_reg(), Read2W(rj() + si12_se, instr_.instr()));
+ break;
+ case ST_B:
+ printf_instr("ST_B\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ WriteB(rj() + si12_se, static_cast<int8_t>(rd()));
+ break;
+ case ST_H:
+ printf_instr("ST_H\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ WriteH(rj() + si12_se, static_cast<int16_t>(rd()), instr_.instr());
+ break;
+ case ST_W:
+ printf_instr("ST_W\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ WriteW(rj() + si12_se, static_cast<int32_t>(rd()), instr_.instr());
+ break;
+ case ST_D:
+ printf_instr("ST_D\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ Write2W(rj() + si12_se, rd(), instr_.instr());
+ break;
+ case LD_BU:
+ printf_instr("LD_BU\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ set_register(rd_reg(), ReadBU(rj() + si12_se));
+ break;
+ case LD_HU:
+ printf_instr("LD_HU\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ set_register(rd_reg(), ReadHU(rj() + si12_se, instr_.instr()));
+ break;
+ case LD_WU:
+ printf_instr("LD_WU\t %s: %016lx, %s: %016lx, si12: %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), si12_ze);
+ set_register(rd_reg(), ReadWU(rj() + si12_se, instr_.instr()));
+ break;
+ case FLD_S: {
+ printf_instr("FLD_S\t %s: %016f, %s: %016lx, si12: %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ Registers::Name(rj_reg()), rj(), si12_ze);
+ set_fpu_register(fd_reg(), kFPUInvalidResult); // Trash upper 32 bits.
+ set_fpu_register_word(
+ fd_reg(), ReadW(rj() + si12_se, instr_.instr(), FLOAT_DOUBLE));
+ break;
+ }
+ case FST_S: {
+ printf_instr("FST_S\t %s: %016f, %s: %016lx, si12: %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ Registers::Name(rj_reg()), rj(), si12_ze);
+ int32_t alu_out_32 = static_cast<int32_t>(get_fpu_register(fd_reg()));
+ WriteW(rj() + si12_se, alu_out_32, instr_.instr());
+ break;
+ }
+ case FLD_D: {
+ printf_instr("FLD_D\t %s: %016f, %s: %016lx, si12: %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ Registers::Name(rj_reg()), rj(), si12_ze);
+ set_fpu_register_double(fd_reg(), ReadD(rj() + si12_se, instr_.instr()));
+ TraceMemRd(rj() + si12_se, get_fpu_register(fd_reg()), DOUBLE);
+ break;
+ }
+ case FST_D: {
+ printf_instr("FST_D\t %s: %016f, %s: %016lx, si12: %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ Registers::Name(rj_reg()), rj(), si12_ze);
+ WriteD(rj() + si12_se, get_fpu_register_double(fd_reg()), instr_.instr());
+ TraceMemWr(rj() + si12_se, get_fpu_register(fd_reg()), DWORD);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeOp12() {
+ switch (instr_.Bits(31, 20) << 20) {
+ case FMADD_S:
+ printf_instr("FMADD_S\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fk_reg()), fk_float(),
+ FPURegisters::Name(fa_reg()), fa_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ SetFPUFloatResult(fd_reg(), std::fma(fj_float(), fk_float(), fa_float()));
+ break;
+ case FMADD_D:
+ printf_instr("FMADD_D\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fk_reg()), fk_double(),
+ FPURegisters::Name(fa_reg()), fa_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetFPUDoubleResult(fd_reg(),
+ std::fma(fj_double(), fk_double(), fa_double()));
+ break;
+ case FMSUB_S:
+ printf_instr("FMSUB_S\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fk_reg()), fk_float(),
+ FPURegisters::Name(fa_reg()), fa_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ SetFPUFloatResult(fd_reg(),
+ std::fma(fj_float(), fk_float(), -fa_float()));
+ break;
+ case FMSUB_D:
+ printf_instr("FMSUB_D\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fk_reg()), fk_double(),
+ FPURegisters::Name(fa_reg()), fa_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetFPUDoubleResult(fd_reg(),
+ std::fma(fj_double(), fk_double(), -fa_double()));
+ break;
+ case FNMADD_S:
+ printf_instr("FNMADD_S\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fk_reg()), fk_float(),
+ FPURegisters::Name(fa_reg()), fa_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ SetFPUFloatResult(fd_reg(),
+ std::fma(-fj_float(), fk_float(), -fa_float()));
+ break;
+ case FNMADD_D:
+ printf_instr("FNMADD_D\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fk_reg()), fk_double(),
+ FPURegisters::Name(fa_reg()), fa_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetFPUDoubleResult(fd_reg(),
+ std::fma(-fj_double(), fk_double(), -fa_double()));
+ break;
+ case FNMSUB_S:
+ printf_instr("FNMSUB_S\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fk_reg()), fk_float(),
+ FPURegisters::Name(fa_reg()), fa_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ SetFPUFloatResult(fd_reg(),
+ std::fma(-fj_float(), fk_float(), fa_float()));
+ break;
+ case FNMSUB_D:
+ printf_instr("FNMSUB_D\t %s: %016f, %s: %016f, %s: %016f %s: %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fk_reg()), fk_double(),
+ FPURegisters::Name(fa_reg()), fa_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetFPUDoubleResult(fd_reg(),
+ std::fma(-fj_double(), fk_double(), fa_double()));
+ break;
+ case FCMP_COND_S: {
+ CHECK_EQ(instr_.Bits(4, 3), 0);
+ float fj = fj_float();
+ float fk = fk_float();
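+      // CAF..CUNE are the quiet comparisons; the 'U' variants are also true
+      // on unordered operands (NaNs). The signaling SAF..SUNE forms are not
+      // implemented in this simulator.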
+ switch (cond()) {
+ case CAF: {
+ printf_instr("FCMP_CAF_S fcc%d\n", cd_reg());
+ set_cf_register(cd_reg(), false);
+ break;
+ }
+ case CUN: {
+ printf_instr("FCMP_CUN_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case CEQ: {
+ printf_instr("FCMP_CEQ_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), fj == fk);
+ break;
+ }
+ case CUEQ: {
+ printf_instr("FCMP_CUEQ_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(),
+ (fj == fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case CLT: {
+ printf_instr("FCMP_CLT_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), fj < fk);
+ break;
+ }
+ case CULT: {
+ printf_instr("FCMP_CULT_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(),
+ (fj < fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case CLE: {
+ printf_instr("FCMP_CLE_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), fj <= fk);
+ break;
+ }
+ case CULE: {
+ printf_instr("FCMP_CULE_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(),
+ (fj <= fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case CNE: {
+ printf_instr("FCMP_CNE_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), (fj < fk) || (fj > fk));
+ break;
+ }
+ case COR: {
+ printf_instr("FCMP_COR_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), !std::isnan(fj) && !std::isnan(fk));
+ break;
+ }
+ case CUNE: {
+ printf_instr("FCMP_CUNE_S fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(),
+ (fj != fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case SAF:
+ case SUN:
+ case SEQ:
+ case SUEQ:
+ case SLT:
+ case SULT:
+ case SLE:
+ case SULE:
+ case SNE:
+ case SOR:
+ case SUNE:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ case FCMP_COND_D: {
+ CHECK_EQ(instr_.Bits(4, 3), 0);
+ double fj = fj_double();
+ double fk = fk_double();
+ switch (cond()) {
+ case CAF: {
+ printf_instr("FCMP_CAF_D fcc%d\n", cd_reg());
+ set_cf_register(cd_reg(), false);
+ break;
+ }
+ case CUN: {
+ printf_instr("FCMP_CUN_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case CEQ: {
+ printf_instr("FCMP_CEQ_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), fj == fk);
+ break;
+ }
+ case CUEQ: {
+ printf_instr("FCMP_CUEQ_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(),
+ (fj == fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case CLT: {
+ printf_instr("FCMP_CLT_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), fj < fk);
+ break;
+ }
+ case CULT: {
+ printf_instr("FCMP_CULT_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(),
+ (fj < fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case CLE: {
+ printf_instr("FCMP_CLE_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), fj <= fk);
+ break;
+ }
+ case CULE: {
+ printf_instr("FCMP_CULE_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(),
+ (fj <= fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case CNE: {
+ printf_instr("FCMP_CNE_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), (fj < fk) || (fj > fk));
+ break;
+ }
+ case COR: {
+ printf_instr("FCMP_COR_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(), !std::isnan(fj) && !std::isnan(fk));
+ break;
+ }
+ case CUNE: {
+ printf_instr("FCMP_CUNE_D fcc%d, %s: %016f, %s: %016f\n", cd_reg(),
+ FPURegisters::Name(fj_reg()), fj,
+ FPURegisters::Name(fk_reg()), fk);
+ set_cf_register(cd_reg(),
+ (fj != fk) || std::isnan(fj) || std::isnan(fk));
+ break;
+ }
+ case SAF:
+ case SUN:
+ case SEQ:
+ case SUEQ:
+ case SLT:
+ case SULT:
+ case SLE:
+ case SULE:
+ case SNE:
+ case SOR:
+ case SUNE:
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ case FSEL: {
+ CHECK_EQ(instr_.Bits(19, 18), 0);
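+      // FSEL copies fj to fd when condition flag fcc[ca] is clear, otherwise
+      // fk.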
+ printf_instr("FSEL fcc%d, %s: %016f, %s: %016f, %s: %016f\n", ca_reg(),
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ if (ca() == 0) {
+ SetFPUDoubleResult(fd_reg(), fj_double());
+ } else {
+ SetFPUDoubleResult(fd_reg(), fk_double());
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeOp14() {
+ int64_t alu_out = 0x0;
+ int32_t alu32_out = 0x0;
+
+ switch (instr_.Bits(31, 18) << 18) {
+ case ALSL: {
+ uint8_t sa = sa2() + 1;
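+      // Shift-add: compute (rj << (sa2 + 1)) + rk on the low 32 bits; bit 17
+      // selects sign-extension (ALSL_W) or zero-extension (ALSL_WU).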
+ alu32_out =
+ (static_cast<int32_t>(rj()) << sa) + static_cast<int32_t>(rk());
+ if (instr_.Bit(17) == 0) {
+ // ALSL_W
+ printf_instr("ALSL_W\t %s: %016lx, %s: %016lx, %s: %016lx, sa2: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk(), sa2());
+ SetResult(rd_reg(), alu32_out);
+ } else {
+ // ALSL_WU
+ printf_instr("ALSL_WU\t %s: %016lx, %s: %016lx, %s: %016lx, sa2: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk(), sa2());
+ SetResult(rd_reg(), static_cast<uint32_t>(alu32_out));
+ }
+ break;
+ }
+ case BYTEPICK_W: {
+ CHECK_EQ(instr_.Bit(17), 0);
+ printf_instr("BYTEPICK_W\t %s: %016lx, %s: %016lx, %s: %016lx, sa2: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk(), sa2());
+ uint8_t sa = sa2() * 8;
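+      // Splice the low (32 - sa) bits of rk above the high sa bits of rj and
+      // sign-extend the 32-bit result.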
+ if (sa == 0) {
+ alu32_out = static_cast<int32_t>(rk());
+ } else {
+ int32_t mask = (1 << 31) >> (sa - 1);
+ int32_t rk_hi = (static_cast<int32_t>(rk()) & (~mask)) << sa;
+ int32_t rj_lo = (static_cast<uint32_t>(rj()) & mask) >> (32 - sa);
+ alu32_out = rk_hi | rj_lo;
+ }
+ SetResult(rd_reg(), static_cast<int64_t>(alu32_out));
+ break;
+ }
+ case BYTEPICK_D: {
+ printf_instr("BYTEPICK_D\t %s: %016lx, %s: %016lx, %s: %016lx, sa3: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk(), sa3());
+ uint8_t sa = sa3() * 8;
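+      // Same splice on 64 bits: the low (64 - sa) bits of rk sit above the
+      // high sa bits of rj.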
+ if (sa == 0) {
+ alu_out = rk();
+ } else {
+ int64_t mask = (1LL << 63) >> (sa - 1);
+ int64_t rk_hi = (rk() & (~mask)) << sa;
+ int64_t rj_lo = static_cast<uint64_t>(rj() & mask) >> (64 - sa);
+ alu_out = rk_hi | rj_lo;
+ }
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case ALSL_D: {
+ printf_instr("ALSL_D\t %s: %016lx, %s: %016lx, %s: %016lx, sa2: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk(), sa2());
+ CHECK_EQ(instr_.Bit(17), 0);
+ uint8_t sa = sa2() + 1;
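+      // 64-bit shift-add: rd = (rj << (sa2 + 1)) + rk.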
+ alu_out = (rj() << sa) + rk();
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case SLLI: {
+ DCHECK_EQ(instr_.Bit(17), 0);
+ if (instr_.Bits(17, 15) == 0b001) {
+ // SLLI_W
+ printf_instr("SLLI_W\t %s: %016lx, %s: %016lx, ui5: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), ui5());
+ alu32_out = static_cast<int32_t>(rj()) << ui5();
+ SetResult(rd_reg(), static_cast<int64_t>(alu32_out));
+      } else if (instr_.Bits(17, 16) == 0b01) {
+ // SLLI_D
+ printf_instr("SLLI_D\t %s: %016lx, %s: %016lx, ui6: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), ui6());
+ SetResult(rd_reg(), rj() << ui6());
+ }
+ break;
+ }
+ case SRLI: {
+ DCHECK_EQ(instr_.Bit(17), 0);
+ if (instr_.Bits(17, 15) == 0b001) {
+ // SRLI_W
+ printf_instr("SRLI_W\t %s: %016lx, %s: %016lx, ui5: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), ui5());
+ alu32_out = static_cast<uint32_t>(rj()) >> ui5();
+ SetResult(rd_reg(), static_cast<int64_t>(alu32_out));
+ } else if (instr_.Bits(17, 16) == 0b01) {
+ // SRLI_D
+ printf_instr("SRLI_D\t %s: %016lx, %s: %016lx, ui6: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), ui6());
+ SetResult(rd_reg(), rj_u() >> ui6());
+ }
+ break;
+ }
+ case SRAI: {
+ DCHECK_EQ(instr_.Bit(17), 0);
+ if (instr_.Bits(17, 15) == 0b001) {
+ // SRAI_W
+ printf_instr("SRAI_W\t %s: %016lx, %s: %016lx, ui5: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), ui5());
+ alu32_out = static_cast<int32_t>(rj()) >> ui5();
+ SetResult(rd_reg(), static_cast<int64_t>(alu32_out));
+ } else if (instr_.Bits(17, 16) == 0b01) {
+ // SRAI_D
+ printf_instr("SRAI_D\t %s: %016lx, %s: %016lx, ui6: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), ui6());
+ SetResult(rd_reg(), rj() >> ui6());
+ }
+ break;
+ }
+ case ROTRI: {
+ DCHECK_EQ(instr_.Bit(17), 0);
+ if (instr_.Bits(17, 15) == 0b001) {
+ // ROTRI_W
+ printf_instr("ROTRI_W\t %s: %016lx, %s: %016lx, ui5: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), ui5());
+        alu32_out = static_cast<int32_t>(base::bits::RotateRight32(
+            static_cast<uint32_t>(rj_u()), static_cast<uint32_t>(ui5())));
+ SetResult(rd_reg(), static_cast<int64_t>(alu32_out));
+ } else if (instr_.Bits(17, 16) == 0b01) {
+ // ROTRI_D
+ printf_instr("ROTRI_D\t %s: %016lx, %s: %016lx, ui6: %d\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), ui6());
+        alu_out =
+            static_cast<int64_t>(base::bits::RotateRight64(rj_u(), ui6()));
+        SetResult(rd_reg(), alu_out);
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeOp17() {
+ int64_t alu_out;
+
+ switch (instr_.Bits(31, 15) << 15) {
+ case ADD_W: {
+ printf_instr("ADD_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ int32_t alu32_out = static_cast<int32_t>(rj() + rk());
+      // Sign-extend the 32-bit result into the 64-bit register.
+ SetResult(rd_reg(), static_cast<int64_t>(alu32_out));
+ break;
+ }
+ case ADD_D:
+ printf_instr("ADD_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() + rk());
+ break;
+ case SUB_W: {
+ printf_instr("SUB_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ int32_t alu32_out = static_cast<int32_t>(rj() - rk());
+      // Sign-extend the 32-bit result into the 64-bit register.
+ SetResult(rd_reg(), static_cast<int64_t>(alu32_out));
+ break;
+ }
+ case SUB_D:
+ printf_instr("SUB_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() - rk());
+ break;
+ case SLT:
+ printf_instr("SLT\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() < rk() ? 1 : 0);
+ break;
+ case SLTU:
+ printf_instr("SLTU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj_u() < rk_u() ? 1 : 0);
+ break;
+ case MASKEQZ:
+ printf_instr("MASKEQZ\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rk() == 0 ? rj() : 0);
+ break;
+ case MASKNEZ:
+ printf_instr("MASKNEZ\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rk() != 0 ? rj() : 0);
+ break;
+ case NOR:
+ printf_instr("NOR\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), ~(rj() | rk()));
+ break;
+ case AND:
+ printf_instr("AND\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() & rk());
+ break;
+ case OR:
+ printf_instr("OR\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() | rk());
+ break;
+ case XOR:
+ printf_instr("XOR\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() ^ rk());
+ break;
+ case ORN:
+ printf_instr("ORN\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() | (~rk()));
+ break;
+ case ANDN:
+ printf_instr("ANDN\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() & (~rk()));
+ break;
+ case SLL_W:
+ printf_instr("SLL_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+      SetResult(rd_reg(), static_cast<int32_t>(rj()) << (rk_u() % 32));
+ break;
+ case SRL_W: {
+ printf_instr("SRL_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+      alu_out = static_cast<int32_t>(static_cast<uint32_t>(rj_u()) >>
+                                     (rk_u() % 32));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case SRA_W:
+ printf_instr("SRA_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+      SetResult(rd_reg(), static_cast<int32_t>(rj()) >> (rk_u() % 32));
+ break;
+ case SLL_D:
+ printf_instr("SLL_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() << (rk_u() % 64));
+ break;
+ case SRL_D: {
+ printf_instr("SRL_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ alu_out = static_cast<int64_t>(rj_u() >> (rk_u() % 64));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case SRA_D:
+ printf_instr("SRA_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() >> (rk_u() % 64));
+ break;
+ case ROTR_W: {
+ printf_instr("ROTR_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+      alu_out = static_cast<int32_t>(base::bits::RotateRight32(
+          static_cast<uint32_t>(rj_u()), static_cast<uint32_t>(rk_u() % 32)));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case ROTR_D: {
+ printf_instr("ROTR_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+      alu_out = static_cast<int64_t>(
+          base::bits::RotateRight64(rj_u(), rk_u() % 64));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case MUL_W: {
+ printf_instr("MUL_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ alu_out = static_cast<int32_t>(rj()) * static_cast<int32_t>(rk());
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case MULH_W: {
+ printf_instr("MULH_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ int32_t rj_lo = static_cast<int32_t>(rj());
+ int32_t rk_lo = static_cast<int32_t>(rk());
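+      // Widen both 32-bit operands to 64 bits and keep the high word of the
+      // product.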
+ alu_out = static_cast<int64_t>(rj_lo) * static_cast<int64_t>(rk_lo);
+ SetResult(rd_reg(), alu_out >> 32);
+ break;
+ }
+ case MULH_WU: {
+ printf_instr("MULH_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ uint32_t rj_lo = static_cast<uint32_t>(rj_u());
+ uint32_t rk_lo = static_cast<uint32_t>(rk_u());
+ alu_out = static_cast<uint64_t>(rj_lo) * static_cast<uint64_t>(rk_lo);
+ SetResult(rd_reg(), alu_out >> 32);
+ break;
+ }
+ case MUL_D:
+ printf_instr("MUL_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), rj() * rk());
+ break;
+ case MULH_D:
+ printf_instr("MULH_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), MultiplyHighSigned(rj(), rk()));
+ break;
+ case MULH_DU:
+ printf_instr("MULH_DU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ SetResult(rd_reg(), MultiplyHighUnsigned(rj_u(), rk_u()));
+ break;
+ case MULW_D_W: {
+ printf_instr("MULW_D_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ int64_t rj_i32 = static_cast<int32_t>(rj());
+ int64_t rk_i32 = static_cast<int32_t>(rk());
+ SetResult(rd_reg(), rj_i32 * rk_i32);
+ break;
+ }
+ case MULW_D_WU: {
+ printf_instr("MULW_D_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ uint64_t rj_u32 = static_cast<uint32_t>(rj_u());
+ uint64_t rk_u32 = static_cast<uint32_t>(rk_u());
+ SetResult(rd_reg(), rj_u32 * rk_u32);
+ break;
+ }
+ case DIV_W: {
+ printf_instr("DIV_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ int32_t rj_i32 = static_cast<int32_t>(rj());
+ int32_t rk_i32 = static_cast<int32_t>(rk());
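+      // Guard the INT_MIN / -1 overflow case explicitly; a zero divisor
+      // leaves rd unchanged.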
+ if (rj_i32 == INT_MIN && rk_i32 == -1) {
+ SetResult(rd_reg(), INT_MIN);
+ } else if (rk_i32 != 0) {
+ SetResult(rd_reg(), rj_i32 / rk_i32);
+ }
+ break;
+ }
+ case MOD_W: {
+ printf_instr("MOD_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ int32_t rj_i32 = static_cast<int32_t>(rj());
+ int32_t rk_i32 = static_cast<int32_t>(rk());
+ if (rj_i32 == INT_MIN && rk_i32 == -1) {
+ SetResult(rd_reg(), 0);
+ } else if (rk_i32 != 0) {
+ SetResult(rd_reg(), rj_i32 % rk_i32);
+ }
+ break;
+ }
+ case DIV_WU: {
+ printf_instr("DIV_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ uint32_t rj_u32 = static_cast<uint32_t>(rj());
+ uint32_t rk_u32 = static_cast<uint32_t>(rk());
+ if (rk_u32 != 0) {
+ SetResult(rd_reg(), static_cast<int32_t>(rj_u32 / rk_u32));
+ }
+ break;
+ }
+ case MOD_WU: {
+ printf_instr("MOD_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ uint32_t rj_u32 = static_cast<uint32_t>(rj());
+ uint32_t rk_u32 = static_cast<uint32_t>(rk());
+ if (rk_u32 != 0) {
+ SetResult(rd_reg(), static_cast<int32_t>(rj_u32 % rk_u32));
+ }
+ break;
+ }
+ case DIV_D: {
+ printf_instr("DIV_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ if (rj() == LONG_MIN && rk() == -1) {
+ SetResult(rd_reg(), LONG_MIN);
+ } else if (rk() != 0) {
+ SetResult(rd_reg(), rj() / rk());
+ }
+ break;
+ }
+ case MOD_D: {
+ printf_instr("MOD_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ if (rj() == LONG_MIN && rk() == -1) {
+ SetResult(rd_reg(), 0);
+ } else if (rk() != 0) {
+ SetResult(rd_reg(), rj() % rk());
+ }
+ break;
+ }
+ case DIV_DU: {
+ printf_instr("DIV_DU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ if (rk_u() != 0) {
+ SetResult(rd_reg(), static_cast<int64_t>(rj_u() / rk_u()));
+ }
+ break;
+ }
+ case MOD_DU: {
+ printf_instr("MOD_DU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ if (rk_u() != 0) {
+ SetResult(rd_reg(), static_cast<int64_t>(rj_u() % rk_u()));
+ }
+ break;
+ }
+ case BREAK:
+ printf_instr("BREAK\t code: %x\n", instr_.Bits(14, 0));
+ SoftwareInterrupt();
+ break;
+ case FADD_S: {
+ printf_instr("FADD_S\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float(),
+ FPURegisters::Name(fk_reg()), fk_float());
+ SetFPUFloatResult(
+ fd_reg(),
+ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs + rhs; },
+ fj_float(), fk_float()));
+ break;
+ }
+ case FADD_D: {
+ printf_instr("FADD_D\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ SetFPUDoubleResult(fd_reg(),
+ FPUCanonalizeOperation(
+ [](double lhs, double rhs) { return lhs + rhs; },
+ fj_double(), fk_double()));
+ break;
+ }
+ case FSUB_S: {
+ printf_instr("FSUB_S\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float(),
+ FPURegisters::Name(fk_reg()), fk_float());
+ SetFPUFloatResult(
+ fd_reg(),
+ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs - rhs; },
+ fj_float(), fk_float()));
+ break;
+ }
+ case FSUB_D: {
+ printf_instr("FSUB_D\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ SetFPUDoubleResult(fd_reg(),
+ FPUCanonalizeOperation(
+ [](double lhs, double rhs) { return lhs - rhs; },
+ fj_double(), fk_double()));
+ break;
+ }
+ case FMUL_S: {
+ printf_instr("FMUL_S\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float(),
+ FPURegisters::Name(fk_reg()), fk_float());
+ SetFPUFloatResult(
+ fd_reg(),
+ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs * rhs; },
+ fj_float(), fk_float()));
+ break;
+ }
+ case FMUL_D: {
+ printf_instr("FMUL_D\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ SetFPUDoubleResult(fd_reg(),
+ FPUCanonalizeOperation(
+ [](double lhs, double rhs) { return lhs * rhs; },
+ fj_double(), fk_double()));
+ break;
+ }
+ case FDIV_S: {
+ printf_instr("FDIV_S\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float(),
+ FPURegisters::Name(fk_reg()), fk_float());
+ SetFPUFloatResult(
+ fd_reg(),
+ FPUCanonalizeOperation([](float lhs, float rhs) { return lhs / rhs; },
+ fj_float(), fk_float()));
+ break;
+ }
+ case FDIV_D: {
+ printf_instr("FDIV_D\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ SetFPUDoubleResult(fd_reg(),
+ FPUCanonalizeOperation(
+ [](double lhs, double rhs) { return lhs / rhs; },
+ fj_double(), fk_double()));
+ break;
+ }
+ case FMAX_S:
+ printf_instr("FMAX_S\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float(),
+ FPURegisters::Name(fk_reg()), fk_float());
+ SetFPUFloatResult(fd_reg(), FPUMax(fk_float(), fj_float()));
+ break;
+ case FMAX_D:
+ printf_instr("FMAX_D\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ SetFPUDoubleResult(fd_reg(), FPUMax(fk_double(), fj_double()));
+ break;
+ case FMIN_S:
+ printf_instr("FMIN_S\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float(),
+ FPURegisters::Name(fk_reg()), fk_float());
+ SetFPUFloatResult(fd_reg(), FPUMin(fk_float(), fj_float()));
+ break;
+ case FMIN_D:
+ printf_instr("FMIN_D\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ SetFPUDoubleResult(fd_reg(), FPUMin(fk_double(), fj_double()));
+ break;
+ case FMAXA_S:
+ printf_instr("FMAXA_S\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float(),
+ FPURegisters::Name(fk_reg()), fk_float());
+ SetFPUFloatResult(fd_reg(), FPUMaxA(fk_float(), fj_float()));
+ break;
+ case FMAXA_D:
+ printf_instr("FMAXA_D\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ SetFPUDoubleResult(fd_reg(), FPUMaxA(fk_double(), fj_double()));
+ break;
+ case FMINA_S:
+ printf_instr("FMINA_S\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float(),
+ FPURegisters::Name(fk_reg()), fk_float());
+ SetFPUFloatResult(fd_reg(), FPUMinA(fk_float(), fj_float()));
+ break;
+ case FMINA_D:
+ printf_instr("FMINA_D\t %s: %016f, %s, %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double(),
+ FPURegisters::Name(fk_reg()), fk_double());
+ SetFPUDoubleResult(fd_reg(), FPUMinA(fk_double(), fj_double()));
+ break;
+ case LDX_B:
+ printf_instr("LDX_B\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ set_register(rd_reg(), ReadB(rj() + rk()));
+ break;
+ case LDX_H:
+ printf_instr("LDX_H\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ set_register(rd_reg(), ReadH(rj() + rk(), instr_.instr()));
+ break;
+ case LDX_W:
+ printf_instr("LDX_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ set_register(rd_reg(), ReadW(rj() + rk(), instr_.instr()));
+ break;
+ case LDX_D:
+ printf_instr("LDX_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ set_register(rd_reg(), Read2W(rj() + rk(), instr_.instr()));
+ break;
+ case STX_B:
+ printf_instr("STX_B\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ WriteB(rj() + rk(), static_cast<int8_t>(rd()));
+ break;
+ case STX_H:
+ printf_instr("STX_H\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ WriteH(rj() + rk(), static_cast<int16_t>(rd()), instr_.instr());
+ break;
+ case STX_W:
+ printf_instr("STX_W\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ WriteW(rj() + rk(), static_cast<int32_t>(rd()), instr_.instr());
+ break;
+ case STX_D:
+ printf_instr("STX_D\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ Write2W(rj() + rk(), rd(), instr_.instr());
+ break;
+ case LDX_BU:
+ printf_instr("LDX_BU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ set_register(rd_reg(), ReadBU(rj() + rk()));
+ break;
+ case LDX_HU:
+ printf_instr("LDX_HU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ set_register(rd_reg(), ReadHU(rj() + rk(), instr_.instr()));
+ break;
+ case LDX_WU:
+ printf_instr("LDX_WU\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj(), Registers::Name(rk_reg()), rk());
+ set_register(rd_reg(), ReadWU(rj() + rk(), instr_.instr()));
+ break;
+ case FLDX_S:
+ printf_instr("FLDX_S\t %s: %016f, %s: %016lx, %s: %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ Registers::Name(rj_reg()), rj(), Registers::Name(rk_reg()),
+ rk());
+ set_fpu_register(fd_reg(), kFPUInvalidResult); // Trash upper 32 bits.
+ set_fpu_register_word(fd_reg(),
+ ReadW(rj() + rk(), instr_.instr(), FLOAT_DOUBLE));
+ break;
+ case FLDX_D:
+ printf_instr("FLDX_D\t %s: %016f, %s: %016lx, %s: %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ Registers::Name(rj_reg()), rj(), Registers::Name(rk_reg()),
+ rk());
+ set_fpu_register_double(fd_reg(), ReadD(rj() + rk(), instr_.instr()));
+ break;
+ case FSTX_S:
+ printf_instr("FSTX_S\t %s: %016f, %s: %016lx, %s: %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ Registers::Name(rj_reg()), rj(), Registers::Name(rk_reg()),
+ rk());
+ WriteW(rj() + rk(), static_cast<int32_t>(get_fpu_register(fd_reg())),
+ instr_.instr());
+ break;
+ case FSTX_D:
+ printf_instr("FSTX_D\t %s: %016f, %s: %016lx, %s: %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ Registers::Name(rj_reg()), rj(), Registers::Name(rk_reg()),
+ rk());
+ WriteD(rj() + rk(), get_fpu_register_double(fd_reg()), instr_.instr());
+ break;
+ case AMSWAP_W:
+ printf("Sim UNIMPLEMENTED: AMSWAP_W\n");
+ UNIMPLEMENTED();
+ case AMSWAP_D:
+ printf("Sim UNIMPLEMENTED: AMSWAP_D\n");
+ UNIMPLEMENTED();
+ case AMADD_W:
+ printf("Sim UNIMPLEMENTED: AMADD_W\n");
+ UNIMPLEMENTED();
+ case AMADD_D:
+ printf("Sim UNIMPLEMENTED: AMADD_D\n");
+ UNIMPLEMENTED();
+ case AMAND_W:
+ printf("Sim UNIMPLEMENTED: AMAND_W\n");
+ UNIMPLEMENTED();
+ case AMAND_D:
+ printf("Sim UNIMPLEMENTED: AMAND_D\n");
+ UNIMPLEMENTED();
+ case AMOR_W:
+ printf("Sim UNIMPLEMENTED: AMOR_W\n");
+ UNIMPLEMENTED();
+ case AMOR_D:
+ printf("Sim UNIMPLEMENTED: AMOR_D\n");
+ UNIMPLEMENTED();
+ case AMXOR_W:
+ printf("Sim UNIMPLEMENTED: AMXOR_W\n");
+ UNIMPLEMENTED();
+ case AMXOR_D:
+ printf("Sim UNIMPLEMENTED: AMXOR_D\n");
+ UNIMPLEMENTED();
+ case AMMAX_W:
+ printf("Sim UNIMPLEMENTED: AMMAX_W\n");
+ UNIMPLEMENTED();
+ case AMMAX_D:
+ printf("Sim UNIMPLEMENTED: AMMAX_D\n");
+ UNIMPLEMENTED();
+ case AMMIN_W:
+ printf("Sim UNIMPLEMENTED: AMMIN_W\n");
+ UNIMPLEMENTED();
+ case AMMIN_D:
+ printf("Sim UNIMPLEMENTED: AMMIN_D\n");
+ UNIMPLEMENTED();
+ case AMMAX_WU:
+ printf("Sim UNIMPLEMENTED: AMMAX_WU\n");
+ UNIMPLEMENTED();
+ case AMMAX_DU:
+ printf("Sim UNIMPLEMENTED: AMMAX_DU\n");
+ UNIMPLEMENTED();
+ case AMMIN_WU:
+ printf("Sim UNIMPLEMENTED: AMMIN_WU\n");
+ UNIMPLEMENTED();
+ case AMMIN_DU:
+ printf("Sim UNIMPLEMENTED: AMMIN_DU\n");
+ UNIMPLEMENTED();
+ case AMSWAP_DB_W: {
+ printf_instr("AMSWAP_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int32_t rdvalue;
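+      // Emulate the atomic swap with a load-linked/store-conditional retry
+      // loop: WriteConditionalW records success (1) or failure (0) in rd, so
+      // the loop repeats until the store commits and rd then receives the
+      // value that was loaded.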
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), ReadW(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditionalW(rj(), static_cast<int32_t>(rk()), instr_.instr(),
+ rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMSWAP_DB_D: {
+ printf_instr("AMSWAP_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int64_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), Read2W(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditional2W(rj(), rk(), instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMADD_DB_W: {
+ printf_instr("AMADD_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int32_t rdvalue;
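+      // Same LL/SC retry loop as AMSWAP_DB_W, except the conditional store
+      // writes the 32-bit sum of the loaded value and rk; the AND/OR/XOR
+      // variants below differ only in this stored value.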
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), ReadW(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditionalW(rj(),
+ static_cast<int32_t>(static_cast<int32_t>(rk()) +
+ static_cast<int32_t>(rd())),
+ instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMADD_DB_D: {
+ printf_instr("AMADD_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int64_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), Read2W(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditional2W(rj(), rk() + rd(), instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMAND_DB_W: {
+ printf_instr("AMAND_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int32_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), ReadW(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditionalW(rj(),
+ static_cast<int32_t>(static_cast<int32_t>(rk()) &
+ static_cast<int32_t>(rd())),
+ instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMAND_DB_D: {
+ printf_instr("AMAND_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int64_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), Read2W(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditional2W(rj(), rk() & rd(), instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMOR_DB_W: {
+ printf_instr("AMOR_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int32_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), ReadW(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditionalW(rj(),
+ static_cast<int32_t>(static_cast<int32_t>(rk()) |
+ static_cast<int32_t>(rd())),
+ instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMOR_DB_D: {
+ printf_instr("AMOR_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int64_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), Read2W(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditional2W(rj(), rk() | rd(), instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMXOR_DB_W: {
+ printf_instr("AMXOR_DB_W:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int32_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), ReadW(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::Word);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditionalW(rj(),
+ static_cast<int32_t>(static_cast<int32_t>(rk()) ^
+ static_cast<int32_t>(rd())),
+ instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMXOR_DB_D: {
+ printf_instr("AMXOR_DB_D:\t %s: %016lx, %s, %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rk_reg()),
+ rk(), Registers::Name(rj_reg()), rj());
+ int64_t rdvalue;
+ do {
+ {
+ base::MutexGuard lock_guard(&GlobalMonitor::Get()->mutex);
+ set_register(rd_reg(), Read2W(rj(), instr_.instr()));
+ local_monitor_.NotifyLoadLinked(rj(), TransactionSize::DoubleWord);
+ GlobalMonitor::Get()->NotifyLoadLinked_Locked(
+ rj(), &global_monitor_thread_);
+ }
+ rdvalue = get_register(rd_reg());
+ WriteConditional2W(rj(), rk() ^ rd(), instr_.instr(), rd_reg());
+ } while (!get_register(rd_reg()));
+ set_register(rd_reg(), rdvalue);
+ } break;
+ case AMMAX_DB_W:
+ printf("Sim UNIMPLEMENTED: AMMAX_DB_W\n");
+ UNIMPLEMENTED();
+ case AMMAX_DB_D:
+ printf("Sim UNIMPLEMENTED: AMMAX_DB_D\n");
+ UNIMPLEMENTED();
+ case AMMIN_DB_W:
+ printf("Sim UNIMPLEMENTED: AMMIN_DB_W\n");
+ UNIMPLEMENTED();
+ case AMMIN_DB_D:
+ printf("Sim UNIMPLEMENTED: AMMIN_DB_D\n");
+ UNIMPLEMENTED();
+ case AMMAX_DB_WU:
+ printf("Sim UNIMPLEMENTED: AMMAX_DB_WU\n");
+ UNIMPLEMENTED();
+ case AMMAX_DB_DU:
+ printf("Sim UNIMPLEMENTED: AMMAX_DB_DU\n");
+ UNIMPLEMENTED();
+ case AMMIN_DB_WU:
+ printf("Sim UNIMPLEMENTED: AMMIN_DB_WU\n");
+ UNIMPLEMENTED();
+ case AMMIN_DB_DU:
+ printf("Sim UNIMPLEMENTED: AMMIN_DB_DU\n");
+ UNIMPLEMENTED();
+ case DBAR:
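+      // Data barrier: memory accesses are already sequential in the
+      // simulator, so DBAR is a no-op beyond the trace.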
+ printf_instr("DBAR\n");
+ break;
+ case IBAR:
+ printf("Sim UNIMPLEMENTED: IBAR\n");
+ UNIMPLEMENTED();
+ case FSCALEB_S:
+ printf("Sim UNIMPLEMENTED: FSCALEB_S\n");
+ UNIMPLEMENTED();
+ case FSCALEB_D:
+ printf("Sim UNIMPLEMENTED: FSCALEB_D\n");
+ UNIMPLEMENTED();
+ case FCOPYSIGN_S:
+ printf("Sim UNIMPLEMENTED: FCOPYSIGN_S\n");
+ UNIMPLEMENTED();
+ case FCOPYSIGN_D:
+ printf("Sim UNIMPLEMENTED: FCOPYSIGN_D\n");
+ UNIMPLEMENTED();
+ default:
+ UNREACHABLE();
+ }
+}
+
+void Simulator::DecodeTypeOp22() {
+ int64_t alu_out;
+
+ switch (instr_.Bits(31, 10) << 10) {
+ case CLZ_W: {
+ printf_instr("CLZ_W\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ alu_out = base::bits::CountLeadingZeros32(static_cast<int32_t>(rj_u()));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case CTZ_W: {
+ printf_instr("CTZ_W\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ alu_out = base::bits::CountTrailingZeros32(static_cast<int32_t>(rj_u()));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case CLZ_D: {
+ printf_instr("CLZ_D\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ alu_out = base::bits::CountLeadingZeros64(static_cast<int64_t>(rj_u()));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case CTZ_D: {
+ printf_instr("CTZ_D\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ alu_out = base::bits::CountTrailingZeros64(static_cast<int64_t>(rj_u()));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case REVB_2H: {
+ printf_instr("REVB_2H\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint32_t input = static_cast<uint32_t>(rj());
+ uint64_t output = 0;
+
+ uint32_t mask = 0xFF000000;
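+      // Swap the two bytes within each 16-bit halfword of the low 32 bits,
+      // then sign-extend the 32-bit result.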
+ for (int i = 0; i < 4; i++) {
+ uint32_t tmp = mask & input;
+ if (i % 2 == 0) {
+ tmp = tmp >> 8;
+ } else {
+ tmp = tmp << 8;
+ }
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(static_cast<int32_t>(output));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case REVB_4H: {
+ printf_instr("REVB_4H\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint64_t input = rj_u();
+ uint64_t output = 0;
+
+ uint64_t mask = 0xFF00000000000000;
+ for (int i = 0; i < 8; i++) {
+ uint64_t tmp = mask & input;
+ if (i % 2 == 0) {
+ tmp = tmp >> 8;
+ } else {
+ tmp = tmp << 8;
+ }
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case REVB_2W: {
+ printf_instr("REVB_2W\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint64_t input = rj_u();
+ uint64_t output = 0;
+
+ uint64_t mask = 0xFF000000FF000000;
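+      // Byte-reverse each 32-bit word of the doubleword independently.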
+ for (int i = 0; i < 4; i++) {
+ uint64_t tmp = mask & input;
+ if (i <= 1) {
+ tmp = tmp >> (24 - i * 16);
+ } else {
+ tmp = tmp << (i * 16 - 24);
+ }
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case REVB_D: {
+ printf_instr("REVB_D\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint64_t input = rj_u();
+ uint64_t output = 0;
+
+ uint64_t mask = 0xFF00000000000000;
+ for (int i = 0; i < 8; i++) {
+ uint64_t tmp = mask & input;
+ if (i <= 3) {
+ tmp = tmp >> (56 - i * 16);
+ } else {
+ tmp = tmp << (i * 16 - 56);
+ }
+ output = output | tmp;
+ mask = mask >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case REVH_2W: {
+ printf_instr("REVH_2W\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint64_t input = rj_u();
+ uint64_t output = 0;
+
+ uint64_t mask = 0xFFFF000000000000;
+ for (int i = 0; i < 4; i++) {
+ uint64_t tmp = mask & input;
+ if (i % 2 == 0) {
+ tmp = tmp >> 16;
+ } else {
+ tmp = tmp << 16;
+ }
+ output = output | tmp;
+ mask = mask >> 16;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case REVH_D: {
+ printf_instr("REVH_D\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint64_t input = rj_u();
+ uint64_t output = 0;
+
+ uint64_t mask = 0xFFFF000000000000;
+ for (int i = 0; i < 4; i++) {
+ uint64_t tmp = mask & input;
+ if (i <= 1) {
+ tmp = tmp >> (48 - i * 32);
+ } else {
+ tmp = tmp << (i * 32 - 48);
+ }
+ output = output | tmp;
+ mask = mask >> 16;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case BITREV_4B: {
+ printf_instr("BITREV_4B\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint32_t input = static_cast<uint32_t>(rj());
+ uint32_t output = 0;
+ uint8_t i_byte, o_byte;
+
+      // Reverse the bits within each individual byte.
+ for (int i = 0; i < 4; i++) {
+ output = output >> 8;
+ i_byte = input & 0xFF;
+
+ // Fast way to reverse bits in byte
+ // Devised by Sean Anderson, July 13, 2001
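+        // The two multiplies make shifted copies of the byte, the masks keep
+        // each source bit in a distinct position, and the final multiply by
+        // 0x10101 folds them into the reversed byte at bits 23..16.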
+ o_byte = static_cast<uint8_t>(((i_byte * 0x0802LU & 0x22110LU) |
+ (i_byte * 0x8020LU & 0x88440LU)) *
+ 0x10101LU >>
+ 16);
+
+        // Widen o_byte before shifting so the shift happens in uint32_t.
+        output = output | (static_cast<uint32_t>(o_byte) << 24);
+ input = input >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(static_cast<int32_t>(output));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case BITREV_8B: {
+ printf_instr("BITREV_8B\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint64_t input = rj_u();
+ uint64_t output = 0;
+ uint8_t i_byte, o_byte;
+
+      // Reverse the bits within each individual byte.
+ for (int i = 0; i < 8; i++) {
+ output = output >> 8;
+ i_byte = input & 0xFF;
+
+ // Fast way to reverse bits in byte
+ // Devised by Sean Anderson, July 13, 2001
+ o_byte = static_cast<uint8_t>(((i_byte * 0x0802LU & 0x22110LU) |
+ (i_byte * 0x8020LU & 0x88440LU)) *
+ 0x10101LU >>
+ 16);
+
+ output = output | (static_cast<uint64_t>(o_byte) << 56);
+ input = input >> 8;
+ }
+
+ alu_out = static_cast<int64_t>(output);
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case BITREV_W: {
+ printf_instr("BITREV_W\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint32_t input = static_cast<uint32_t>(rj());
+ uint32_t output = 0;
+ output = base::bits::ReverseBits(input);
+ alu_out = static_cast<int64_t>(static_cast<int32_t>(output));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case BITREV_D: {
+ printf_instr("BITREV_D\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ alu_out = static_cast<int64_t>(base::bits::ReverseBits(rj_u()));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case EXT_W_B: {
+ printf_instr("EXT_W_B\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint8_t input = static_cast<uint8_t>(rj());
+ alu_out = static_cast<int64_t>(static_cast<int8_t>(input));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case EXT_W_H: {
+ printf_instr("EXT_W_H\t %s: %016lx, %s, %016lx\n",
+ Registers::Name(rd_reg()), rd(), Registers::Name(rj_reg()),
+ rj());
+ uint16_t input = static_cast<uint16_t>(rj());
+ alu_out = static_cast<int64_t>(static_cast<int16_t>(input));
+ SetResult(rd_reg(), alu_out);
+ break;
+ }
+ case FABS_S:
+ printf_instr("FABS_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ SetFPUFloatResult(fd_reg(), std::abs(fj_float()));
+ break;
+ case FABS_D:
+ printf_instr("FABS_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetFPUDoubleResult(fd_reg(), std::abs(fj_double()));
+ break;
+ case FNEG_S:
+ printf_instr("FNEG_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ SetFPUFloatResult(fd_reg(), -fj_float());
+ break;
+ case FNEG_D:
+ printf_instr("FNEG_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetFPUDoubleResult(fd_reg(), -fj_double());
+ break;
+ case FSQRT_S: {
+ printf_instr("FSQRT_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ if (fj_float() >= 0) {
+ SetFPUFloatResult(fd_reg(), std::sqrt(fj_float()));
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, false);
+ } else {
+ SetFPUFloatResult(fd_reg(), std::sqrt(-1)); // qnan
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ }
+ break;
+ }
+ case FSQRT_D: {
+ printf_instr("FSQRT_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ if (fj_double() >= 0) {
+ SetFPUDoubleResult(fd_reg(), std::sqrt(fj_double()));
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, false);
+ } else {
+ SetFPUDoubleResult(fd_reg(), std::sqrt(-1)); // qnan
+ set_fcsr_bit(kFCSRInvalidOpCauseBit, true);
+ }
+ break;
+ }
+ case FMOV_S:
+ printf_instr("FMOV_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ SetFPUFloatResult(fd_reg(), fj_float());
+ break;
+ case FMOV_D:
+ printf_instr("FMOV_D\t %s: %016f, %s, %016f\n",
+                   FPURegisters::Name(fd_reg()), fd_double(),
+                   FPURegisters::Name(fj_reg()), fj_double());
+ SetFPUDoubleResult(fd_reg(), fj_double());
+ break;
+ case MOVGR2FR_W: {
+ printf_instr("MOVGR2FR_W\t %s: %016f, %s, %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ Registers::Name(rj_reg()), rj());
+ set_fpu_register_word(fd_reg(), static_cast<int32_t>(rj()));
+ TraceRegWr(get_fpu_register(fd_reg()), FLOAT_DOUBLE);
+ break;
+ }
+ case MOVGR2FR_D:
+ printf_instr("MOVGR2FR_D\t %s: %016f, %s, %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ Registers::Name(rj_reg()), rj());
+ SetFPUResult2(fd_reg(), rj());
+ break;
+ case MOVGR2FRH_W: {
+ printf_instr("MOVGR2FRH_W\t %s: %016f, %s, %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ Registers::Name(rj_reg()), rj());
+ set_fpu_register_hi_word(fd_reg(), static_cast<int32_t>(rj()));
+ TraceRegWr(get_fpu_register(fd_reg()), DOUBLE);
+ break;
+ }
+ case MOVFR2GR_S: {
+ printf_instr("MOVFR2GR_S\t %s: %016lx, %s, %016f\n",
+ Registers::Name(rd_reg()), rd(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ set_register(rd_reg(),
+ static_cast<int64_t>(get_fpu_register_word(fj_reg())));
+ TraceRegWr(get_register(rd_reg()), WORD_DWORD);
+ break;
+ }
+ case MOVFR2GR_D:
+ printf_instr("MOVFR2GR_D\t %s: %016lx, %s, %016f\n",
+ Registers::Name(rd_reg()), rd(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetResult(rd_reg(), get_fpu_register(fj_reg()));
+ break;
+ case MOVFRH2GR_S:
+ printf_instr("MOVFRH2GR_S\t %s: %016lx, %s, %016f\n",
+ Registers::Name(rd_reg()), rd(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetResult(rd_reg(), get_fpu_register_hi_word(fj_reg()));
+ break;
+ case MOVGR2FCSR: {
+ printf_instr("MOVGR2FCSR\t fcsr: %016x, %s, %016lx\n", FCSR_,
+ Registers::Name(rj_reg()), rj());
+      // The FCSR index (rd) must be 0-3.
+ CHECK_LT(rd_reg(), 4);
+ FCSR_ = static_cast<uint32_t>(rj());
+ TraceRegWr(FCSR_);
+ break;
+ }
+ case MOVFCSR2GR: {
+ printf_instr("MOVFCSR2GR\t %s, %016lx, FCSR: %016x\n",
+ Registers::Name(rd_reg()), rd(), FCSR_);
+      // The FCSR index (rj) must be 0-3.
+ CHECK_LT(rj_reg(), 4);
+ SetResult(rd_reg(), FCSR_);
+ break;
+ }
+ case FCVT_S_D:
+ printf_instr("FCVT_S_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ SetFPUFloatResult(fd_reg(), static_cast<float>(fj_double()));
+ break;
+ case FCVT_D_S:
+ printf_instr("FCVT_D_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ SetFPUDoubleResult(fd_reg(), static_cast<double>(fj_float()));
+ break;
+ case FTINTRM_W_S: {
+ printf_instr("FTINTRM_W_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded = std::floor(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_word_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRM_W_D: {
+ printf_instr("FTINTRM_W_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded = std::floor(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRM_L_S: {
+ printf_instr("FTINTRM_L_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded = std::floor(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRM_L_D: {
+ printf_instr("FTINTRM_L_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded = std::floor(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRP_W_S: {
+ printf_instr("FTINTRP_W_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded = std::ceil(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_word_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRP_W_D: {
+ printf_instr("FTINTRP_W_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded = std::ceil(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRP_L_S: {
+ printf_instr("FTINTRP_L_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded = std::ceil(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRP_L_D: {
+ printf_instr("FTINTRP_L_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded = std::ceil(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRZ_W_S: {
+ printf_instr("FTINTRZ_W_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded = std::trunc(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_word_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRZ_W_D: {
+ printf_instr("FTINTRZ_W_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded = std::trunc(fj);
+ int32_t result = static_cast<int32_t>(rounded);
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRZ_L_S: {
+ printf_instr("FTINTRZ_L_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded = std::trunc(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRZ_L_D: {
+ printf_instr("FTINTRZ_L_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded = std::trunc(fj);
+ int64_t result = static_cast<int64_t>(rounded);
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRNE_W_S: {
+ printf_instr("FTINTRNE_W_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
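+      // std::floor(fj + 0.5) rounds halfway cases up; the check below pulls
+      // an odd halfway result back down so ties round to even.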
+ float rounded = std::floor(fj + 0.5);
+ int32_t result = static_cast<int32_t>(rounded);
+ if ((result & 1) != 0 && result - fj == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_word_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRNE_W_D: {
+ printf_instr("FTINTRNE_W_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded = std::floor(fj + 0.5);
+ int32_t result = static_cast<int32_t>(rounded);
+ if ((result & 1) != 0 && result - fj == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRNE_L_S: {
+ printf_instr("FTINTRNE_L_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded = std::floor(fj + 0.5);
+ int64_t result = static_cast<int64_t>(rounded);
+ if ((result & 1) != 0 && result - fj == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINTRNE_L_D: {
+ printf_instr("FTINTRNE_L_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded = std::floor(fj + 0.5);
+ int64_t result = static_cast<int64_t>(rounded);
+ if ((result & 1) != 0 && result - fj == 0.5) {
+ // If the number is halfway between two integers,
+ // round to the even one.
+ result--;
+ }
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINT_W_S: {
+ printf_instr("FTINT_W_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded;
+ int32_t result;
+ round_according_to_fcsr(fj, &rounded, &result);
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_word_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINT_W_D: {
+ printf_instr("FTINT_W_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded;
+ int32_t result;
+ round_according_to_fcsr(fj, &rounded, &result);
+ SetFPUWordResult(fd_reg(), result);
+ if (set_fcsr_round_error(fj, rounded)) {
+ set_fpu_register_word_invalid_result(fj, rounded);
+ }
+ break;
+ }
+ case FTINT_L_S: {
+ printf_instr("FTINT_L_S\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float rounded;
+ int64_t result;
+ round64_according_to_fcsr(fj, &rounded, &result);
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FTINT_L_D: {
+ printf_instr("FTINT_L_D\t %s: %016f, %s, %016f\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double rounded;
+ int64_t result;
+ round64_according_to_fcsr(fj, &rounded, &result);
+ SetFPUResult(fd_reg(), result);
+ if (set_fcsr_round64_error(fj, rounded)) {
+ set_fpu_register_invalid_result64(fj, rounded);
+ }
+ break;
+ }
+ case FFINT_S_W: {
+ alu_out = get_fpu_register_signed_word(fj_reg());
+ printf_instr("FFINT_S_W\t %s: %016f, %s, %016x\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), static_cast<int>(alu_out));
+ SetFPUFloatResult(fd_reg(), static_cast<float>(alu_out));
+ break;
+ }
+ case FFINT_S_L: {
+ alu_out = get_fpu_register(fj_reg());
+ printf_instr("FFINT_S_L\t %s: %016f, %s, %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), alu_out);
+ SetFPUFloatResult(fd_reg(), static_cast<float>(alu_out));
+ break;
+ }
+ case FFINT_D_W: {
+ alu_out = get_fpu_register_signed_word(fj_reg());
+ printf_instr("FFINT_D_W\t %s: %016f, %s, %016x\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), static_cast<int>(alu_out));
+ SetFPUDoubleResult(fd_reg(), static_cast<double>(alu_out));
+ break;
+ }
+ case FFINT_D_L: {
+ alu_out = get_fpu_register(fj_reg());
+ printf_instr("FFINT_D_L\t %s: %016f, %s, %016lx\n",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), alu_out);
+ SetFPUDoubleResult(fd_reg(), static_cast<double>(alu_out));
+ break;
+ }
+ case FRINT_S: {
+ printf_instr("FRINT_S\t %s: %016f, %s, %016f mode : ",
+ FPURegisters::Name(fd_reg()), fd_float(),
+ FPURegisters::Name(fj_reg()), fj_float());
+ float fj = fj_float();
+ float result, temp_result;
+ double temp;
+ float upper = std::ceil(fj);
+ float lower = std::floor(fj);
+ switch (get_fcsr_rounding_mode()) {
+ case kRoundToNearest:
+ printf_instr(" kRoundToNearest\n");
+ if (upper - fj < fj - lower) {
+ result = upper;
+ } else if (upper - fj > fj - lower) {
+ result = lower;
+ } else {
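+ // Exact tie: pick the even candidate. upper is even iff upper / 2
+ // has no fractional part.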
+ temp_result = upper / 2;
+ float remainder = std::modf(temp_result, &temp);
+ if (remainder == 0) {
+ result = upper;
+ } else {
+ result = lower;
+ }
+ }
+ break;
+ case kRoundToZero:
+ printf_instr(" kRoundToZero\n");
+ result = (fj > 0 ? lower : upper);
+ break;
+ case kRoundToPlusInf:
+ printf_instr(" kRoundToPlusInf\n");
+ result = upper;
+ break;
+ case kRoundToMinusInf:
+ printf_instr(" kRoundToMinusInf\n");
+ result = lower;
+ break;
+ }
+ SetFPUFloatResult(fd_reg(), result);
+ set_fcsr_bit(kFCSRInexactCauseBit, result != fj);
+ break;
+ }
+ case FRINT_D: {
+ printf_instr("FRINT_D\t %s: %016f, %s, %016f mode : ",
+ FPURegisters::Name(fd_reg()), fd_double(),
+ FPURegisters::Name(fj_reg()), fj_double());
+ double fj = fj_double();
+ double result, temp, temp_result;
+ double upper = std::ceil(fj);
+ double lower = std::floor(fj);
+ switch (get_fcsr_rounding_mode()) {
+ case kRoundToNearest:
+ printf_instr(" kRoundToNearest\n");
+ if (upper - fj < fj - lower) {
+ result = upper;
+ } else if (upper - fj > fj - lower) {
+ result = lower;
+ } else {
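+ // Exact tie: pick the even candidate, as in FRINT_S above.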
+ temp_result = upper / 2;
+ double remainder = std::modf(temp_result, &temp);
+ if (remainder == 0) {
+ result = upper;
+ } else {
+ result = lower;
+ }
+ }
+ break;
+ case kRoundToZero:
+ printf_instr(" kRoundToZero\n");
+ result = (fj > 0 ? lower : upper);
+ break;
+ case kRoundToPlusInf:
+ printf_instr(" kRoundToPlusInf\n");
+ result = upper;
+ break;
+ case kRoundToMinusInf:
+ printf_instr(" kRoundToMinusInf\n");
+ result = lower;
+ break;
+ }
+ SetFPUDoubleResult(fd_reg(), result);
+ set_fcsr_bit(kFCSRInexactCauseBit, result != fj);
+ break;
+ }
+ case MOVFR2CF:
+ printf("Sim UNIMPLEMENTED: MOVFR2CF\n");
+ UNIMPLEMENTED();
+ case MOVCF2FR:
+ printf("Sim UNIMPLEMENTED: MOVCF2FR\n");
+ UNIMPLEMENTED();
+ case MOVGR2CF:
+ printf_instr("MOVGR2CF\t FCC%d, %s: %016lx\n", cd_reg(),
+ Registers::Name(rj_reg()), rj());
+ set_cf_register(cd_reg(), rj() & 1);
+ break;
+ case MOVCF2GR:
+ printf_instr("MOVCF2GR\t %s: %016lx, FCC%d\n", Registers::Name(rd_reg()),
+ rd(), cj_reg());
+ SetResult(rd_reg(), cj());
+ break;
+ case FRECIP_S:
+ printf("Sim UNIMPLEMENTED: FRECIP_S\n");
+ UNIMPLEMENTED();
+ case FRECIP_D:
+ printf("Sim UNIMPLEMENTED: FRECIP_D\n");
+ UNIMPLEMENTED();
+ case FRSQRT_S:
+ printf("Sim UNIMPLEMENTED: FRSQRT_S\n");
+ UNIMPLEMENTED();
+ case FRSQRT_D:
+ printf("Sim UNIMPLEMENTED: FRSQRT_D\n");
+ UNIMPLEMENTED();
+ case FCLASS_S:
+ printf("Sim UNIMPLEMENTED: FCLASS_S\n");
+ UNIMPLEMENTED();
+ case FCLASS_D:
+ printf("Sim UNIMPLEMENTED: FCLASS_D\n");
+ UNIMPLEMENTED();
+ case FLOGB_S:
+ printf("Sim UNIMPLEMENTED: FLOGB_S\n");
+ UNIMPLEMENTED();
+ case FLOGB_D:
+ printf("Sim UNIMPLEMENTED: FLOGB_D\n");
+ UNIMPLEMENTED();
+ case CLO_W:
+ printf("Sim UNIMPLEMENTED: CLO_W\n");
+ UNIMPLEMENTED();
+ case CTO_W:
+ printf("Sim UNIMPLEMENTED: CTO_W\n");
+ UNIMPLEMENTED();
+ case CLO_D:
+ printf("Sim UNIMPLEMENTED: CLO_D\n");
+ UNIMPLEMENTED();
+ case CTO_D:
+ printf("Sim UNIMPLEMENTED: CTO_D\n");
+ UNIMPLEMENTED();
+ // All other opcodes were rejected earlier, during decoding, so reaching
+ // the default case here indicates a decoder bug.
+ default:
+ UNREACHABLE();
+ }
+}
+
+// Executes the current instruction.
+void Simulator::InstructionDecode(Instruction* instr) {
+ if (v8::internal::FLAG_check_icache) {
+ CheckICache(i_cache(), instr);
+ }
+ pc_modified_ = false;
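+ // Branch and jump handlers write the PC directly and set pc_modified_;
+ // for every other instruction the PC is advanced past this instruction
+ // at the end of this function.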
+
+ v8::base::EmbeddedVector<char, 256> buffer;
+
+ if (::v8::internal::FLAG_trace_sim) {
+ base::SNPrintF(trace_buf_, " ");
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ // Use a reasonably large buffer.
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
+ }
+
+ static int instr_count = 0;
+ USE(instr_count);
+ instr_ = instr;
+ printf_instr("\nInstr%3d: %08x, PC: %016lx\t", instr_count++,
+ instr_.Bits(31, 0), get_pc());
+ switch (instr_.InstructionType()) {
+ case Instruction::kOp6Type:
+ DecodeTypeOp6();
+ break;
+ case Instruction::kOp7Type:
+ DecodeTypeOp7();
+ break;
+ case Instruction::kOp8Type:
+ DecodeTypeOp8();
+ break;
+ case Instruction::kOp10Type:
+ DecodeTypeOp10();
+ break;
+ case Instruction::kOp12Type:
+ DecodeTypeOp12();
+ break;
+ case Instruction::kOp14Type:
+ DecodeTypeOp14();
+ break;
+ case Instruction::kOp17Type:
+ DecodeTypeOp17();
+ break;
+ case Instruction::kOp22Type:
+ DecodeTypeOp22();
+ break;
+ default: {
+ printf("instr_: %x\n", instr_.Bits(31, 0));
+ UNREACHABLE();
+ }
+ }
+
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF(" 0x%08" PRIxPTR " %-44s %s\n",
+ reinterpret_cast<intptr_t>(instr), buffer.begin(),
+ trace_buf_.begin());
+ }
+
+ if (!pc_modified_) {
+ set_register(pc, reinterpret_cast<int64_t>(instr) + kInstrSize);
+ }
+}
+
+void Simulator::Execute() {
+ // Get the PC to simulate. Cannot use the accessor here as we need the
+ // raw PC value and not the one used as input to arithmetic instructions.
+ int64_t program_counter = get_pc();
+ if (::v8::internal::FLAG_stop_sim_at == 0) {
+ // Fast version of the dispatch loop without checking whether the simulator
+ // should be stopping at a particular executed instruction.
+ while (program_counter != end_sim_pc) {
+ Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+ icount_++;
+ InstructionDecode(instr);
+ program_counter = get_pc();
+ }
+ } else {
+ // FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
+ // we reach the particular instruction count.
+ while (program_counter != end_sim_pc) {
+ Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+ icount_++;
+ if (icount_ == static_cast<int64_t>(::v8::internal::FLAG_stop_sim_at)) {
+ Loong64Debugger dbg(this);
+ dbg.Debug();
+ } else {
+ InstructionDecode(instr);
+ }
+ program_counter = get_pc();
+ }
+ }
+}
+
+void Simulator::CallInternal(Address entry) {
+ // Adjust JS-based stack limit to C-based stack limit.
+ isolate_->stack_guard()->AdjustStackLimitForSimulator();
+
+ // Prepare to execute the code at entry.
+ set_register(pc, static_cast<int64_t>(entry));
+ // Put down marker for end of simulation. The simulator will stop simulation
+ // when the PC reaches this value. By saving the "end simulation" value into
+ // the ra register, the simulation stops when returning to this call point.
+ set_register(ra, end_sim_pc);
+
+ // Remember the values of callee-saved registers.
+ int64_t s0_val = get_register(s0);
+ int64_t s1_val = get_register(s1);
+ int64_t s2_val = get_register(s2);
+ int64_t s3_val = get_register(s3);
+ int64_t s4_val = get_register(s4);
+ int64_t s5_val = get_register(s5);
+ int64_t s6_val = get_register(s6);
+ int64_t s7_val = get_register(s7);
+ int64_t s8_val = get_register(s8);
+ int64_t gp_val = get_register(gp);
+ int64_t sp_val = get_register(sp);
+ int64_t tp_val = get_register(tp);
+ int64_t fp_val = get_register(fp);
+
+ // Set up the callee-saved registers with a known value. To be able to check
+ // that they are preserved properly across JS execution.
+ int64_t callee_saved_value = icount_;
+ set_register(s0, callee_saved_value);
+ set_register(s1, callee_saved_value);
+ set_register(s2, callee_saved_value);
+ set_register(s3, callee_saved_value);
+ set_register(s4, callee_saved_value);
+ set_register(s5, callee_saved_value);
+ set_register(s6, callee_saved_value);
+ set_register(s7, callee_saved_value);
+ set_register(s8, callee_saved_value);
+ set_register(gp, callee_saved_value);
+ set_register(tp, callee_saved_value);
+ set_register(fp, callee_saved_value);
+
+ // Start the simulation.
+ Execute();
+
+ // Check that the callee-saved registers have been preserved.
+ CHECK_EQ(callee_saved_value, get_register(s0));
+ CHECK_EQ(callee_saved_value, get_register(s1));
+ CHECK_EQ(callee_saved_value, get_register(s2));
+ CHECK_EQ(callee_saved_value, get_register(s3));
+ CHECK_EQ(callee_saved_value, get_register(s4));
+ CHECK_EQ(callee_saved_value, get_register(s5));
+ CHECK_EQ(callee_saved_value, get_register(s6));
+ CHECK_EQ(callee_saved_value, get_register(s7));
+ CHECK_EQ(callee_saved_value, get_register(s8));
+ CHECK_EQ(callee_saved_value, get_register(gp));
+ CHECK_EQ(callee_saved_value, get_register(tp));
+ CHECK_EQ(callee_saved_value, get_register(fp));
+
+ // Restore callee-saved registers with the original value.
+ set_register(s0, s0_val);
+ set_register(s1, s1_val);
+ set_register(s2, s2_val);
+ set_register(s3, s3_val);
+ set_register(s4, s4_val);
+ set_register(s5, s5_val);
+ set_register(s6, s6_val);
+ set_register(s7, s7_val);
+ set_register(s8, s8_val);
+ set_register(gp, gp_val);
+ set_register(sp, sp_val);
+ set_register(tp, tp_val);
+ set_register(fp, fp_val);
+}
+
+intptr_t Simulator::CallImpl(Address entry, int argument_count,
+ const intptr_t* arguments) {
+ constexpr int kRegisterPassedArguments = 8;
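+ // The LoongArch64 calling convention passes the first eight integer
+ // arguments in registers a0-a7; the rest go on the stack below.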
+ // Set up arguments.
+
+ int reg_arg_count = std::min(kRegisterPassedArguments, argument_count);
+ if (reg_arg_count > 0) set_register(a0, arguments[0]);
+ if (reg_arg_count > 1) set_register(a1, arguments[1]);
+ if (reg_arg_count > 2) set_register(a2, arguments[2]);
+ if (reg_arg_count > 3) set_register(a3, arguments[3]);
+ if (reg_arg_count > 4) set_register(a4, arguments[4]);
+ if (reg_arg_count > 5) set_register(a5, arguments[5]);
+ if (reg_arg_count > 6) set_register(a6, arguments[6]);
+ if (reg_arg_count > 7) set_register(a7, arguments[7]);
+
+ // Remaining arguments passed on stack.
+ int64_t original_stack = get_register(sp);
+ // Compute position of stack on entry to generated code.
+ int stack_args_count = argument_count - reg_arg_count;
+ int stack_args_size = stack_args_count * sizeof(*arguments);
+ int64_t entry_stack = original_stack - stack_args_size;
+
+ if (base::OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -base::OS::ActivationFrameAlignment();
+ }
+ // Store remaining arguments on stack, from low to high memory.
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+ memcpy(stack_argument, arguments + reg_arg_count,
+ stack_args_count * sizeof(*arguments));
+ set_register(sp, entry_stack);
+
+ CallInternal(entry);
+
+ // Pop stack passed arguments.
+ CHECK_EQ(entry_stack, get_register(sp));
+ set_register(sp, original_stack);
+
+ return get_register(a0);
+}
+
+double Simulator::CallFP(Address entry, double d0, double d1) {
+ const FPURegister fparg2 = f1;
+ set_fpu_register_double(f0, d0);
+ set_fpu_register_double(fparg2, d1);
+ CallInternal(entry);
+ return get_fpu_register_double(f0);
+}
+
+uintptr_t Simulator::PushAddress(uintptr_t address) {
+ int64_t new_sp = get_register(sp) - sizeof(uintptr_t);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+ *stack_slot = address;
+ set_register(sp, new_sp);
+ return new_sp;
+}
+
+uintptr_t Simulator::PopAddress() {
+ int64_t current_sp = get_register(sp);
+ uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+ uintptr_t address = *stack_slot;
+ set_register(sp, current_sp + sizeof(uintptr_t));
+ return address;
+}
+
+Simulator::LocalMonitor::LocalMonitor()
+ : access_state_(MonitorAccess::Open),
+ tagged_addr_(0),
+ size_(TransactionSize::None) {}
+
+void Simulator::LocalMonitor::Clear() {
+ access_state_ = MonitorAccess::Open;
+ tagged_addr_ = 0;
+ size_ = TransactionSize::None;
+}
+
+void Simulator::LocalMonitor::NotifyLoad() {
+ if (access_state_ == MonitorAccess::RMW) {
+ // A non-linked load could clear the local monitor, so the most
+ // conservative behavior is to clear it unconditionally on any load.
+ Clear();
+ }
+}
+
+void Simulator::LocalMonitor::NotifyLoadLinked(uintptr_t addr,
+ TransactionSize size) {
+ access_state_ = MonitorAccess::RMW;
+ tagged_addr_ = addr;
+ size_ = size;
+}
+
+void Simulator::LocalMonitor::NotifyStore() {
+ if (access_state_ == MonitorAccess::RMW) {
+ // A non-exclusive store could clear the local monitor, so the most
+ // conservative behavior is to clear it unconditionally on any store.
+ Clear();
+ }
+}
+
+bool Simulator::LocalMonitor::NotifyStoreConditional(uintptr_t addr,
+ TransactionSize size) {
+ if (access_state_ == MonitorAccess::RMW) {
+ if (addr == tagged_addr_ && size_ == size) {
+ Clear();
+ return true;
+ } else {
+ return false;
+ }
+ } else {
+ DCHECK(access_state_ == MonitorAccess::Open);
+ return false;
+ }
+}
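+
+// Illustrative ll.w/sc.w sequence as seen by the local monitor (addresses
+// are hypothetical):
+//   NotifyLoadLinked(0x1000, TransactionSize::Word);       // monitor armed
+//   NotifyStoreConditional(0x1000, TransactionSize::Word); // true, cleared
+// Any intervening NotifyLoad()/NotifyStore() clears the monitor, so a later
+// store conditional is refused.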
+
+Simulator::GlobalMonitor::LinkedAddress::LinkedAddress()
+ : access_state_(MonitorAccess::Open),
+ tagged_addr_(0),
+ next_(nullptr),
+ prev_(nullptr),
+ failure_counter_(0) {}
+
+void Simulator::GlobalMonitor::LinkedAddress::Clear_Locked() {
+ access_state_ = MonitorAccess::Open;
+ tagged_addr_ = 0;
+}
+
+void Simulator::GlobalMonitor::LinkedAddress::NotifyLoadLinked_Locked(
+ uintptr_t addr) {
+ access_state_ = MonitorAccess::RMW;
+ tagged_addr_ = addr;
+}
+
+void Simulator::GlobalMonitor::LinkedAddress::NotifyStore_Locked() {
+ if (access_state_ == MonitorAccess::RMW) {
+ // A non-exclusive store could clear the global monitor, so the most
+ // conservative behavior is to clear global monitors unconditionally on
+ // any store.
+ Clear_Locked();
+ }
+}
+
+bool Simulator::GlobalMonitor::LinkedAddress::NotifyStoreConditional_Locked(
+ uintptr_t addr, bool is_requesting_thread) {
+ if (access_state_ == MonitorAccess::RMW) {
+ if (is_requesting_thread) {
+ if (addr == tagged_addr_) {
+ Clear_Locked();
+ // Introduce occasional sc/scd failures. This is to simulate the
+ // behavior of hardware, which can randomly fail due to background
+ // cache evictions.
+ if (failure_counter_++ >= kMaxFailureCounter) {
+ failure_counter_ = 0;
+ return false;
+ } else {
+ return true;
+ }
+ }
+ } else if ((addr & kExclusiveTaggedAddrMask) ==
+ (tagged_addr_ & kExclusiveTaggedAddrMask)) {
+ // Check the masked addresses when responding to a successful lock by
+ // another thread, so the implementation is more conservative (i.e. the
+ // granularity of locking is as large as possible).
+ Clear_Locked();
+ return false;
+ }
+ }
+ return false;
+}
+
+void Simulator::GlobalMonitor::NotifyLoadLinked_Locked(
+ uintptr_t addr, LinkedAddress* linked_address) {
+ linked_address->NotifyLoadLinked_Locked(addr);
+ PrependProcessor_Locked(linked_address);
+}
+
+void Simulator::GlobalMonitor::NotifyStore_Locked(
+ LinkedAddress* linked_address) {
+ // Notify each thread of the store operation.
+ for (LinkedAddress* iter = head_; iter; iter = iter->next_) {
+ iter->NotifyStore_Locked();
+ }
+}
+
+bool Simulator::GlobalMonitor::NotifyStoreConditional_Locked(
+ uintptr_t addr, LinkedAddress* linked_address) {
+ DCHECK(IsProcessorInLinkedList_Locked(linked_address));
+ if (linked_address->NotifyStoreConditional_Locked(addr, true)) {
+ // Notify the other processors that this StoreConditional succeeded.
+ for (LinkedAddress* iter = head_; iter; iter = iter->next_) {
+ if (iter != linked_address) {
+ iter->NotifyStoreConditional_Locked(addr, false);
+ }
+ }
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool Simulator::GlobalMonitor::IsProcessorInLinkedList_Locked(
+ LinkedAddress* linked_address) const {
+ return head_ == linked_address || linked_address->next_ ||
+ linked_address->prev_;
+}
+
+void Simulator::GlobalMonitor::PrependProcessor_Locked(
+ LinkedAddress* linked_address) {
+ if (IsProcessorInLinkedList_Locked(linked_address)) {
+ return;
+ }
+
+ if (head_) {
+ head_->prev_ = linked_address;
+ }
+ linked_address->prev_ = nullptr;
+ linked_address->next_ = head_;
+ head_ = linked_address;
+}
+
+void Simulator::GlobalMonitor::RemoveLinkedAddress(
+ LinkedAddress* linked_address) {
+ base::MutexGuard lock_guard(&mutex);
+ if (!IsProcessorInLinkedList_Locked(linked_address)) {
+ return;
+ }
+
+ if (linked_address->prev_) {
+ linked_address->prev_->next_ = linked_address->next_;
+ } else {
+ head_ = linked_address->next_;
+ }
+ if (linked_address->next_) {
+ linked_address->next_->prev_ = linked_address->prev_;
+ }
+ linked_address->prev_ = nullptr;
+ linked_address->next_ = nullptr;
+}
+
+#undef SScanF
+
+} // namespace internal
+} // namespace v8
+
+#endif // USE_SIMULATOR
diff --git a/deps/v8/src/execution/loong64/simulator-loong64.h b/deps/v8/src/execution/loong64/simulator-loong64.h
new file mode 100644
index 0000000000..b9e97b93b2
--- /dev/null
+++ b/deps/v8/src/execution/loong64/simulator-loong64.h
@@ -0,0 +1,647 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Declares a Simulator for loongisa instructions if we are not generating a
+// native loongisa binary. This Simulator allows us to run and debug loongisa
+// code generation on regular desktop machines. V8 calls into generated code via
+// the GeneratedCode wrapper, which will either start execution in the
+// Simulator or forward to the real entry on a loongisa HW platform.
+
+#ifndef V8_EXECUTION_LOONG64_SIMULATOR_LOONG64_H_
+#define V8_EXECUTION_LOONG64_SIMULATOR_LOONG64_H_
+
+// globals.h defines USE_SIMULATOR.
+#include "src/common/globals.h"
+
+template <typename T>
+int Compare(const T& a, const T& b) {
+ if (a == b)
+ return 0;
+ else if (a < b)
+ return -1;
+ else
+ return 1;
+}
+
+// Returns the negative absolute value of its argument.
+template <typename T,
+ typename = typename std::enable_if<std::is_signed<T>::value>::type>
+T Nabs(T a) {
+ return a < 0 ? a : -a;
+}
+
+#if defined(USE_SIMULATOR)
+// Running with a simulator.
+
+#include "src/base/hashmap.h"
+#include "src/base/strings.h"
+#include "src/codegen/assembler.h"
+#include "src/codegen/loong64/constants-loong64.h"
+#include "src/execution/simulator-base.h"
+#include "src/utils/allocation.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Utility functions
+
+class CachePage {
+ public:
+ static const int LINE_VALID = 0;
+ static const int LINE_INVALID = 1;
+
+ static const int kPageShift = 12;
+ static const int kPageSize = 1 << kPageShift;
+ static const int kPageMask = kPageSize - 1;
+ static const int kLineShift = 2; // The cache line is only 4 bytes right now.
+ static const int kLineLength = 1 << kLineShift;
+ static const int kLineMask = kLineLength - 1;
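+
+ // An address is split by kPageMask into a page and an offset; the offset
+ // selects both a cached data byte and, via kLineShift, the validity byte
+ // of its cache line.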
+
+ CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
+
+ char* ValidityByte(int offset) {
+ return &validity_map_[offset >> kLineShift];
+ }
+
+ char* CachedData(int offset) { return &data_[offset]; }
+
+ private:
+ char data_[kPageSize]; // The cached data.
+ static const int kValidityMapSize = kPageSize >> kLineShift;
+ char validity_map_[kValidityMapSize]; // One byte per line.
+};
+
+class SimInstructionBase : public InstructionBase {
+ public:
+ Type InstructionType() const { return type_; }
+ inline Instruction* instr() const { return instr_; }
+ inline int32_t operand() const { return operand_; }
+
+ protected:
+ SimInstructionBase() : operand_(-1), instr_(nullptr), type_(kUnsupported) {}
+ explicit SimInstructionBase(Instruction* instr) {}
+
+ int32_t operand_;
+ Instruction* instr_;
+ Type type_;
+
+ private:
+ DISALLOW_ASSIGN(SimInstructionBase);
+};
+
+class SimInstruction : public InstructionGetters<SimInstructionBase> {
+ public:
+ SimInstruction() {}
+
+ explicit SimInstruction(Instruction* instr) { *this = instr; }
+
+ SimInstruction& operator=(Instruction* instr) {
+ operand_ = *reinterpret_cast<const int32_t*>(instr);
+ instr_ = instr;
+ type_ = InstructionBase::InstructionType();
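+ // The DCHECK below relies on operand_ being the first member, so this
+ // wrapper aliases the raw instruction bits it just copied.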
+ DCHECK(reinterpret_cast<void*>(&operand_) == this);
+ return *this;
+ }
+};
+
+class Simulator : public SimulatorBase {
+ public:
+ friend class Loong64Debugger;
+
+ // Registers are declared in order.
+ enum Register {
+ no_reg = -1,
+ zero_reg = 0,
+ ra,
+ gp,
+ sp,
+ a0,
+ a1,
+ a2,
+ a3,
+ a4,
+ a5,
+ a6,
+ a7,
+ t0,
+ t1,
+ t2,
+ t3,
+ t4,
+ t5,
+ t6,
+ t7,
+ t8,
+ tp,
+ fp,
+ s0,
+ s1,
+ s2,
+ s3,
+ s4,
+ s5,
+ s6,
+ s7,
+ s8,
+ pc, // pc must be the last register.
+ kNumSimuRegisters,
+ // aliases
+ v0 = a0,
+ v1 = a1
+ };
+
+ // Condition flag registers.
+ enum CFRegister {
+ fcc0,
+ fcc1,
+ fcc2,
+ fcc3,
+ fcc4,
+ fcc5,
+ fcc6,
+ fcc7,
+ kNumCFRegisters
+ };
+
+ // Floating point registers.
+ enum FPURegister {
+ f0,
+ f1,
+ f2,
+ f3,
+ f4,
+ f5,
+ f6,
+ f7,
+ f8,
+ f9,
+ f10,
+ f11,
+ f12,
+ f13,
+ f14,
+ f15,
+ f16,
+ f17,
+ f18,
+ f19,
+ f20,
+ f21,
+ f22,
+ f23,
+ f24,
+ f25,
+ f26,
+ f27,
+ f28,
+ f29,
+ f30,
+ f31,
+ kNumFPURegisters
+ };
+
+ explicit Simulator(Isolate* isolate);
+ ~Simulator();
+
+ // The currently executing Simulator instance. Potentially there can be one
+ // for each native thread.
+ V8_EXPORT_PRIVATE static Simulator* current(v8::internal::Isolate* isolate);
+
+ // Accessors for register state. Reading the pc value adheres to the LOONG64
+ // architecture specification and is off by 8 from the currently executing
+ // instruction.
+ void set_register(int reg, int64_t value);
+ void set_register_word(int reg, int32_t value);
+ void set_dw_register(int dreg, const int* dbl);
+ V8_EXPORT_PRIVATE int64_t get_register(int reg) const;
+ double get_double_from_register_pair(int reg);
+ // Same for FPURegisters.
+ void set_fpu_register(int fpureg, int64_t value);
+ void set_fpu_register_word(int fpureg, int32_t value);
+ void set_fpu_register_hi_word(int fpureg, int32_t value);
+ void set_fpu_register_float(int fpureg, float value);
+ void set_fpu_register_double(int fpureg, double value);
+ void set_fpu_register_invalid_result64(float original, float rounded);
+ void set_fpu_register_invalid_result(float original, float rounded);
+ void set_fpu_register_word_invalid_result(float original, float rounded);
+ void set_fpu_register_invalid_result64(double original, double rounded);
+ void set_fpu_register_invalid_result(double original, double rounded);
+ void set_fpu_register_word_invalid_result(double original, double rounded);
+ int64_t get_fpu_register(int fpureg) const;
+ int32_t get_fpu_register_word(int fpureg) const;
+ int32_t get_fpu_register_signed_word(int fpureg) const;
+ int32_t get_fpu_register_hi_word(int fpureg) const;
+ float get_fpu_register_float(int fpureg) const;
+ double get_fpu_register_double(int fpureg) const;
+ void set_cf_register(int cfreg, bool value);
+ bool get_cf_register(int cfreg) const;
+ void set_fcsr_rounding_mode(FPURoundingMode mode);
+ unsigned int get_fcsr_rounding_mode();
+ void set_fcsr_bit(uint32_t cc, bool value);
+ bool test_fcsr_bit(uint32_t cc);
+ bool set_fcsr_round_error(double original, double rounded);
+ bool set_fcsr_round64_error(double original, double rounded);
+ bool set_fcsr_round_error(float original, float rounded);
+ bool set_fcsr_round64_error(float original, float rounded);
+ void round_according_to_fcsr(double toRound, double* rounded,
+ int32_t* rounded_int);
+ void round64_according_to_fcsr(double toRound, double* rounded,
+ int64_t* rounded_int);
+ void round_according_to_fcsr(float toRound, float* rounded,
+ int32_t* rounded_int);
+ void round64_according_to_fcsr(float toRound, float* rounded,
+ int64_t* rounded_int);
+ // Special case of set_register and get_register to access the raw PC value.
+ void set_pc(int64_t value);
+ V8_EXPORT_PRIVATE int64_t get_pc() const;
+
+ Address get_sp() const { return static_cast<Address>(get_register(sp)); }
+
+ // Accessor to the internal simulator stack area.
+ uintptr_t StackLimit(uintptr_t c_limit) const;
+
+ // Executes LOONG64 instructions until the PC reaches end_sim_pc.
+ void Execute();
+
+ template <typename Return, typename... Args>
+ Return Call(Address entry, Args... args) {
+ return VariadicCall<Return>(this, &Simulator::CallImpl, entry, args...);
+ }
+
+ // Alternative: call a 2-argument double function.
+ double CallFP(Address entry, double d0, double d1);
+
+ // Push an address onto the JS stack.
+ uintptr_t PushAddress(uintptr_t address);
+
+ // Pop an address from the JS stack.
+ uintptr_t PopAddress();
+
+ // Debugger input.
+ void set_last_debugger_input(char* input);
+ char* last_debugger_input() { return last_debugger_input_; }
+
+ // Redirection support.
+ static void SetRedirectInstruction(Instruction* instruction);
+
+ // ICache checking.
+ static bool ICacheMatch(void* one, void* two);
+ static void FlushICache(base::CustomMatcherHashMap* i_cache, void* start,
+ size_t size);
+
+ // Returns true if the pc register contains one of the 'special_values' defined
+ // below (bad_ra, end_sim_pc).
+ bool has_bad_pc() const;
+
+ private:
+ enum special_values {
+ // Known bad pc value to ensure that the simulator does not execute
+ // without being properly set up.
+ bad_ra = -1,
+ // A pc value used to signal the simulator to stop execution. Generally
+ // the ra is set to this value on transition from native C code to
+ // simulated execution, so that the simulator can "return" to the native
+ // C code.
+ end_sim_pc = -2,
+ // Unpredictable value.
+ Unpredictable = 0xbadbeaf
+ };
+
+ V8_EXPORT_PRIVATE intptr_t CallImpl(Address entry, int argument_count,
+ const intptr_t* arguments);
+
+ // Unsupported instructions use Format to print an error and stop execution.
+ void Format(Instruction* instr, const char* format);
+
+ // Helpers for data value tracing.
+ enum TraceType {
+ BYTE,
+ HALF,
+ WORD,
+ DWORD,
+ FLOAT,
+ DOUBLE,
+ FLOAT_DOUBLE,
+ WORD_DWORD
+ };
+
+ // Read and write memory.
+ inline uint32_t ReadBU(int64_t addr);
+ inline int32_t ReadB(int64_t addr);
+ inline void WriteB(int64_t addr, uint8_t value);
+ inline void WriteB(int64_t addr, int8_t value);
+
+ inline uint16_t ReadHU(int64_t addr, Instruction* instr);
+ inline int16_t ReadH(int64_t addr, Instruction* instr);
+ // Note: Overloaded on the sign of the value.
+ inline void WriteH(int64_t addr, uint16_t value, Instruction* instr);
+ inline void WriteH(int64_t addr, int16_t value, Instruction* instr);
+
+ inline uint32_t ReadWU(int64_t addr, Instruction* instr);
+ inline int32_t ReadW(int64_t addr, Instruction* instr, TraceType t = WORD);
+ inline void WriteW(int64_t addr, int32_t value, Instruction* instr);
+ void WriteConditionalW(int64_t addr, int32_t value, Instruction* instr,
+ int32_t rt_reg);
+ inline int64_t Read2W(int64_t addr, Instruction* instr);
+ inline void Write2W(int64_t addr, int64_t value, Instruction* instr);
+ inline void WriteConditional2W(int64_t addr, int64_t value,
+ Instruction* instr, int32_t rt_reg);
+
+ inline double ReadD(int64_t addr, Instruction* instr);
+ inline void WriteD(int64_t addr, double value, Instruction* instr);
+
+ template <typename T>
+ T ReadMem(int64_t addr, Instruction* instr);
+ template <typename T>
+ void WriteMem(int64_t addr, T value, Instruction* instr);
+
+ // Helper for debugging memory access.
+ inline void DieOrDebug();
+
+ void TraceRegWr(int64_t value, TraceType t = DWORD);
+ void TraceMemWr(int64_t addr, int64_t value, TraceType t);
+ void TraceMemRd(int64_t addr, int64_t value, TraceType t = DWORD);
+ template <typename T>
+ void TraceMemRd(int64_t addr, T value);
+ template <typename T>
+ void TraceMemWr(int64_t addr, T value);
+
+ SimInstruction instr_;
+
+ // Executing is handled based on the instruction type.
+ void DecodeTypeOp6();
+ void DecodeTypeOp7();
+ void DecodeTypeOp8();
+ void DecodeTypeOp10();
+ void DecodeTypeOp12();
+ void DecodeTypeOp14();
+ void DecodeTypeOp17();
+ void DecodeTypeOp22();
+
+ inline int32_t rj_reg() const { return instr_.RjValue(); }
+ inline int64_t rj() const { return get_register(rj_reg()); }
+ inline uint64_t rj_u() const {
+ return static_cast<uint64_t>(get_register(rj_reg()));
+ }
+ inline int32_t rk_reg() const { return instr_.RkValue(); }
+ inline int64_t rk() const { return get_register(rk_reg()); }
+ inline uint64_t rk_u() const {
+ return static_cast<uint64_t>(get_register(rk_reg()));
+ }
+ inline int32_t rd_reg() const { return instr_.RdValue(); }
+ inline int64_t rd() const { return get_register(rd_reg()); }
+ inline uint64_t rd_u() const {
+ return static_cast<uint64_t>(get_register(rd_reg()));
+ }
+ inline int32_t fa_reg() const { return instr_.FaValue(); }
+ inline float fa_float() const { return get_fpu_register_float(fa_reg()); }
+ inline double fa_double() const { return get_fpu_register_double(fa_reg()); }
+ inline int32_t fj_reg() const { return instr_.FjValue(); }
+ inline float fj_float() const { return get_fpu_register_float(fj_reg()); }
+ inline double fj_double() const { return get_fpu_register_double(fj_reg()); }
+ inline int32_t fk_reg() const { return instr_.FkValue(); }
+ inline float fk_float() const { return get_fpu_register_float(fk_reg()); }
+ inline double fk_double() const { return get_fpu_register_double(fk_reg()); }
+ inline int32_t fd_reg() const { return instr_.FdValue(); }
+ inline float fd_float() const { return get_fpu_register_float(fd_reg()); }
+ inline double fd_double() const { return get_fpu_register_double(fd_reg()); }
+ inline int32_t cj_reg() const { return instr_.CjValue(); }
+ inline bool cj() const { return get_cf_register(cj_reg()); }
+ inline int32_t cd_reg() const { return instr_.CdValue(); }
+ inline bool cd() const { return get_cf_register(cd_reg()); }
+ inline int32_t ca_reg() const { return instr_.CaValue(); }
+ inline bool ca() const { return get_cf_register(ca_reg()); }
+ inline uint32_t sa2() const { return instr_.Sa2Value(); }
+ inline uint32_t sa3() const { return instr_.Sa3Value(); }
+ inline uint32_t ui5() const { return instr_.Ui5Value(); }
+ inline uint32_t ui6() const { return instr_.Ui6Value(); }
+ inline uint32_t lsbw() const { return instr_.LsbwValue(); }
+ inline uint32_t msbw() const { return instr_.MsbwValue(); }
+ inline uint32_t lsbd() const { return instr_.LsbdValue(); }
+ inline uint32_t msbd() const { return instr_.MsbdValue(); }
+ inline uint32_t cond() const { return instr_.CondValue(); }
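+ // The shift pairs below sign-extend the raw immediate fields: an N-bit
+ // field is shifted left so its sign bit reaches bit 31, then shifted
+ // arithmetically back right.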
+ inline int32_t si12() const { return (instr_.Si12Value() << 20) >> 20; }
+ inline uint32_t ui12() const { return instr_.Ui12Value(); }
+ inline int32_t si14() const { return (instr_.Si14Value() << 18) >> 18; }
+ inline int32_t si16() const { return (instr_.Si16Value() << 16) >> 16; }
+ inline int32_t si20() const { return (instr_.Si20Value() << 12) >> 12; }
+
+ inline void SetResult(const int32_t rd_reg, const int64_t alu_out) {
+ set_register(rd_reg, alu_out);
+ TraceRegWr(alu_out);
+ }
+
+ inline void SetFPUWordResult(int32_t fd_reg, int32_t alu_out) {
+ set_fpu_register_word(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg), WORD);
+ }
+
+ inline void SetFPUWordResult2(int32_t fd_reg, int32_t alu_out) {
+ set_fpu_register_word(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg));
+ }
+
+ inline void SetFPUResult(int32_t fd_reg, int64_t alu_out) {
+ set_fpu_register(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg));
+ }
+
+ inline void SetFPUResult2(int32_t fd_reg, int64_t alu_out) {
+ set_fpu_register(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg), DOUBLE);
+ }
+
+ inline void SetFPUFloatResult(int32_t fd_reg, float alu_out) {
+ set_fpu_register_float(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg), FLOAT);
+ }
+
+ inline void SetFPUDoubleResult(int32_t fd_reg, double alu_out) {
+ set_fpu_register_double(fd_reg, alu_out);
+ TraceRegWr(get_fpu_register(fd_reg), DOUBLE);
+ }
+
+ // Used for breakpoints.
+ void SoftwareInterrupt();
+
+ // Stop helper functions.
+ bool IsWatchpoint(uint64_t code);
+ void PrintWatchpoint(uint64_t code);
+ void HandleStop(uint64_t code, Instruction* instr);
+ bool IsStopInstruction(Instruction* instr);
+ bool IsEnabledStop(uint64_t code);
+ void EnableStop(uint64_t code);
+ void DisableStop(uint64_t code);
+ void IncreaseStopCounter(uint64_t code);
+ void PrintStopInfo(uint64_t code);
+
+ // Executes one instruction.
+ void InstructionDecode(Instruction* instr);
+
+ // ICache.
+ static void CheckICache(base::CustomMatcherHashMap* i_cache,
+ Instruction* instr);
+ static void FlushOnePage(base::CustomMatcherHashMap* i_cache, intptr_t start,
+ size_t size);
+ static CachePage* GetCachePage(base::CustomMatcherHashMap* i_cache,
+ void* page);
+
+ enum Exception {
+ none,
+ kIntegerOverflow,
+ kIntegerUnderflow,
+ kDivideByZero,
+ kNumExceptions
+ };
+
+ // Exceptions.
+ void SignalException(Exception e);
+
+ // Handle arguments and return value for runtime FP functions.
+ void GetFpArgs(double* x, double* y, int32_t* z);
+ void SetFpResult(const double& result);
+
+ void CallInternal(Address entry);
+
+ // Architecture state.
+ // Registers.
+ int64_t registers_[kNumSimuRegisters];
+ // Floating point Registers.
+ int64_t FPUregisters_[kNumFPURegisters];
+ // Condition flags Registers.
+ bool CFregisters_[kNumCFRegisters];
+ // FPU control register.
+ uint32_t FCSR_;
+
+ // Simulator support.
+ // Allocate 1MB for stack.
+ size_t stack_size_;
+ char* stack_;
+ bool pc_modified_;
+ int64_t icount_;
+ int break_count_;
+ base::EmbeddedVector<char, 128> trace_buf_;
+
+ // Debugger input.
+ char* last_debugger_input_;
+
+ v8::internal::Isolate* isolate_;
+
+ // Registered breakpoints.
+ Instruction* break_pc_;
+ Instr break_instr_;
+
+ // Stop is disabled if bit 31 is set.
+ static const uint32_t kStopDisabledBit = 1 << 31;
+
+ // A stop is enabled, meaning the simulator will stop when it reaches the
+ // instruction, if bit 31 of watched_stops_[code].count is unset.
+ // The value watched_stops_[code].count & ~(1 << 31) indicates how many times
+ // the stop was hit or passed through.
+ struct StopCountAndDesc {
+ uint32_t count;
+ char* desc;
+ };
+ StopCountAndDesc watched_stops_[kMaxStopCode + 1];
+
+ // Synchronization primitives.
+ enum class MonitorAccess {
+ Open,
+ RMW,
+ };
+
+ enum class TransactionSize {
+ None = 0,
+ Word = 4,
+ DoubleWord = 8,
+ };
+
+ // The least-significant bits of the address are ignored. The number of
+ // ignored bits is implementation-defined, between 3 and the number of bits
+ // in the minimum page size.
+ static const uintptr_t kExclusiveTaggedAddrMask = ~((1 << 3) - 1);
+
+ class LocalMonitor {
+ public:
+ LocalMonitor();
+
+ // These functions manage the state machine for the local monitor, but do
+ // not actually perform loads and stores. NotifyStoreConditional only
+ // returns true if the store conditional is allowed; the global monitor will
+ // still have to be checked to see whether the memory should be updated.
+ void NotifyLoad();
+ void NotifyLoadLinked(uintptr_t addr, TransactionSize size);
+ void NotifyStore();
+ bool NotifyStoreConditional(uintptr_t addr, TransactionSize size);
+
+ private:
+ void Clear();
+
+ MonitorAccess access_state_;
+ uintptr_t tagged_addr_;
+ TransactionSize size_;
+ };
+
+ class GlobalMonitor {
+ public:
+ class LinkedAddress {
+ public:
+ LinkedAddress();
+
+ private:
+ friend class GlobalMonitor;
+ // These functions manage the state machine for the global monitor, but do
+ // not actually perform loads and stores.
+ void Clear_Locked();
+ void NotifyLoadLinked_Locked(uintptr_t addr);
+ void NotifyStore_Locked();
+ bool NotifyStoreConditional_Locked(uintptr_t addr,
+ bool is_requesting_thread);
+
+ MonitorAccess access_state_;
+ uintptr_t tagged_addr_;
+ LinkedAddress* next_;
+ LinkedAddress* prev_;
+ // A scd can fail due to background cache evictions. Rather than
+ // simulating this, we'll just occasionally introduce cases where a
+ // store conditional fails. This will happen once after every
+ // kMaxFailureCounter exclusive stores.
+ static const int kMaxFailureCounter = 5;
+ int failure_counter_;
+ };
+
+ // Exposed so it can be accessed by Simulator::{Read,Write}Ex*.
+ base::Mutex mutex;
+
+ void NotifyLoadLinked_Locked(uintptr_t addr, LinkedAddress* linked_address);
+ void NotifyStore_Locked(LinkedAddress* linked_address);
+ bool NotifyStoreConditional_Locked(uintptr_t addr,
+ LinkedAddress* linked_address);
+
+ // Called when the simulator is destroyed.
+ void RemoveLinkedAddress(LinkedAddress* linked_address);
+
+ static GlobalMonitor* Get();
+
+ private:
+ // Private constructor. Call {GlobalMonitor::Get()} to get the singleton.
+ GlobalMonitor() = default;
+ friend class base::LeakyObject<GlobalMonitor>;
+
+ bool IsProcessorInLinkedList_Locked(LinkedAddress* linked_address) const;
+ void PrependProcessor_Locked(LinkedAddress* linked_address);
+
+ LinkedAddress* head_ = nullptr;
+ };
+
+ LocalMonitor local_monitor_;
+ GlobalMonitor::LinkedAddress global_monitor_thread_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // defined(USE_SIMULATOR)
+#endif // V8_EXECUTION_LOONG64_SIMULATOR_LOONG64_H_
diff --git a/deps/v8/src/execution/messages.cc b/deps/v8/src/execution/messages.cc
index ad530e1f2a..2628e7a673 100644
--- a/deps/v8/src/execution/messages.cc
+++ b/deps/v8/src/execution/messages.cc
@@ -106,55 +106,55 @@ void MessageHandler::ReportMessage(Isolate* isolate, const MessageLocation* loc,
Handle<JSMessageObject> message) {
v8::Local<v8::Message> api_message_obj = v8::Utils::MessageToLocal(message);
- if (api_message_obj->ErrorLevel() == v8::Isolate::kMessageError) {
- // We are calling into embedder's code which can throw exceptions.
- // Thus we need to save current exception state, reset it to the clean one
- // and ignore scheduled exceptions callbacks can throw.
+ if (api_message_obj->ErrorLevel() != v8::Isolate::kMessageError) {
+ ReportMessageNoExceptions(isolate, loc, message, v8::Local<v8::Value>());
+ return;
+ }
- // We pass the exception object into the message handler callback though.
- Object exception_object = ReadOnlyRoots(isolate).undefined_value();
- if (isolate->has_pending_exception()) {
- exception_object = isolate->pending_exception();
- }
- Handle<Object> exception(exception_object, isolate);
+ // We are calling into embedder's code which can throw exceptions.
+ // Thus we need to save current exception state, reset it to the clean one
+ // and ignore scheduled exceptions callbacks can throw.
- Isolate::ExceptionScope exception_scope(isolate);
- isolate->clear_pending_exception();
- isolate->set_external_caught_exception(false);
+ // We pass the exception object into the message handler callback though.
+ Object exception_object = ReadOnlyRoots(isolate).undefined_value();
+ if (isolate->has_pending_exception()) {
+ exception_object = isolate->pending_exception();
+ }
+ Handle<Object> exception(exception_object, isolate);
- // Turn the exception on the message into a string if it is an object.
- if (message->argument().IsJSObject()) {
- HandleScope scope(isolate);
- Handle<Object> argument(message->argument(), isolate);
+ Isolate::ExceptionScope exception_scope(isolate);
+ isolate->clear_pending_exception();
+ isolate->set_external_caught_exception(false);
- MaybeHandle<Object> maybe_stringified;
- Handle<Object> stringified;
- // Make sure we don't leak uncaught internally generated Error objects.
- if (argument->IsJSError()) {
- maybe_stringified = Object::NoSideEffectsToString(isolate, argument);
- } else {
- v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
- catcher.SetVerbose(false);
- catcher.SetCaptureMessage(false);
+ // Turn the exception on the message into a string if it is an object.
+ if (message->argument().IsJSObject()) {
+ HandleScope scope(isolate);
+ Handle<Object> argument(message->argument(), isolate);
- maybe_stringified = Object::ToString(isolate, argument);
- }
+ MaybeHandle<Object> maybe_stringified;
+ Handle<Object> stringified;
+ // Make sure we don't leak uncaught internally generated Error objects.
+ if (argument->IsJSError()) {
+ maybe_stringified = Object::NoSideEffectsToString(isolate, argument);
+ } else {
+ v8::TryCatch catcher(reinterpret_cast<v8::Isolate*>(isolate));
+ catcher.SetVerbose(false);
+ catcher.SetCaptureMessage(false);
- if (!maybe_stringified.ToHandle(&stringified)) {
- DCHECK(isolate->has_pending_exception());
- isolate->clear_pending_exception();
- isolate->set_external_caught_exception(false);
- stringified =
- isolate->factory()->NewStringFromAsciiChecked("exception");
- }
- message->set_argument(*stringified);
+ maybe_stringified = Object::ToString(isolate, argument);
}
- v8::Local<v8::Value> api_exception_obj = v8::Utils::ToLocal(exception);
- ReportMessageNoExceptions(isolate, loc, message, api_exception_obj);
- } else {
- ReportMessageNoExceptions(isolate, loc, message, v8::Local<v8::Value>());
+ if (!maybe_stringified.ToHandle(&stringified)) {
+ DCHECK(isolate->has_pending_exception());
+ isolate->clear_pending_exception();
+ isolate->set_external_caught_exception(false);
+ stringified = isolate->factory()->exception_string();
+ }
+ message->set_argument(*stringified);
}
+
+ v8::Local<v8::Value> api_exception_obj = v8::Utils::ToLocal(exception);
+ ReportMessageNoExceptions(isolate, loc, message, api_exception_obj);
}
void MessageHandler::ReportMessageNoExceptions(
@@ -297,10 +297,14 @@ class V8_NODISCARD PrepareStackTraceScope {
MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
Handle<JSObject> error,
Handle<Object> raw_stack) {
+ if (FLAG_correctness_fuzzer_suppressions) {
+ return isolate->factory()->empty_string();
+ }
DCHECK(raw_stack->IsFixedArray());
Handle<FixedArray> elems = Handle<FixedArray>::cast(raw_stack);
const bool in_recursion = isolate->formatting_stack_trace();
+ const bool has_overflowed = i::StackLimitCheck{isolate}.HasOverflowed();
Handle<Context> error_context;
if (!in_recursion && error->GetCreationContext().ToHandle(&error_context)) {
DCHECK(error_context->IsNativeContext());
@@ -318,7 +322,7 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
isolate->RunPrepareStackTraceCallback(error_context, error, sites),
Object);
return result;
- } else {
+ } else if (!has_overflowed) {
Handle<JSFunction> global_error =
handle(error_context->error_function(), isolate);
@@ -359,7 +363,6 @@ MaybeHandle<Object> ErrorUtils::FormatStackTrace(Isolate* isolate,
}
// Otherwise, run our internal formatting logic.
-
IncrementalStringBuilder builder(isolate);
RETURN_ON_EXCEPTION(isolate, AppendErrorString(isolate, error, &builder),
diff --git a/deps/v8/src/execution/messages.h b/deps/v8/src/execution/messages.h
index a945b82299..5a54279647 100644
--- a/deps/v8/src/execution/messages.h
+++ b/deps/v8/src/execution/messages.h
@@ -12,6 +12,7 @@
#include <memory>
+#include "include/v8-local-handle.h"
#include "src/base/optional.h"
#include "src/common/message-template.h"
#include "src/handles/handles.h"
diff --git a/deps/v8/src/execution/microtask-queue.h b/deps/v8/src/execution/microtask-queue.h
index e9d40a924f..6091fa3575 100644
--- a/deps/v8/src/execution/microtask-queue.h
+++ b/deps/v8/src/execution/microtask-queue.h
@@ -6,11 +6,12 @@
#define V8_EXECUTION_MICROTASK_QUEUE_H_
#include <stdint.h>
+
#include <memory>
#include <vector>
#include "include/v8-internal.h" // For Address.
-#include "include/v8.h"
+#include "include/v8-microtask-queue.h"
#include "src/base/macros.h"
namespace v8 {
diff --git a/deps/v8/src/execution/mips/simulator-mips.cc b/deps/v8/src/execution/mips/simulator-mips.cc
index c49172a564..64ef946b2d 100644
--- a/deps/v8/src/execution/mips/simulator-mips.cc
+++ b/deps/v8/src/execution/mips/simulator-mips.cc
@@ -150,7 +150,6 @@ bool MipsDebugger::GetValue(const char* desc, int32_t* value) {
} else {
return SScanF(desc, "%i", value) == 1;
}
- return false;
}
bool MipsDebugger::GetValue(const char* desc, int64_t* value) {
@@ -169,7 +168,6 @@ bool MipsDebugger::GetValue(const char* desc, int64_t* value) {
} else {
return SScanF(desc, "%" SCNu64, reinterpret_cast<uint64_t*>(value)) == 1;
}
- return false;
}
bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
@@ -2028,7 +2026,6 @@ double Simulator::ReadD(int32_t addr, Instruction* instr) {
PrintF("Unaligned (double) read at 0x%08x, pc=0x%08" V8PRIxPTR "\n", addr,
reinterpret_cast<intptr_t>(instr));
base::OS::Abort();
- return 0;
}
void Simulator::WriteD(int32_t addr, double value, Instruction* instr) {
@@ -2055,7 +2052,6 @@ uint16_t Simulator::ReadHU(int32_t addr, Instruction* instr) {
PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
addr, reinterpret_cast<intptr_t>(instr));
base::OS::Abort();
- return 0;
}
int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
@@ -2068,7 +2064,6 @@ int16_t Simulator::ReadH(int32_t addr, Instruction* instr) {
PrintF("Unaligned signed halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
addr, reinterpret_cast<intptr_t>(instr));
base::OS::Abort();
- return 0;
}
void Simulator::WriteH(int32_t addr, uint16_t value, Instruction* instr) {
@@ -2330,7 +2325,6 @@ void Simulator::SoftwareInterrupt() {
break;
default:
UNREACHABLE();
- break;
}
}
switch (redirection->type()) {
@@ -2365,7 +2359,6 @@ void Simulator::SoftwareInterrupt() {
}
default:
UNREACHABLE();
- break;
}
if (::v8::internal::FLAG_trace_sim) {
switch (redirection->type()) {
@@ -2379,7 +2372,6 @@ void Simulator::SoftwareInterrupt() {
break;
default:
UNREACHABLE();
- break;
}
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
@@ -2930,7 +2922,6 @@ void Simulator::DecodeTypeRegisterDRsType() {
UNSUPPORTED();
}
break;
- break;
}
case TRUNC_L_D: { // Mips32r2 instruction.
DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
@@ -4233,7 +4224,6 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
default:
alu_out = 0x12345678;
UNREACHABLE();
- break;
}
}
}
@@ -4271,7 +4261,6 @@ int Simulator::DecodeMsaDataFormat() {
break;
default:
UNREACHABLE();
- break;
}
} else {
int DF[] = {MSA_BYTE, MSA_HALF, MSA_WORD, MSA_DWORD};
@@ -4316,7 +4305,6 @@ int Simulator::DecodeMsaDataFormat() {
break;
default:
UNREACHABLE();
- break;
}
}
return df;
@@ -4682,7 +4670,6 @@ void Simulator::DecodeTypeMsaELM() {
case SPLATI:
case INSVE:
UNIMPLEMENTED();
- break;
default:
UNREACHABLE();
}
@@ -6798,7 +6785,6 @@ void Simulator::DecodeTypeImmediate() {
}
default:
UNREACHABLE();
- break;
}
}
}
@@ -6856,7 +6842,6 @@ void Simulator::DecodeTypeImmediate() {
break;
default:
UNREACHABLE();
- break;
}
break;
default:
@@ -6880,14 +6865,16 @@ void Simulator::DecodeTypeImmediate() {
// Type 3: instructions using a 26-bit immediate (e.g. j, jal).
void Simulator::DecodeTypeJump() {
- SimInstruction simInstr = instr_;
+ // instr_ will be overwritten by BranchDelayInstructionDecode(), so we save
+ // the result of IsLinkingInstruction now.
+ bool isLinkingInstr = instr_.IsLinkingInstruction();
// Get current pc.
int32_t current_pc = get_pc();
// Get unchanged bits of pc.
int32_t pc_high_bits = current_pc & 0xF0000000;
// Next pc.
- int32_t next_pc = pc_high_bits | (simInstr.Imm26Value() << 2);
+ int32_t next_pc = pc_high_bits | (instr_.Imm26Value() << 2);
// Execute branch delay slot.
// We don't check for end_sim_pc. First it should not be met as the current pc
@@ -6898,7 +6885,7 @@ void Simulator::DecodeTypeJump() {
// Update pc and ra if necessary.
// Do this after the branch delay execution.
- if (simInstr.IsLinkingInstruction()) {
+ if (isLinkingInstr) {
set_register(31, current_pc + 2 * kInstrSize);
}
set_pc(next_pc);
diff --git a/deps/v8/src/execution/mips64/simulator-mips64.cc b/deps/v8/src/execution/mips64/simulator-mips64.cc
index d45889e5a2..f628653900 100644
--- a/deps/v8/src/execution/mips64/simulator-mips64.cc
+++ b/deps/v8/src/execution/mips64/simulator-mips64.cc
@@ -159,7 +159,6 @@ bool MipsDebugger::GetValue(const char* desc, int64_t* value) {
} else {
return SScanF(desc, "%" SCNu64, reinterpret_cast<uint64_t*>(value)) == 1;
}
- return false;
}
bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
@@ -2039,7 +2038,6 @@ double Simulator::ReadD(int64_t addr, Instruction* instr) {
PrintF("Unaligned (double) read at 0x%08" PRIx64 " , pc=0x%08" V8PRIxPTR "\n",
addr, reinterpret_cast<intptr_t>(instr));
base::OS::Abort();
- return 0;
}
void Simulator::WriteD(int64_t addr, double value, Instruction* instr) {
@@ -2330,7 +2328,6 @@ void Simulator::SoftwareInterrupt() {
break;
default:
UNREACHABLE();
- break;
}
}
switch (redirection->type()) {
@@ -2365,7 +2362,6 @@ void Simulator::SoftwareInterrupt() {
}
default:
UNREACHABLE();
- break;
}
if (::v8::internal::FLAG_trace_sim) {
switch (redirection->type()) {
@@ -2379,7 +2375,6 @@ void Simulator::SoftwareInterrupt() {
break;
default:
UNREACHABLE();
- break;
}
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
@@ -4404,7 +4399,6 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
default:
alu_out = 0x12345678;
UNREACHABLE();
- break;
}
break;
}
@@ -4503,7 +4497,6 @@ void Simulator::DecodeTypeRegisterSPECIAL3() {
default:
alu_out = 0x12345678;
UNREACHABLE();
- break;
}
break;
}
@@ -4542,7 +4535,6 @@ int Simulator::DecodeMsaDataFormat() {
break;
default:
UNREACHABLE();
- break;
}
} else {
int DF[] = {MSA_BYTE, MSA_HALF, MSA_WORD, MSA_DWORD};
@@ -4587,7 +4579,6 @@ int Simulator::DecodeMsaDataFormat() {
break;
default:
UNREACHABLE();
- break;
}
}
return df;
@@ -4967,7 +4958,6 @@ void Simulator::DecodeTypeMsaELM() {
case SPLATI:
case INSVE:
UNIMPLEMENTED();
- break;
default:
UNREACHABLE();
}
@@ -7187,7 +7177,6 @@ void Simulator::DecodeTypeImmediate() {
}
default:
UNREACHABLE();
- break;
}
break;
}
@@ -7273,7 +7262,6 @@ void Simulator::DecodeTypeImmediate() {
break;
default:
UNREACHABLE();
- break;
}
break;
default:
@@ -7297,13 +7285,15 @@ void Simulator::DecodeTypeImmediate() {
// Type 3: instructions using a 26-bit immediate (e.g. j, jal).
void Simulator::DecodeTypeJump() {
- SimInstruction simInstr = instr_;
+ // instr_ will be overwritten by BranchDelayInstructionDecode(), so we save
+ // the result of IsLinkingInstruction now.
+ bool isLinkingInstr = instr_.IsLinkingInstruction();
// Get current pc.
int64_t current_pc = get_pc();
// Get unchanged bits of pc.
int64_t pc_high_bits = current_pc & 0xFFFFFFFFF0000000;
// Next pc.
- int64_t next_pc = pc_high_bits | (simInstr.Imm26Value() << 2);
+ int64_t next_pc = pc_high_bits | (instr_.Imm26Value() << 2);
// Execute branch delay slot.
// We don't check for end_sim_pc. First it should not be met as the current pc
@@ -7314,7 +7304,7 @@ void Simulator::DecodeTypeJump() {
// Update pc and ra if necessary.
// Do this after the branch delay execution.
- if (simInstr.IsLinkingInstruction()) {
+ if (isLinkingInstr) {
set_register(31, current_pc + 2 * kInstrSize);
}
set_pc(next_pc);
diff --git a/deps/v8/src/execution/mips64/simulator-mips64.h b/deps/v8/src/execution/mips64/simulator-mips64.h
index ce3f06f2ed..69e8094174 100644
--- a/deps/v8/src/execution/mips64/simulator-mips64.h
+++ b/deps/v8/src/execution/mips64/simulator-mips64.h
@@ -243,7 +243,7 @@ class Simulator : public SimulatorBase {
void set_register(int reg, int64_t value);
void set_register_word(int reg, int32_t value);
void set_dw_register(int dreg, const int* dbl);
- int64_t get_register(int reg) const;
+ V8_EXPORT_PRIVATE int64_t get_register(int reg) const;
double get_double_from_register_pair(int reg);
// Same for FPURegisters.
void set_fpu_register(int fpureg, int64_t value);
@@ -291,7 +291,7 @@ class Simulator : public SimulatorBase {
unsigned int get_msacsr_rounding_mode();
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int64_t value);
- int64_t get_pc() const;
+ V8_EXPORT_PRIVATE int64_t get_pc() const;
Address get_sp() const { return static_cast<Address>(get_register(sp)); }
diff --git a/deps/v8/src/execution/ppc/simulator-ppc.cc b/deps/v8/src/execution/ppc/simulator-ppc.cc
index 5e9751c07a..f6ee75e809 100644
--- a/deps/v8/src/execution/ppc/simulator-ppc.cc
+++ b/deps/v8/src/execution/ppc/simulator-ppc.cc
@@ -93,16 +93,12 @@ bool PPCDebugger::GetValue(const char* desc, intptr_t* value) {
if (regnum != kNoRegister) {
*value = GetRegisterValue(regnum);
return true;
- } else {
- if (strncmp(desc, "0x", 2) == 0) {
- return SScanF(desc + 2, "%" V8PRIxPTR,
- reinterpret_cast<uintptr_t*>(value)) == 1;
- } else {
- return SScanF(desc, "%" V8PRIuPTR, reinterpret_cast<uintptr_t*>(value)) ==
- 1;
- }
}
- return false;
+ if (strncmp(desc, "0x", 2) == 0) {
+ return SScanF(desc + 2, "%" V8PRIxPTR,
+ reinterpret_cast<uintptr_t*>(value)) == 1;
+ }
+ return SScanF(desc, "%" V8PRIuPTR, reinterpret_cast<uintptr_t*>(value)) == 1;
}
bool PPCDebugger::GetFPDoubleValue(const char* desc, double* value) {
@@ -1031,7 +1027,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
if (!stack_aligned) {
PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
@@ -1071,7 +1066,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
switch (redirection->type()) {
@@ -1085,7 +1079,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
@@ -1704,7 +1697,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
case CRORC:
case CROR: {
UNIMPLEMENTED(); // Not used by V8.
- break;
}
case RLWIMIX: {
int ra = instr->RAValue();
@@ -2552,7 +2544,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rs = instr->RSValue();
int ra = instr->RAValue();
uint32_t rs_val = static_cast<uint32_t>(get_register(rs));
- uintptr_t count = __builtin_ctz(rs_val);
+ uintptr_t count = rs_val == 0 ? 32 : __builtin_ctz(rs_val);
set_register(ra, count);
if (instr->Bit(0)) { // RC Bit set
int bf = 0;
@@ -2570,7 +2562,7 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
int rs = instr->RSValue();
int ra = instr->RAValue();
uint64_t rs_val = get_register(rs);
- uintptr_t count = __builtin_ctz(rs_val);
+ uintptr_t count = rs_val == 0 ? 64 : __builtin_ctzl(rs_val);
set_register(ra, count);
if (instr->Bit(0)) { // RC Bit set
int bf = 0;
@@ -3192,7 +3184,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
case LMW:
case STMW: {
UNIMPLEMENTED();
- break;
}
case LFSU:
@@ -3282,7 +3273,25 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
break;
}
-
+ case BRW: {
+ constexpr int kBitsPerWord = 32;
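+ // brw byte-reverses each 32-bit half of the 64-bit source register
+ // independently.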
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ uint64_t rs_val = get_register(rs);
+ uint32_t rs_high = rs_val >> kBitsPerWord;
+ uint32_t rs_low = (rs_val << kBitsPerWord) >> kBitsPerWord;
+ uint64_t result = __builtin_bswap32(rs_high);
+ result = (result << kBitsPerWord) | __builtin_bswap32(rs_low);
+ set_register(ra, result);
+ break;
+ }
+ case BRD: {
+ int rs = instr->RSValue();
+ int ra = instr->RAValue();
+ uint64_t rs_val = get_register(rs);
+ set_register(ra, __builtin_bswap64(rs_val));
+ break;
+ }
case FCFIDS: {
// fcfids
int frt = instr->RTValue();
@@ -3512,7 +3521,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
default:
UNIMPLEMENTED(); // Not used by V8.
- break;
}
if (frb_val < static_cast<double>(kMinVal)) {
frt_val = kMinVal;
@@ -3557,7 +3565,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
default:
UNIMPLEMENTED(); // Not used by V8.
- break;
}
if (frb_val < static_cast<double>(kMinVal)) {
frt_val = kMinVal;
@@ -3609,7 +3616,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
default:
UNIMPLEMENTED(); // Not used by V8.
- break;
}
if (frb_val < kMinVal) {
frt_val = kMinVal;
@@ -3653,7 +3659,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
default:
UNIMPLEMENTED(); // Not used by V8.
- break;
}
if (frb_val < kMinVal) {
frt_val = kMinVal;
@@ -3746,7 +3751,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
break;
default:
UNIMPLEMENTED();
- break;
}
return;
}
@@ -4728,6 +4732,36 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
}
break;
}
+ case XSCVSPDPN: {
+ int t = instr->RTValue();
+ int b = instr->RBValue();
+ uint64_t double_bits = get_d_register(b);
+ // The value is held in the high 32 bits of the register.
+ float f =
+ bit_cast<float, uint32_t>(static_cast<uint32_t>(double_bits >> 32));
+ double_bits = bit_cast<uint64_t, double>(static_cast<double>(f));
+ // Preserve snan.
+ if (issignaling(f)) {
+ double_bits &= 0xFFF7FFFFFFFFFFFFU; // Clear bit 51.
+ }
+ set_d_register(t, double_bits);
+ break;
+ }
+ case XSCVDPSPN: {
+ int t = instr->RTValue();
+ int b = instr->RBValue();
+ double b_val = get_double_from_d_register(b);
+ uint64_t float_bits = static_cast<uint64_t>(
+ bit_cast<uint32_t, float>(static_cast<float>(b_val)));
+ // Preserve snan.
+ if (issignaling(b_val)) {
+ float_bits &= 0xFFBFFFFFU; // Clear bit 22.
+ }
+ // The fp result is placed in both 32-bit halves of the destination.
+ float_bits = (float_bits << 32) | float_bits;
+ set_d_register(t, float_bits);
+ break;
+ }
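// Both conversion cases above rely on the IEEE-754 quiet-NaN flag being
// the top fraction bit: bit 51 of a binary64, bit 22 of a binary32. The
// FPU conversion quiets a signaling NaN, so the code clears that bit again
// afterwards. The hard-coded masks can also be derived (sketch, assuming
// the usual IEEE-754 layouts):
#include <cstdint>
constexpr uint64_t kClearDoubleQuietBit = ~(uint64_t{1} << 51);  // 0xFFF7FFFFFFFFFFFF
constexpr uint32_t kClearFloatQuietBit = ~(uint32_t{1} << 22);   // 0xFFBFFFFF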
#define VECTOR_UNPACK(S, D, if_high_side) \
int t = instr->RTValue(); \
int b = instr->RBValue(); \
@@ -5118,7 +5152,6 @@ void Simulator::ExecuteGeneric(Instruction* instr) {
#undef GET_ADDRESS
default: {
UNIMPLEMENTED();
- break;
}
}
}
diff --git a/deps/v8/src/execution/riscv64/simulator-riscv64.cc b/deps/v8/src/execution/riscv64/simulator-riscv64.cc
index 3ec0c0e811..1b72aa9862 100644
--- a/deps/v8/src/execution/riscv64/simulator-riscv64.cc
+++ b/deps/v8/src/execution/riscv64/simulator-riscv64.cc
@@ -60,6 +60,544 @@
#include "src/runtime/runtime-utils.h"
#include "src/utils/ostreams.h"
+// The following RVV code is based on:
+// https://github.com/riscv/riscv-isa-sim
+// Copyright (c) 2010-2017, The Regents of the University of California
+// (Regents). All Rights Reserved.
+
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are met:
+// 1. Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// 2. Redistributions in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+// 3. Neither the name of the Regents nor the
+// names of its contributors may be used to endorse or promote products
+// derived from this software without specific prior written permission.
+
+// IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
+// SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS,
+// ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
+// REGENTS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED
+// TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
+// HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
+// MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+#define RVV_VI_GENERAL_LOOP_BASE \
+ for (uint64_t i = rvv_vstart(); i < rvv_vl(); i++) {
+#define RVV_VI_LOOP_END \
+ set_rvv_vstart(0); \
+ }
+
+#define RVV_VI_MASK_VARS \
+ const uint8_t midx = i / 64; \
+ const uint8_t mpos = i % 64;
+
+#define RVV_VI_LOOP_MASK_SKIP(BODY) \
+ RVV_VI_MASK_VARS \
+ if (instr_.RvvVM() == 0) { \
+ bool skip = ((Rvvelt<uint64_t>(0, midx) >> mpos) & 0x1) == 0; \
+ if (skip) { \
+ continue; \
+ } \
+ }
+
+#define RVV_VI_VV_LOOP(BODY) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_LOOP_MASK_SKIP() \
+ if (rvv_vsew() == E8) { \
+ VV_PARAMS(8); \
+ BODY \
+ } else if (rvv_vsew() == E16) { \
+ VV_PARAMS(16); \
+ BODY \
+ } else if (rvv_vsew() == E32) { \
+ VV_PARAMS(32); \
+ BODY \
+ } else if (rvv_vsew() == E64) { \
+ VV_PARAMS(64); \
+ BODY \
+ } else if (rvv_vsew() == E128) { \
+ VV_PARAMS(128); \
+ BODY \
+ } else { \
+ UNREACHABLE(); \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
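// How these loop macros read at a use site: BODY is spliced into a loop
// over the active element range with vd/vs1/vs2 bound per SEW. For
// example, RVV_VI_VV_LOOP({ vd = vs1 + vs2; }) expands roughly (sketch
// only) to:
//
//   for (uint64_t i = rvv_vstart(); i < rvv_vl(); i++) {
//     if (/* vm == 0 and mask bit i is clear */) continue;
//     /* for E8: */ int8_t& vd = ...; int8_t vs1 = ...; int8_t vs2 = ...;
//     vd = vs1 + vs2;
//   }
//   set_rvv_vstart(0);
//   rvv_trace_vd();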
+
+#define RVV_VI_VV_ULOOP(BODY) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_LOOP_MASK_SKIP() \
+ if (rvv_vsew() == E8) { \
+ VV_UPARAMS(8); \
+ BODY \
+ } else if (rvv_vsew() == E16) { \
+ VV_UPARAMS(16); \
+ BODY \
+ } else if (rvv_vsew() == E32) { \
+ VV_UPARAMS(32); \
+ BODY \
+ } else if (rvv_vsew() == E64) { \
+ VV_UPARAMS(64); \
+ BODY \
+ } else if (rvv_vsew() == E128) { \
+ VV_UPARAMS(128); \
+ BODY \
+ } else { \
+ UNREACHABLE(); \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VX_LOOP(BODY) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_LOOP_MASK_SKIP() \
+ if (rvv_vsew() == E8) { \
+ VX_PARAMS(8); \
+ BODY \
+ } else if (rvv_vsew() == E16) { \
+ VX_PARAMS(16); \
+ BODY \
+ } else if (rvv_vsew() == E32) { \
+ VX_PARAMS(32); \
+ BODY \
+ } else if (rvv_vsew() == E64) { \
+ VX_PARAMS(64); \
+ BODY \
+ } else if (rvv_vsew() == E128) { \
+ VX_PARAMS(128); \
+ BODY \
+ } else { \
+ UNREACHABLE(); \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VX_ULOOP(BODY) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_LOOP_MASK_SKIP() \
+ if (rvv_vsew() == E8) { \
+ VX_UPARAMS(8); \
+ BODY \
+ } else if (rvv_vsew() == E16) { \
+ VX_UPARAMS(16); \
+ BODY \
+ } else if (rvv_vsew() == E32) { \
+ VX_UPARAMS(32); \
+ BODY \
+ } else if (rvv_vsew() == E64) { \
+ VX_UPARAMS(64); \
+ BODY \
+ } else if (rvv_vsew() == E128) { \
+ VX_UPARAMS(128); \
+ BODY \
+ } else { \
+ UNREACHABLE(); \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VI_LOOP(BODY) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_LOOP_MASK_SKIP() \
+ if (rvv_vsew() == E8) { \
+ VI_PARAMS(8); \
+ BODY \
+ } else if (rvv_vsew() == E16) { \
+ VI_PARAMS(16); \
+ BODY \
+ } else if (rvv_vsew() == E32) { \
+ VI_PARAMS(32); \
+ BODY \
+ } else if (rvv_vsew() == E64) { \
+ VI_PARAMS(64); \
+ BODY \
+ } else if (rvv_vsew() == E128) { \
+ VI_PARAMS(128); \
+ BODY \
+ } else { \
+ UNREACHABLE(); \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VI_ULOOP(BODY) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_LOOP_MASK_SKIP() \
+ if (rvv_vsew() == E8) { \
+ VI_UPARAMS(8); \
+ BODY \
+ } else if (rvv_vsew() == E16) { \
+ VI_UPARAMS(16); \
+ BODY \
+ } else if (rvv_vsew() == E32) { \
+ VI_UPARAMS(32); \
+ BODY \
+ } else if (rvv_vsew() == E64) { \
+ VI_UPARAMS(64); \
+ BODY \
+ } else if (rvv_vsew() == E128) { \
+ VI_UPARAMS(128); \
+ BODY \
+ } else { \
+ UNREACHABLE(); \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VVXI_MERGE_LOOP(BODY) \
+ RVV_VI_GENERAL_LOOP_BASE \
+ if (rvv_vsew() == E8) { \
+ VXI_PARAMS(8); \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VXI_PARAMS(16); \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VXI_PARAMS(32); \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ VXI_PARAMS(64); \
+ BODY; \
+ } else if (rvv_vsew() == E128) { \
+ VXI_PARAMS(128); \
+ BODY \
+ } \
+ RVV_VI_LOOP_END \
+ rvv_trace_vd();
+
+#define VV_WITH_CARRY_PARAMS(x) \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i); \
+ type_sew_t<x>::type vs1 = Rvvelt<type_sew_t<x>::type>(rvv_vs1_reg(), i); \
+ type_sew_t<x>::type& vd = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true);
+
+#define XI_WITH_CARRY_PARAMS(x) \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i); \
+ type_sew_t<x>::type rs1 = (type_sew_t<x>::type)(get_register(rs1_reg())); \
+ type_sew_t<x>::type simm5 = (type_sew_t<x>::type)instr_.RvvSimm5(); \
+ type_sew_t<x>::type& vd = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true);
+
+// carry/borrow bit loop
+#define RVV_VI_VV_LOOP_WITH_CARRY(BODY) \
+ CHECK_NE(rvv_vd_reg(), 0); \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_MASK_VARS \
+ if (rvv_vsew() == E8) { \
+ VV_WITH_CARRY_PARAMS(8) \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VV_WITH_CARRY_PARAMS(16) \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VV_WITH_CARRY_PARAMS(32) \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ VV_WITH_CARRY_PARAMS(64) \
+ BODY; \
+ } \
+ RVV_VI_LOOP_END
+
+#define RVV_VI_XI_LOOP_WITH_CARRY(BODY) \
+ CHECK_NE(rvv_vd_reg(), 0); \
+ RVV_VI_GENERAL_LOOP_BASE \
+ RVV_VI_MASK_VARS \
+ if (rvv_vsew() == E8) { \
+ XI_WITH_CARRY_PARAMS(8) \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ XI_WITH_CARRY_PARAMS(16) \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ XI_WITH_CARRY_PARAMS(32) \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ XI_WITH_CARRY_PARAMS(64) \
+ BODY; \
+ } \
+ RVV_VI_LOOP_END
+
+#define VV_CMP_PARAMS(x) \
+ type_sew_t<x>::type vs1 = Rvvelt<type_sew_t<x>::type>(rvv_vs1_reg(), i); \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VX_CMP_PARAMS(x) \
+ type_sew_t<x>::type rs1 = (type_sew_t<x>::type)(get_register(rs1_reg())); \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VI_CMP_PARAMS(x) \
+ type_sew_t<x>::type simm5 = (type_sew_t<x>::type)instr_.RvvSimm5(); \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VV_UCMP_PARAMS(x) \
+ type_usew_t<x>::type vs1 = Rvvelt<type_usew_t<x>::type>(rvv_vs1_reg(), i); \
+ type_usew_t<x>::type vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VX_UCMP_PARAMS(x) \
+ type_usew_t<x>::type rs1 = \
+ (type_sew_t<x>::type)(get_register(rvv_vs1_reg())); \
+ type_usew_t<x>::type vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VI_UCMP_PARAMS(x) \
+ type_usew_t<x>::type uimm5 = (type_usew_t<x>::type)instr_.RvvUimm5(); \
+ type_usew_t<x>::type vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define RVV_VI_LOOP_CMP_BASE \
+ CHECK(rvv_vsew() >= E8 && rvv_vsew() <= E64); \
+ for (reg_t i = rvv_vstart(); i < rvv_vl(); ++i) { \
+ RVV_VI_LOOP_MASK_SKIP(); \
+ uint64_t mmask = uint64_t(1) << mpos; \
+ uint64_t& vdi = Rvvelt<uint64_t>(rvv_vd_reg(), midx, true); \
+ uint64_t res = 0;
+
+#define RVV_VI_LOOP_CMP_END \
+ vdi = (vdi & ~mmask) | (((res) << mpos) & mmask); \
+ } \
+ set_rvv_vstart(0);
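// The compare loops built on the base above write one result bit per
// element: bit (i % 64) of 64-bit chunk (i / 64) of the destination mask
// register. A standalone sketch of the same read-modify-write
// (hypothetical helper, not part of this patch):
#include <cstdint>
static inline void SetMaskBit(uint64_t* chunks, uint64_t i, bool res) {
  const uint64_t mmask = uint64_t{1} << (i % 64);
  uint64_t& vdi = chunks[i / 64];
  vdi = (vdi & ~mmask) | (res ? mmask : 0);  // same update as RVV_VI_LOOP_CMP_END
}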
+
+// comparison result written to the mask register
+#define RVV_VI_VV_LOOP_CMP(BODY) \
+ RVV_VI_LOOP_CMP_BASE \
+ if (rvv_vsew() == E8) { \
+ VV_CMP_PARAMS(8); \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VV_CMP_PARAMS(16); \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VV_CMP_PARAMS(32); \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ VV_CMP_PARAMS(64); \
+ BODY; \
+ } \
+ RVV_VI_LOOP_CMP_END \
+ rvv_trace_vd();
+
+#define RVV_VI_VX_LOOP_CMP(BODY) \
+ RVV_VI_LOOP_CMP_BASE \
+ if (rvv_vsew() == E8) { \
+ VX_CMP_PARAMS(8); \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VX_CMP_PARAMS(16); \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VX_CMP_PARAMS(32); \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ VX_CMP_PARAMS(64); \
+ BODY; \
+ } \
+ RVV_VI_LOOP_CMP_END
+
+#define RVV_VI_VI_LOOP_CMP(BODY) \
+ RVV_VI_LOOP_CMP_BASE \
+ if (rvv_vsew() == E8) { \
+ VI_CMP_PARAMS(8); \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VI_CMP_PARAMS(16); \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VI_CMP_PARAMS(32); \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ VI_CMP_PARAMS(64); \
+ BODY; \
+ } \
+ RVV_VI_LOOP_CMP_END
+
+#define RVV_VI_VV_ULOOP_CMP(BODY) \
+ RVV_VI_LOOP_CMP_BASE \
+ if (rvv_vsew() == E8) { \
+ VV_UCMP_PARAMS(8); \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VV_UCMP_PARAMS(16); \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VV_UCMP_PARAMS(32); \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ VV_UCMP_PARAMS(64); \
+ BODY; \
+ } \
+ RVV_VI_LOOP_CMP_END
+
+#define RVV_VI_VX_ULOOP_CMP(BODY) \
+ RVV_VI_LOOP_CMP_BASE \
+ if (rvv_vsew() == E8) { \
+ VX_UCMP_PARAMS(8); \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VX_UCMP_PARAMS(16); \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VX_UCMP_PARAMS(32); \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ VX_UCMP_PARAMS(64); \
+ BODY; \
+ } \
+ RVV_VI_LOOP_CMP_END
+
+#define RVV_VI_VI_ULOOP_CMP(BODY) \
+ RVV_VI_LOOP_CMP_BASE \
+ if (rvv_vsew() == E8) { \
+ VI_UCMP_PARAMS(8); \
+ BODY; \
+ } else if (rvv_vsew() == E16) { \
+ VI_UCMP_PARAMS(16); \
+ BODY; \
+ } else if (rvv_vsew() == E32) { \
+ VI_UCMP_PARAMS(32); \
+ BODY; \
+ } else if (rvv_vsew() == E64) { \
+ VI_UCMP_PARAMS(64); \
+ BODY; \
+ } \
+ RVV_VI_LOOP_CMP_END
+
+// reduction loop - signed
+#define RVV_VI_LOOP_REDUCTION_BASE(x) \
+ auto& vd_0_des = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), 0, true); \
+ auto vd_0_res = Rvvelt<type_sew_t<x>::type>(rvv_vs1_reg(), 0); \
+ for (uint64_t i = rvv_vstart(); i < rvv_vl(); ++i) { \
+ RVV_VI_LOOP_MASK_SKIP(); \
+ auto vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define RVV_VI_LOOP_REDUCTION_END(x) \
+ } \
+ if (rvv_vl() > 0) { \
+ vd_0_des = vd_0_res; \
+ } \
+ set_rvv_vstart(0);
+
+#define REDUCTION_LOOP(x, BODY) \
+ RVV_VI_LOOP_REDUCTION_BASE(x) \
+ BODY; \
+ RVV_VI_LOOP_REDUCTION_END(x)
+
+#define RVV_VI_VV_LOOP_REDUCTION(BODY) \
+ if (rvv_vsew() == E8) { \
+ REDUCTION_LOOP(8, BODY) \
+ } else if (rvv_vsew() == E16) { \
+ REDUCTION_LOOP(16, BODY) \
+ } else if (rvv_vsew() == E32) { \
+ REDUCTION_LOOP(32, BODY) \
+ } else if (rvv_vsew() == E64) { \
+ REDUCTION_LOOP(64, BODY) \
+ } \
+ rvv_trace_vd();
+
+// reduction loop - unsigned
+#define RVV_VI_ULOOP_REDUCTION_BASE(x) \
+ auto& vd_0_des = Rvvelt<type_usew_t<x>::type>(rvv_vd_reg(), 0, true); \
+ auto vd_0_res = Rvvelt<type_usew_t<x>::type>(rvv_vs1_reg(), 0); \
+ for (reg_t i = rvv_vstart(); i < rvv_vl(); ++i) { \
+ RVV_VI_LOOP_MASK_SKIP(); \
+ auto vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define REDUCTION_ULOOP(x, BODY) \
+ RVV_VI_ULOOP_REDUCTION_BASE(x) \
+ BODY; \
+ RVV_VI_LOOP_REDUCTION_END(x)
+
+#define RVV_VI_VV_ULOOP_REDUCTION(BODY) \
+ if (rvv_vsew() == E8) { \
+ REDUCTION_ULOOP(8, BODY) \
+ } else if (rvv_vsew() == E16) { \
+ REDUCTION_ULOOP(16, BODY) \
+ } else if (rvv_vsew() == E32) { \
+ REDUCTION_ULOOP(32, BODY) \
+ } else if (rvv_vsew() == E64) { \
+ REDUCTION_ULOOP(64, BODY) \
+ } \
+ rvv_trace_vd();
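// Reduction semantics, as implemented by the macros above: the accumulator
// vd_0_res is seeded from element 0 of vs1, BODY folds in each unmasked
// element of vs2, and the result is written to element 0 of vd only when
// vl > 0. E.g. vredmaxu computes
//   vd[0] = max(vs1[0], vs2[vstart], ..., vs2[vl - 1]).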
+
+#define VI_STRIP(inx) reg_t vreg_inx = inx;
+
+#define VI_ELEMENT_SKIP(inx) \
+ if (inx >= vl) { \
+ continue; \
+ } else if (inx < rvv_vstart()) { \
+ continue; \
+ } else { \
+ RVV_VI_LOOP_MASK_SKIP(); \
+ }
+
+#define require_vm \
+ do { \
+ if (instr_.RvvVM() == 0) CHECK_NE(rvv_vd_reg(), 0); \
+ } while (0);
+
+#define VI_CHECK_STORE(elt_width, is_mask_ldst) \
+ reg_t veew = is_mask_ldst ? 1 : sizeof(elt_width##_t) * 8;
+// float vemul = is_mask_ldst ? 1 : ((float)veew / rvv_vsew() * P.VU.vflmul);
+// reg_t emul = vemul < 1 ? 1 : vemul;
+// require(vemul >= 0.125 && vemul <= 8);
+// require_align(rvv_rd(), vemul);
+// require((nf * emul) <= (NVPR / 4) && (rvv_rd() + nf * emul) <= NVPR);
+
+#define VI_CHECK_LOAD(elt_width, is_mask_ldst) \
+ VI_CHECK_STORE(elt_width, is_mask_ldst); \
+ require_vm;
+
+/*vd + fn * emul*/
+#define RVV_VI_LD(stride, offset, elt_width, is_mask_ldst) \
+ const reg_t nf = rvv_nf() + 1; \
+ const reg_t vl = is_mask_ldst ? ((rvv_vl() + 7) / 8) : rvv_vl(); \
+ const int64_t baseAddr = rs1(); \
+ for (reg_t i = 0; i < vl; ++i) { \
+ VI_ELEMENT_SKIP(i); \
+ VI_STRIP(i); \
+ set_rvv_vstart(i); \
+ for (reg_t fn = 0; fn < nf; ++fn) { \
+ auto val = ReadMem<elt_width##_t>( \
+ baseAddr + (stride) + (offset) * sizeof(elt_width##_t), \
+ instr_.instr()); \
+ type_sew_t<sizeof(elt_width##_t)* 8>::type& vd = \
+ Rvvelt<type_sew_t<sizeof(elt_width##_t) * 8>::type>(rvv_vd_reg(), \
+ vreg_inx, true); \
+ vd = val; \
+ } \
+ } \
+ set_rvv_vstart(0); \
+ if (::v8::internal::FLAG_trace_sim) { \
+ __int128_t value = Vregister_[rvv_vd_reg()]; \
+ SNPrintF(trace_buf_, "0x%016" PRIx64 "%016" PRIx64 " <-- 0x%016" PRIx64, \
+ *(reinterpret_cast<int64_t*>(&value) + 1), \
+ *reinterpret_cast<int64_t*>(&value), \
+ (uint64_t)(get_register(rs1_reg()))); \
+ }
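// Shape of the load implemented by RVV_VI_LD: for each active element i
// and each of the nf segment fields fn, one element-sized value is read
// from baseAddr + stride + offset * sizeof(element) and stored into the
// destination register group. In the unit-stride 8-bit case decoded later
// in this file, the caller passes stride = 0 and offset = i * nf + fn, so
// consecutive elements come from consecutive bytes.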
+
+#define RVV_VI_ST(stride, offset, elt_width, is_mask_ldst) \
+ const reg_t nf = rvv_nf() + 1; \
+ const reg_t vl = is_mask_ldst ? ((rvv_vl() + 7) / 8) : rvv_vl(); \
+ const int64_t baseAddr = rs1(); \
+ for (reg_t i = 0; i < vl; ++i) { \
+ VI_STRIP(i) \
+ VI_ELEMENT_SKIP(i); \
+ set_rvv_vstart(i); \
+ for (reg_t fn = 0; fn < nf; ++fn) { \
+ elt_width##_t vs1 = Rvvelt<type_sew_t<sizeof(elt_width##_t) * 8>::type>( \
+ rvv_vs3_reg(), vreg_inx); \
+ WriteMem(baseAddr + (stride) + (offset) * sizeof(elt_width##_t), vs1, \
+ instr_.instr()); \
+ } \
+ } \
+ set_rvv_vstart(0); \
+ if (::v8::internal::FLAG_trace_sim) { \
+ __int128_t value = Vregister_[rvv_vd_reg()]; \
+ SNPrintF(trace_buf_, "0x%016" PRIx64 "%016" PRIx64 " --> 0x%016" PRIx64, \
+ *(reinterpret_cast<int64_t*>(&value) + 1), \
+ *reinterpret_cast<int64_t*>(&value), \
+ (uint64_t)(get_register(rs1_reg()))); \
+ }
namespace v8 {
namespace internal {
@@ -116,13 +654,14 @@ class RiscvDebugger {
int64_t GetFPURegisterValue(int regnum);
float GetFPURegisterValueFloat(int regnum);
double GetFPURegisterValueDouble(int regnum);
+ __int128_t GetVRegisterValue(int regnum);
bool GetValue(const char* desc, int64_t* value);
};
-inline void UNSUPPORTED() {
- printf("Sim: Unsupported instruction.\n");
+#define UNSUPPORTED() \
+ printf("Sim: Unsupported instruction. Func:%s Line:%d\n", __FUNCTION__, \
+ __LINE__); \
base::OS::Abort();
-}
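// Net effect of turning UNSUPPORTED into a macro: inside a function,
// __FUNCTION__ and __LINE__ would always name UNSUPPORTED itself; as a
// macro they expand at the call site, so the abort message identifies the
// decode path that is actually missing.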
int64_t RiscvDebugger::GetRegisterValue(int regnum) {
if (regnum == kNumSimuRegisters) {
@@ -156,6 +695,14 @@ double RiscvDebugger::GetFPURegisterValueDouble(int regnum) {
}
}
+__int128_t RiscvDebugger::GetVRegisterValue(int regnum) {
+ if (regnum == kNumVRegisters) {
+ return sim_->get_pc();
+ } else {
+ return sim_->get_vregister(regnum);
+ }
+}
+
bool RiscvDebugger::GetValue(const char* desc, int64_t* value) {
int regnum = Registers::Number(desc);
int fpuregnum = FPURegisters::Number(desc);
@@ -172,7 +719,6 @@ bool RiscvDebugger::GetValue(const char* desc, int64_t* value) {
} else {
return SScanF(desc, "%" SCNu64, reinterpret_cast<uint64_t*>(value)) == 1;
}
- return false;
}
#define REG_INFO(name) \
@@ -315,6 +861,7 @@ void RiscvDebugger::Debug() {
} else {
int regnum = Registers::Number(arg1);
int fpuregnum = FPURegisters::Number(arg1);
+ int vregnum = VRegisters::Number(arg1);
if (regnum != kInvalidRegister) {
value = GetRegisterValue(regnum);
@@ -325,6 +872,11 @@ void RiscvDebugger::Debug() {
dvalue = GetFPURegisterValueDouble(fpuregnum);
PrintF("%3s: 0x%016" PRIx64 " %16.4e\n",
FPURegisters::Name(fpuregnum), value, dvalue);
+ } else if (vregnum != kInvalidVRegister) {
+ __int128_t v = GetVRegisterValue(vregnum);
+ PrintF("\t%s:0x%016" PRIx64 "%016" PRIx64 "\n",
+ VRegisters::Name(vregnum), (uint64_t)(v >> 64),
+ (uint64_t)v);
} else {
PrintF("%s unrecognized\n", arg1);
}
@@ -960,6 +1512,11 @@ double Simulator::get_fpu_register_double(int fpureg) const {
return *bit_cast<double*>(&FPUregisters_[fpureg]);
}
+__int128_t Simulator::get_vregister(int vreg) const {
+ DCHECK((vreg >= 0) && (vreg < kNumVRegisters));
+ return Vregister_[vreg];
+}
+
// Runtime FP routines take up to two double arguments and zero
// or one integer arguments. All are constructed here,
// from fa0, fa1, and a0.
@@ -1301,6 +1858,9 @@ void Simulator::WriteMem(int64_t addr, T value, Instruction* instr) {
#endif
T* ptr = reinterpret_cast<T*>(addr);
TraceMemWr(addr, value);
+ // PrintF("Unaligned read at 0x%08" PRIx64 " , pc=0x%08" PRId64 "\n",
+ // (int64_t)ptr,
+ // (int64_t)value);
*ptr = value;
}
@@ -1424,7 +1984,6 @@ void Simulator::SoftwareInterrupt() {
break;
default:
UNREACHABLE();
- break;
}
}
switch (redirection->type()) {
@@ -1459,7 +2018,6 @@ void Simulator::SoftwareInterrupt() {
}
default:
UNREACHABLE();
- break;
}
if (::v8::internal::FLAG_trace_sim) {
switch (redirection->type()) {
@@ -1473,7 +2031,6 @@ void Simulator::SoftwareInterrupt() {
break;
default:
UNREACHABLE();
- break;
}
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
@@ -2376,7 +2933,8 @@ void Simulator::DecodeRVRFPType() {
break;
}
case 0b00001: { // RO_FCVT_WU_S
- set_rd(RoundF2IHelper<uint32_t>(original_val, instr_.RoundMode()));
+ set_rd(sext32(
+ RoundF2IHelper<uint32_t>(original_val, instr_.RoundMode())));
break;
}
#ifdef V8_TARGET_ARCH_64_BIT
@@ -2416,7 +2974,6 @@ void Simulator::DecodeRVRFPType() {
}
break;
}
- // TODO(RISCV): Implement handling of NaN (quiet and signalling).
case RO_FLE_S: { // RO_FEQ_S RO_FLT_S RO_FLE_S
switch (instr_.Funct3Value()) {
case 0b010: { // RO_FEQ_S
@@ -2624,7 +3181,6 @@ void Simulator::DecodeRVRFPType() {
case (RO_FCLASS_D & kRFPTypeMask): { // RO_FCLASS_D , 64D RO_FMV_X_D
if (instr_.Rs2Value() != 0b00000) {
UNSUPPORTED();
- break;
}
switch (instr_.Funct3Value()) {
case 0b001: { // RO_FCLASS_D
@@ -2651,7 +3207,8 @@ void Simulator::DecodeRVRFPType() {
break;
}
case 0b00001: { // RO_FCVT_WU_D
- set_rd(RoundF2IHelper<uint32_t>(original_val, instr_.RoundMode()));
+ set_rd(sext32(
+ RoundF2IHelper<uint32_t>(original_val, instr_.RoundMode())));
break;
}
#ifdef V8_TARGET_ARCH_64_BIT
@@ -2826,6 +3383,117 @@ void Simulator::DecodeRVR4Type() {
}
}
+bool Simulator::DecodeRvvVL() {
+ uint32_t instr_temp =
+ instr_.InstructionBits() & (kRvvMopMask | kRvvNfMask | kBaseOpcodeMask);
+ if (RO_V_VL == instr_temp) {
+ if (!(instr_.InstructionBits() & (kRvvRs2Mask))) {
+ switch (instr_.vl_vs_width()) {
+ case 8: {
+ RVV_VI_LD(0, (i * nf + fn), int8, false);
+ break;
+ }
+ case 16: {
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+ default:
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+ return true;
+ } else {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ }
+ } else if (RO_V_VLS == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VLX == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VLSEG2 == instr_temp || RO_V_VLSEG3 == instr_temp ||
+ RO_V_VLSEG4 == instr_temp || RO_V_VLSEG5 == instr_temp ||
+ RO_V_VLSEG6 == instr_temp || RO_V_VLSEG7 == instr_temp ||
+ RO_V_VLSEG8 == instr_temp) {
+ if (!(instr_.InstructionBits() & (kRvvRs2Mask))) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ }
+ } else if (RO_V_VLSSEG2 == instr_temp || RO_V_VLSSEG3 == instr_temp ||
+ RO_V_VLSSEG4 == instr_temp || RO_V_VLSSEG5 == instr_temp ||
+ RO_V_VLSSEG6 == instr_temp || RO_V_VLSSEG7 == instr_temp ||
+ RO_V_VLSSEG8 == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VLXSEG2 == instr_temp || RO_V_VLXSEG3 == instr_temp ||
+ RO_V_VLXSEG4 == instr_temp || RO_V_VLXSEG5 == instr_temp ||
+ RO_V_VLXSEG6 == instr_temp || RO_V_VLXSEG7 == instr_temp ||
+ RO_V_VLXSEG8 == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else {
+ return false;
+ }
+}
+
+bool Simulator::DecodeRvvVS() {
+ uint32_t instr_temp =
+ instr_.InstructionBits() & (kRvvMopMask | kRvvNfMask | kBaseOpcodeMask);
+ if (RO_V_VS == instr_temp) {
+ if (!(instr_.InstructionBits() & (kRvvRs2Mask))) {
+ switch (instr_.vl_vs_width()) {
+ case 8: {
+ RVV_VI_ST(0, (i * nf + fn), uint8, false);
+ break;
+ }
+ case 16: {
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+ default:
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+ } else {
+ UNIMPLEMENTED_RISCV();
+ }
+ return true;
+ } else if (RO_V_VSS == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VSX == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VSU == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VSSEG2 == instr_temp || RO_V_VSSEG3 == instr_temp ||
+ RO_V_VSSEG4 == instr_temp || RO_V_VSSEG5 == instr_temp ||
+ RO_V_VSSEG6 == instr_temp || RO_V_VSSEG7 == instr_temp ||
+ RO_V_VSSEG8 == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VSSSEG2 == instr_temp || RO_V_VSSSEG3 == instr_temp ||
+ RO_V_VSSSEG4 == instr_temp || RO_V_VSSSEG5 == instr_temp ||
+ RO_V_VSSSEG6 == instr_temp || RO_V_VSSSEG7 == instr_temp ||
+ RO_V_VSSSEG8 == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else if (RO_V_VSXSEG2 == instr_temp || RO_V_VSXSEG3 == instr_temp ||
+ RO_V_VSXSEG4 == instr_temp || RO_V_VSXSEG5 == instr_temp ||
+ RO_V_VSXSEG6 == instr_temp || RO_V_VSXSEG7 == instr_temp ||
+ RO_V_VSXSEG8 == instr_temp) {
+ UNIMPLEMENTED_RISCV();
+ return true;
+ } else {
+ return false;
+ }
+}
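// Dispatch contract shared by DecodeRvvVL and DecodeRvvVS, as used by the
// scalar decoders further down: the instruction bits are masked down to
// (mop | nf | base opcode) and matched against the RVV load/store forms.
// A true return means "this was an RVV access" (whether implemented or
// trapped as UNIMPLEMENTED_RISCV), while false hands the word back so the
// caller can raise UNSUPPORTED for a genuinely unknown encoding.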
+
Builtin Simulator::LookUp(Address pc) {
for (Builtin builtin = Builtins::kFirst; builtin <= Builtins::kLast;
++builtin) {
@@ -3061,8 +3729,12 @@ void Simulator::DecodeRVIType() {
TraceMemRd(addr, val, get_fpu_register(frd_reg()));
break;
}
- default:
- UNSUPPORTED();
+ default: {
+ if (!DecodeRvvVL()) {
+ UNSUPPORTED();
+ }
+ break;
+ }
}
}
@@ -3095,7 +3767,10 @@ void Simulator::DecodeRVSType() {
break;
}
default:
- UNSUPPORTED();
+ if (!DecodeRvvVS()) {
+ UNSUPPORTED();
+ }
+ break;
}
}
@@ -3403,6 +4078,794 @@ void Simulator::DecodeCBType() {
}
}
+/**
+ * RISCV-ISA-SIM
+ *
+ * @link https://github.com/riscv/riscv-isa-sim/
+ * @copyright Copyright (c) The Regents of the University of California
+ * @license https://github.com/riscv/riscv-isa-sim/blob/master/LICENSE
+ */
+// ref: https://locklessinc.com/articles/sat_arithmetic/
+template <typename T, typename UT>
+static inline T sat_add(T x, T y, bool& sat) {
+ UT ux = x;
+ UT uy = y;
+ UT res = ux + uy;
+ sat = false;
+ int sh = sizeof(T) * 8 - 1;
+
+ /* Calculate overflowed result. (Don't change the sign bit of ux) */
+ ux = (ux >> sh) + (((UT)0x1 << sh) - 1);
+
+ /* Force compiler to use cmovns instruction */
+ if ((T)((ux ^ uy) | ~(uy ^ res)) >= 0) {
+ res = ux;
+ sat = true;
+ }
+
+ return res;
+}
+
+template <typename T, typename UT>
+static inline T sat_sub(T x, T y, bool& sat) {
+ UT ux = x;
+ UT uy = y;
+ UT res = ux - uy;
+ sat = false;
+ int sh = sizeof(T) * 8 - 1;
+
+ /* Calculate overflowed result. (Don't change the sign bit of ux) */
+ ux = (ux >> sh) + (((UT)0x1 << sh) - 1);
+
+ /* Force compiler to use cmovns instruction */
+ if ((T)((ux ^ uy) & (ux ^ res)) < 0) {
+ res = ux;
+ sat = true;
+ }
+
+ return res;
+}
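// Worked example of sat_add above for T = int8_t: x = 100, y = 100. res
// wraps to 0xC8 (-56); ux is rewritten to (ux >> 7) + 0x7F = 0x7F, i.e.
// INT8_MAX, the saturation value matching x's sign. (x ^ y) | ~(y ^ res)
// has a clear sign bit exactly when x and y agree in sign but res does
// not, so the overflow is detected. Usage sketch (illustration only):
static inline int8_t SaturatingAddExample() {
  bool sat = false;
  int8_t r = sat_add<int8_t, uint8_t>(100, 100, sat);  // r == 127, sat == true
  return r;
}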
+
+void Simulator::DecodeRvvIVV() {
+ DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_IVV);
+ switch (instr_.InstructionBits() & kVTypeMask) {
+ case RO_V_VADD_VV: {
+ RVV_VI_VV_LOOP({ vd = vs1 + vs2; });
+ break;
+ }
+ case RO_V_VSADD_VV: {
+ RVV_VI_GENERAL_LOOP_BASE
+ bool sat = false;
+ switch (rvv_vsew()) {
+ case E8: {
+ VV_PARAMS(8);
+ vd = sat_add<int8_t, uint8_t>(vs2, vs1, sat);
+ break;
+ }
+ case E16: {
+ VV_PARAMS(16);
+ vd = sat_add<int16_t, uint16_t>(vs2, vs1, sat);
+ break;
+ }
+ case E32: {
+ VV_PARAMS(32);
+ vd = sat_add<int32_t, uint32_t>(vs2, vs1, sat);
+ break;
+ }
+ default: {
+ VV_PARAMS(64);
+ vd = sat_add<int64_t, uint64_t>(vs2, vs1, sat);
+ break;
+ }
+ }
+ set_rvv_vxsat(sat);
+ RVV_VI_LOOP_END
+ break;
+ }
+ case RO_V_VSUB_VV: {
+ RVV_VI_VV_LOOP({ vd = vs2 - vs1; })
+ break;
+ }
+ case RO_V_VSSUB_VV: {
+ RVV_VI_GENERAL_LOOP_BASE
+ bool sat = false;
+ switch (rvv_vsew()) {
+ case E8: {
+ VV_PARAMS(8);
+ vd = sat_sub<int8_t, uint8_t>(vs2, vs1, sat);
+ break;
+ }
+ case E16: {
+ VV_PARAMS(16);
+ vd = sat_sub<int16_t, uint16_t>(vs2, vs1, sat);
+ break;
+ }
+ case E32: {
+ VV_PARAMS(32);
+ vd = sat_sub<int32_t, uint32_t>(vs2, vs1, sat);
+ break;
+ }
+ default: {
+ VV_PARAMS(64);
+ vd = sat_sub<int64_t, uint64_t>(vs2, vs1, sat);
+ break;
+ }
+ }
+ set_rvv_vxsat(sat);
+ RVV_VI_LOOP_END
+ break;
+ }
+ case RO_V_VAND_VV: {
+ RVV_VI_VV_LOOP({ vd = vs1 & vs2; })
+ break;
+ }
+ case RO_V_VOR_VV: {
+ RVV_VI_VV_LOOP({ vd = vs1 | vs2; })
+ break;
+ }
+ case RO_V_VXOR_VV: {
+ RVV_VI_VV_LOOP({ vd = vs1 ^ vs2; })
+ break;
+ }
+ case RO_V_VMAXU_VV: {
+ RVV_VI_VV_ULOOP({
+ if (vs1 <= vs2) {
+ vd = vs2;
+ } else {
+ vd = vs1;
+ }
+ })
+ break;
+ }
+ case RO_V_VMAX_VV: {
+ RVV_VI_VV_LOOP({
+ if (vs1 <= vs2) {
+ vd = vs2;
+ } else {
+ vd = vs1;
+ }
+ })
+ break;
+ }
+ case RO_V_VMINU_VV: {
+ RVV_VI_VV_ULOOP({
+ if (vs1 <= vs2) {
+ vd = vs1;
+ } else {
+ vd = vs2;
+ }
+ })
+ break;
+ }
+ case RO_V_VMIN_VV: {
+ RVV_VI_VV_LOOP({
+ if (vs1 <= vs2) {
+ vd = vs1;
+ } else {
+ vd = vs2;
+ }
+ })
+ break;
+ }
+ case RO_V_VMV_VV: {
+ if (instr_.RvvVM()) {
+ RVV_VI_VVXI_MERGE_LOOP({
+ vd = vs1;
+ USE(simm5);
+ USE(vs2);
+ USE(rs1);
+ });
+ } else {
+ RVV_VI_VVXI_MERGE_LOOP({
+ bool use_first = (Rvvelt<uint64_t>(0, (i / 64)) >> (i % 64)) & 0x1;
+ vd = use_first ? vs1 : vs2;
+ USE(simm5);
+ USE(rs1);
+ });
+ }
+ break;
+ }
+ case RO_V_VMSEQ_VV: {
+ RVV_VI_VV_LOOP_CMP({ res = vs1 == vs2; })
+ break;
+ }
+ case RO_V_VMSNE_VV: {
+ RVV_VI_VV_LOOP_CMP({ res = vs1 != vs2; })
+ break;
+ }
+ case RO_V_VMSLTU_VV: {
+ RVV_VI_VV_ULOOP_CMP({ res = vs2 < vs1; })
+ break;
+ }
+ case RO_V_VMSLT_VV: {
+ RVV_VI_VV_LOOP_CMP({ res = vs2 < vs1; })
+ break;
+ }
+ case RO_V_VMSLE_VV: {
+ RVV_VI_VV_LOOP_CMP({ res = vs2 <= vs1; })
+ break;
+ }
+ case RO_V_VMSLEU_VV: {
+ RVV_VI_VV_ULOOP_CMP({ res = vs2 <= vs1; })
+ break;
+ }
+ case RO_V_VADC_VV:
+ if (instr_.RvvVM()) {
+ RVV_VI_VV_LOOP_WITH_CARRY({
+ auto& v0 = Rvvelt<uint64_t>(0, midx);
+ vd = vs1 + vs2 + (v0 >> mpos) & 0x1;
+ })
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case RO_V_VSLL_VV: {
+ RVV_VI_VV_LOOP({ vd = vs2 << vs1; })
+ break;
+ }
+ case RO_V_VRGATHER_VV: {
+ RVV_VI_GENERAL_LOOP_BASE
+ switch (rvv_vsew()) {
+ case E8: {
+ auto vs1 = Rvvelt<uint8_t>(rvv_vs1_reg(), i);
+ // if (i > 255) continue;
+ Rvvelt<uint8_t>(rvv_vd_reg(), i, true) =
+ vs1 >= rvv_vlmax() ? 0 : Rvvelt<uint8_t>(rvv_vs2_reg(), vs1);
+ break;
+ }
+ case E16: {
+ auto vs1 = Rvvelt<uint16_t>(rvv_vs1_reg(), i);
+ Rvvelt<uint16_t>(rvv_vd_reg(), i, true) =
+ vs1 >= rvv_vlmax() ? 0 : Rvvelt<uint16_t>(rvv_vs2_reg(), vs1);
+ break;
+ }
+ case E32: {
+ auto vs1 = Rvvelt<uint32_t>(rvv_vs1_reg(), i);
+ Rvvelt<uint32_t>(rvv_vd_reg(), i, true) =
+ vs1 >= rvv_vlmax() ? 0 : Rvvelt<uint32_t>(rvv_vs2_reg(), vs1);
+ break;
+ }
+ default: {
+ auto vs1 = Rvvelt<uint64_t>(rvv_vs1_reg(), i);
+ Rvvelt<uint64_t>(rvv_vd_reg(), i, true) =
+ vs1 >= rvv_vlmax() ? 0 : Rvvelt<uint64_t>(rvv_vs2_reg(), vs1);
+ break;
+ }
+ }
+ RVV_VI_LOOP_END;
+ break;
+ }
+ default:
+ // v8::base::EmbeddedVector<char, 256> buffer;
+ // SNPrintF(trace_buf_, " ");
+ // disasm::NameConverter converter;
+ // disasm::Disassembler dasm(converter);
+ // // Use a reasonably large buffer.
+ // dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(&instr_));
+
+ // PrintF("EXECUTING 0x%08" PRIxPTR " %-44s\n",
+ // reinterpret_cast<intptr_t>(&instr_), buffer.begin());
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+ set_rvv_vstart(0);
+}
+
+void Simulator::DecodeRvvIVI() {
+ DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_IVI);
+ switch (instr_.InstructionBits() & kVTypeMask) {
+ case RO_V_VADD_VI: {
+ RVV_VI_VI_LOOP({ vd = simm5 + vs2; })
+ break;
+ }
+ case RO_V_VSADD_VI: {
+ RVV_VI_GENERAL_LOOP_BASE
+ bool sat = false;
+ switch (rvv_vsew()) {
+ case E8: {
+ VI_PARAMS(8);
+ vd = sat_add<int8_t, uint8_t>(vs2, simm5, sat);
+ break;
+ }
+ case E16: {
+ VI_PARAMS(16);
+ vd = sat_add<int16_t, uint16_t>(vs2, simm5, sat);
+ break;
+ }
+ case E32: {
+ VI_PARAMS(32);
+ vd = sat_add<int32_t, uint32_t>(vs2, simm5, sat);
+ break;
+ }
+ default: {
+ VI_PARAMS(64);
+ vd = sat_add<int64_t, uint64_t>(vs2, simm5, sat);
+ break;
+ }
+ }
+ set_rvv_vxsat(sat);
+ RVV_VI_LOOP_END
+ break;
+ }
+ case RO_V_VRSUB_VI: {
+ RVV_VI_VI_LOOP({ vd = vs2 - simm5; })
+ break;
+ }
+ case RO_V_VAND_VI: {
+ RVV_VI_VI_LOOP({ vd = simm5 & vs2; })
+ break;
+ }
+ case RO_V_VOR_VI: {
+ RVV_VI_VI_LOOP({ vd = simm5 | vs2; })
+ break;
+ }
+ case RO_V_VXOR_VI: {
+ RVV_VI_VI_LOOP({ vd = simm5 ^ vs2; })
+ break;
+ }
+ case RO_V_VMV_VI:
+ if (instr_.RvvVM()) {
+ RVV_VI_VVXI_MERGE_LOOP({
+ vd = simm5;
+ USE(vs1);
+ USE(vs2);
+ USE(rs1);
+ });
+ } else {
+ RVV_VI_VVXI_MERGE_LOOP({
+ bool use_first = (Rvvelt<uint64_t>(0, (i / 64)) >> (i % 64)) & 0x1;
+ vd = use_first ? simm5 : vs2;
+ USE(vs1);
+ USE(rs1);
+ });
+ }
+ break;
+ case RO_V_VMSEQ_VI:
+ RVV_VI_VI_LOOP_CMP({ res = simm5 == vs2; })
+ break;
+ case RO_V_VMSNE_VI:
+ RVV_VI_VI_LOOP_CMP({ res = simm5 != vs2; })
+ break;
+ case RO_V_VMSLEU_VI:
+ RVV_VI_VI_ULOOP_CMP({ res = vs2 <= uimm5; })
+ break;
+ case RO_V_VMSLE_VI:
+ RVV_VI_VI_LOOP_CMP({ res = vs2 <= simm5; })
+ break;
+ case RO_V_VMSGT_VI:
+ RVV_VI_VI_LOOP_CMP({ res = vs2 > simm5; })
+ break;
+ case RO_V_VSLIDEDOWN_VI: {
+ const uint8_t sh = instr_.RvvUimm5();
+ RVV_VI_GENERAL_LOOP_BASE
+
+ reg_t offset = 0;
+ bool is_valid = (i + sh) < rvv_vlmax();
+
+ if (is_valid) {
+ offset = sh;
+ }
+
+ switch (rvv_sew()) {
+ case E8: {
+ VI_XI_SLIDEDOWN_PARAMS(8, offset);
+ vd = is_valid ? vs2 : 0;
+ } break;
+ case E16: {
+ VI_XI_SLIDEDOWN_PARAMS(16, offset);
+ vd = is_valid ? vs2 : 0;
+ } break;
+ case E32: {
+ VI_XI_SLIDEDOWN_PARAMS(32, offset);
+ vd = is_valid ? vs2 : 0;
+ } break;
+ default: {
+ VI_XI_SLIDEDOWN_PARAMS(64, offset);
+ vd = is_valid ? vs2 : 0;
+ } break;
+ }
+ RVV_VI_LOOP_END
+ } break;
+ case RO_V_VSRL_VI:
+ RVV_VI_VI_LOOP({ vd = vs2 >> simm5; })
+ break;
+ case RO_V_VSLL_VI:
+ RVV_VI_VI_LOOP({ vd = vs2 << simm5; })
+ break;
+ case RO_V_VADC_VI:
+ if (instr_.RvvVM()) {
+ RVV_VI_XI_LOOP_WITH_CARRY({
+ auto& v0 = Rvvelt<uint64_t>(0, midx);
+ vd = simm5 + vs2 + (v0 >> mpos) & 0x1;
+ USE(rs1);
+ })
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ default:
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+}
+
+void Simulator::DecodeRvvIVX() {
+ DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_IVX);
+ switch (instr_.InstructionBits() & kVTypeMask) {
+ case RO_V_VADD_VX: {
+ RVV_VI_VX_LOOP({ vd = rs1 + vs2; })
+ break;
+ }
+ case RO_V_VSADD_VX: {
+ RVV_VI_GENERAL_LOOP_BASE
+ bool sat = false;
+ switch (rvv_vsew()) {
+ case E8: {
+ VX_PARAMS(8);
+ vd = sat_add<int8_t, uint8_t>(vs2, rs1, sat);
+ break;
+ }
+ case E16: {
+ VX_PARAMS(16);
+ vd = sat_add<int16_t, uint16_t>(vs2, rs1, sat);
+ break;
+ }
+ case E32: {
+ VX_PARAMS(32);
+ vd = sat_add<int32_t, uint32_t>(vs2, rs1, sat);
+ break;
+ }
+ default: {
+ VX_PARAMS(64);
+ vd = sat_add<int64_t, uint64_t>(vs2, rs1, sat);
+ break;
+ }
+ }
+ set_rvv_vxsat(sat);
+ RVV_VI_LOOP_END
+ break;
+ }
+ case RO_V_VSUB_VX: {
+ RVV_VI_VX_LOOP({ vd = vs2 - rs1; })
+ break;
+ }
+ case RO_V_VSSUB_VX: {
+ RVV_VI_GENERAL_LOOP_BASE
+ bool sat = false;
+ switch (rvv_vsew()) {
+ case E8: {
+ VX_PARAMS(8);
+ vd = sat_sub<int8_t, uint8_t>(vs2, rs1, sat);
+ break;
+ }
+ case E16: {
+ VX_PARAMS(16);
+ vd = sat_sub<int16_t, uint16_t>(vs2, rs1, sat);
+ break;
+ }
+ case E32: {
+ VX_PARAMS(32);
+ vd = sat_sub<int32_t, uint32_t>(vs2, rs1, sat);
+ break;
+ }
+ default: {
+ VX_PARAMS(64);
+ vd = sat_sub<int64_t, uint64_t>(vs2, rs1, sat);
+ break;
+ }
+ }
+ set_rvv_vxsat(sat);
+ RVV_VI_LOOP_END
+ break;
+ }
+ case RO_V_VRSUB_VX: {
+ RVV_VI_VX_LOOP({ vd = rs1 - vs2; })
+ break;
+ }
+ case RO_V_VAND_VX: {
+ RVV_VI_VX_LOOP({ vd = rs1 & vs2; })
+ break;
+ }
+ case RO_V_VOR_VX: {
+ RVV_VI_VX_LOOP({ vd = rs1 | vs2; })
+ break;
+ }
+ case RO_V_VXOR_VX: {
+ RVV_VI_VX_LOOP({ vd = rs1 ^ vs2; })
+ break;
+ }
+ case RO_V_VMAX_VX: {
+ RVV_VI_VX_LOOP({
+ if (rs1 <= vs2) {
+ vd = vs2;
+ } else {
+ vd = rs1;
+ }
+ })
+ break;
+ }
+ case RO_V_VMAXU_VX: {
+ RVV_VI_VX_ULOOP({
+ if (rs1 <= vs2) {
+ vd = vs2;
+ } else {
+ vd = rs1;
+ }
+ })
+ break;
+ }
+ case RO_V_VMINU_VX: {
+ RVV_VI_VX_ULOOP({
+ if (rs1 <= vs2) {
+ vd = rs1;
+ } else {
+ vd = vs2;
+ }
+ })
+ break;
+ }
+ case RO_V_VMIN_VX: {
+ RVV_VI_VX_LOOP({
+ if (rs1 <= vs2) {
+ vd = rs1;
+ } else {
+ vd = vs2;
+ }
+ })
+ break;
+ }
+ case RO_V_VMV_VX:
+ if (instr_.RvvVM()) {
+ RVV_VI_VVXI_MERGE_LOOP({
+ vd = rs1;
+ USE(vs1);
+ USE(vs2);
+ USE(simm5);
+ });
+ } else {
+ RVV_VI_VVXI_MERGE_LOOP({
+ bool use_first = (Rvvelt<uint64_t>(0, (i / 64)) >> (i % 64)) & 0x1;
+ vd = use_first ? rs1 : vs2;
+ USE(vs1);
+ USE(simm5);
+ });
+ }
+ break;
+ case RO_V_VMSEQ_VX:
+ RVV_VI_VX_LOOP_CMP({ res = vs2 == rs1; })
+ break;
+ case RO_V_VMSNE_VX:
+ RVV_VI_VX_LOOP_CMP({ res = vs2 != rs1; })
+ break;
+ case RO_V_VMSLT_VX:
+ RVV_VI_VX_LOOP_CMP({ res = vs2 < rs1; })
+ break;
+ case RO_V_VMSLTU_VX:
+ RVV_VI_VX_ULOOP_CMP({ res = vs2 < rs1; })
+ break;
+ case RO_V_VMSLE_VX:
+ RVV_VI_VX_LOOP_CMP({ res = vs2 <= rs1; })
+ break;
+ case RO_V_VMSLEU_VX:
+ RVV_VI_VX_ULOOP_CMP({ res = vs2 <= rs1; })
+ break;
+ case RO_V_VMSGT_VX:
+ RVV_VI_VX_LOOP_CMP({ res = vs2 > rs1; })
+ break;
+ case RO_V_VMSGTU_VX:
+ RVV_VI_VX_ULOOP_CMP({ res = vs2 > rs1; })
+ break;
+ case RO_V_VSLIDEDOWN_VX:
+ UNIMPLEMENTED_RISCV();
+ break;
+ case RO_V_VADC_VX:
+ if (instr_.RvvVM()) {
+ RVV_VI_XI_LOOP_WITH_CARRY({
+ auto& v0 = Rvvelt<uint64_t>(0, midx);
+ vd = rs1 + vs2 + (v0 >> mpos) & 0x1;
+ USE(simm5);
+ })
+ } else {
+ UNREACHABLE();
+ }
+ break;
+ case RO_V_VSLL_VX: {
+ RVV_VI_VX_LOOP({ vd = vs2 << rs1; })
+ break;
+ }
+ default:
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+}
+
+void Simulator::DecodeRvvMVV() {
+ DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_MVV);
+ switch (instr_.InstructionBits() & kVTypeMask) {
+ case RO_V_VWXUNARY0: {
+ if (rvv_vs1_reg() == 0) {
+ switch (rvv_vsew()) {
+ case E8:
+ set_rd(Rvvelt<type_sew_t<8>::type>(rvv_vs2_reg(), 0));
+ break;
+ case E16:
+ set_rd(Rvvelt<type_sew_t<16>::type>(rvv_vs2_reg(), 0));
+ break;
+ case E32:
+ set_rd(Rvvelt<type_sew_t<32>::type>(rvv_vs2_reg(), 0));
+ break;
+ case E64:
+ set_rd(Rvvelt<type_sew_t<64>::type>(rvv_vs2_reg(), 0));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ set_rvv_vstart(0);
+ SNPrintF(trace_buf_, "0x%ld", get_register(rd_reg()));
+ } else {
+ v8::base::EmbeddedVector<char, 256> buffer;
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(&instr_));
+ PrintF("EXECUTING 0x%08" PRIxPTR " %-44s\n",
+ reinterpret_cast<intptr_t>(&instr_), buffer.begin());
+ UNIMPLEMENTED_RISCV();
+ }
+ } break;
+ case RO_V_VREDMAXU:
+ RVV_VI_VV_ULOOP_REDUCTION(
+ { vd_0_res = (vd_0_res >= vs2) ? vd_0_res : vs2; })
+ break;
+ case RO_V_VREDMAX:
+ RVV_VI_VV_LOOP_REDUCTION(
+ { vd_0_res = (vd_0_res >= vs2) ? vd_0_res : vs2; })
+ break;
+ case RO_V_VREDMINU:
+ RVV_VI_VV_ULOOP_REDUCTION(
+ { vd_0_res = (vd_0_res <= vs2) ? vd_0_res : vs2; })
+ break;
+ case RO_V_VREDMIN:
+ RVV_VI_VV_LOOP_REDUCTION(
+ { vd_0_res = (vd_0_res <= vs2) ? vd_0_res : vs2; })
+ break;
+ default:
+ v8::base::EmbeddedVector<char, 256> buffer;
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(&instr_));
+ PrintF("EXECUTING 0x%08" PRIxPTR " %-44s\n",
+ reinterpret_cast<intptr_t>(&instr_), buffer.begin());
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+}
+
+void Simulator::DecodeRvvMVX() {
+ DCHECK_EQ(instr_.InstructionBits() & (kBaseOpcodeMask | kFunct3Mask), OP_MVX);
+ switch (instr_.InstructionBits() & kVTypeMask) {
+ case RO_V_VRXUNARY0:
+ if (instr_.Vs2Value() == 0x0) {
+ if (rvv_vl() > 0 && rvv_vstart() < rvv_vl()) {
+ switch (rvv_vsew()) {
+ case E8:
+ Rvvelt<uint8_t>(rvv_vd_reg(), 0, true) =
+ (uint8_t)get_register(rs1_reg());
+ break;
+ case E16:
+ Rvvelt<uint16_t>(rvv_vd_reg(), 0, true) =
+ (uint16_t)get_register(rs1_reg());
+ break;
+ case E32:
+ Rvvelt<uint32_t>(rvv_vd_reg(), 0, true) =
+ (uint32_t)get_register(rs1_reg());
+ break;
+ case E64:
+ Rvvelt<uint64_t>(rvv_vd_reg(), 0, true) =
+ (uint64_t)get_register(rs1_reg());
+ break;
+ default:
+ UNREACHABLE();
+ }
+ // set_rvv_vl(0);
+ }
+ set_rvv_vstart(0);
+ rvv_trace_vd();
+ } else {
+ UNSUPPORTED_RISCV();
+ }
+ break;
+ default:
+ v8::base::EmbeddedVector<char, 256> buffer;
+ disasm::NameConverter converter;
+ disasm::Disassembler dasm(converter);
+ dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(&instr_));
+ PrintF("EXECUTING 0x%08" PRIxPTR " %-44s\n",
+ reinterpret_cast<intptr_t>(&instr_), buffer.begin());
+ UNIMPLEMENTED_RISCV();
+ break;
+ }
+}
+
+void Simulator::DecodeVType() {
+ switch (instr_.InstructionBits() & (kFunct3Mask | kBaseOpcodeMask)) {
+ case OP_IVV:
+ DecodeRvvIVV();
+ return;
+ case OP_FVV:
+ UNIMPLEMENTED_RISCV();
+ return;
+ case OP_MVV:
+ DecodeRvvMVV();
+ return;
+ case OP_IVI:
+ DecodeRvvIVI();
+ return;
+ case OP_IVX:
+ DecodeRvvIVX();
+ return;
+ case OP_FVF:
+ UNIMPLEMENTED_RISCV();
+ return;
+ case OP_MVX:
+ DecodeRvvMVX();
+ return;
+ }
+ switch (instr_.InstructionBits() &
+ (kBaseOpcodeMask | kFunct3Mask | 0x80000000)) {
+ case RO_V_VSETVLI: {
+ uint64_t avl;
+ set_rvv_vtype(rvv_zimm());
+ if (rs1_reg() != zero_reg) {
+ avl = rs1();
+ } else if (rd_reg() != zero_reg) {
+ avl = ~0;
+ } else {
+ avl = rvv_vl();
+ }
+ avl = avl <= rvv_vlmax() ? avl : rvv_vlmax();
+ set_rvv_vl(avl);
+ set_rd(rvv_vl());
+ rvv_trace_status();
+ break;
+ }
+ case RO_V_VSETVL: {
+ if (!(instr_.InstructionBits() & 0x40000000)) {
+ uint64_t avl;
+ set_rvv_vtype(rs2());
+ if (rs1_reg() != zero_reg) {
+ avl = rs1();
+ } else if (rd_reg() != zero_reg) {
+ avl = ~0;
+ } else {
+ avl = rvv_vl();
+ }
+ avl = avl <= rvv_vlmax()
+ ? avl
+ : avl < (rvv_vlmax() * 2) ? avl / 2 : rvv_vlmax();
+ set_rvv_vl(avl);
+ set_rd(rvv_vl());
+ rvv_trace_status();
+ } else {
+ DCHECK_EQ(instr_.InstructionBits() &
+ (kBaseOpcodeMask | kFunct3Mask | 0xC0000000),
+ RO_V_VSETIVLI);
+ uint64_t avl;
+ set_rvv_vtype(rvv_zimm());
+ avl = instr_.Rvvuimm();
+ avl = avl <= rvv_vlmax()
+ ? avl
+ : avl < (rvv_vlmax() * 2) ? avl / 2 : rvv_vlmax();
+ set_rvv_vl(avl);
+ set_rd(rvv_vl());
+ rvv_trace_status();
+ break;
+ }
+ break;
+ }
+ default:
+ FATAL("Error: Unsupport on FILE:%s:%d.", __FILE__, __LINE__);
+ }
+}
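// Example of the AVL rule above, taking VLEN = 128 (the simulator's
// kRvvVLEN, per the static_assert on Vregister_): with vtype selecting
// SEW = 32 and LMUL = 1, VLMAX = 128 / 32 = 4. vsetvli with rs1 != x0 and
// rs1 holding 10 clamps vl to 4; rs1 == x0 with rd != x0 requests
// AVL = ~0 and therefore also yields VLMAX; rs1 == rd == x0 keeps the
// current vl.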
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
@@ -3473,6 +4936,9 @@ void Simulator::InstructionDecode(Instruction* instr) {
case Instruction::kCSType:
DecodeCSType();
break;
+ case Instruction::kVType:
+ DecodeVType();
+ break;
default:
if (1) {
std::cout << "Unrecognized instruction [@pc=0x" << std::hex
@@ -3483,7 +4949,7 @@ void Simulator::InstructionDecode(Instruction* instr) {
}
if (::v8::internal::FLAG_trace_sim) {
- PrintF(" 0x%012" PRIxPTR " %-44s %s\n",
+ PrintF(" 0x%012" PRIxPTR " %-44s\t%s\n",
reinterpret_cast<intptr_t>(instr), buffer.begin(),
trace_buf_.begin());
}
@@ -3524,8 +4990,6 @@ void Simulator::CallInternal(Address entry) {
set_register(ra, end_sim_pc);
// Remember the values of callee-saved registers.
- // The code below assumes that r9 is not used as sb (static base) in
- // simulator code and therefore is regarded as a callee-saved register.
int64_t s0_val = get_register(s0);
int64_t s1_val = get_register(s1);
int64_t s2_val = get_register(s2);
@@ -3534,9 +4998,12 @@ void Simulator::CallInternal(Address entry) {
int64_t s5_val = get_register(s5);
int64_t s6_val = get_register(s6);
int64_t s7_val = get_register(s7);
+ int64_t s8_val = get_register(s8);
+ int64_t s9_val = get_register(s9);
+ int64_t s10_val = get_register(s10);
+ int64_t s11_val = get_register(s11);
int64_t gp_val = get_register(gp);
int64_t sp_val = get_register(sp);
- int64_t fp_val = get_register(fp);
// Set up the callee-saved registers with a known value. To be able to check
// that they are preserved properly across JS execution.
@@ -3549,8 +5016,11 @@ void Simulator::CallInternal(Address entry) {
set_register(s5, callee_saved_value);
set_register(s6, callee_saved_value);
set_register(s7, callee_saved_value);
+ set_register(s8, callee_saved_value);
+ set_register(s9, callee_saved_value);
+ set_register(s10, callee_saved_value);
+ set_register(s11, callee_saved_value);
set_register(gp, callee_saved_value);
- set_register(fp, callee_saved_value);
// Start the simulation.
Execute();
@@ -3564,8 +5034,11 @@ void Simulator::CallInternal(Address entry) {
CHECK_EQ(callee_saved_value, get_register(s5));
CHECK_EQ(callee_saved_value, get_register(s6));
CHECK_EQ(callee_saved_value, get_register(s7));
+ CHECK_EQ(callee_saved_value, get_register(s8));
+ CHECK_EQ(callee_saved_value, get_register(s9));
+ CHECK_EQ(callee_saved_value, get_register(s10));
+ CHECK_EQ(callee_saved_value, get_register(s11));
CHECK_EQ(callee_saved_value, get_register(gp));
- CHECK_EQ(callee_saved_value, get_register(fp));
// Restore callee-saved registers with the original value.
set_register(s0, s0_val);
@@ -3576,9 +5049,12 @@ void Simulator::CallInternal(Address entry) {
set_register(s5, s5_val);
set_register(s6, s6_val);
set_register(s7, s7_val);
+ set_register(s8, s8_val);
+ set_register(s9, s9_val);
+ set_register(s10, s10_val);
+ set_register(s11, s11_val);
set_register(gp, gp_val);
set_register(sp, sp_val);
- set_register(fp, fp_val);
}
intptr_t Simulator::CallImpl(Address entry, int argument_count,
@@ -3586,15 +5062,12 @@ intptr_t Simulator::CallImpl(Address entry, int argument_count,
constexpr int kRegisterPassedArguments = 8;
// Set up arguments.
- // First four arguments passed in registers in both ABI's.
+ // The RISC-V 64G calling convention passes the first eight arguments in a0-a7.
int reg_arg_count = std::min(kRegisterPassedArguments, argument_count);
if (reg_arg_count > 0) set_register(a0, arguments[0]);
if (reg_arg_count > 1) set_register(a1, arguments[1]);
if (reg_arg_count > 2) set_register(a2, arguments[2]);
if (reg_arg_count > 3) set_register(a3, arguments[3]);
-
- // Up to eight arguments passed in registers in N64 ABI.
- // TODO(plind): N64 ABI calls these regs a4 - a7. Clarify this.
if (reg_arg_count > 4) set_register(a4, arguments[4]);
if (reg_arg_count > 5) set_register(a5, arguments[5]);
if (reg_arg_count > 6) set_register(a6, arguments[6]);
@@ -3602,12 +5075,13 @@ intptr_t Simulator::CallImpl(Address entry, int argument_count,
if (::v8::internal::FLAG_trace_sim) {
std::cout << "CallImpl: reg_arg_count = " << reg_arg_count << std::hex
- << " entry-pc (JSEntry) = 0x" << entry << " a0 (Isolate) = 0x"
- << get_register(a0) << " a1 (orig_func/new_target) = 0x"
- << get_register(a1) << " a2 (func/target) = 0x"
- << get_register(a2) << " a3 (receiver) = 0x" << get_register(a3)
- << " a4 (argc) = 0x" << get_register(a4) << " a5 (argv) = 0x"
- << get_register(a5) << std::endl;
+ << " entry-pc (JSEntry) = 0x" << entry
+ << " a0 (Isolate-root) = 0x" << get_register(a0)
+ << " a1 (orig_func/new_target) = 0x" << get_register(a1)
+ << " a2 (func/target) = 0x" << get_register(a2)
+ << " a3 (receiver) = 0x" << get_register(a3) << " a4 (argc) = 0x"
+ << get_register(a4) << " a5 (argv) = 0x" << get_register(a5)
+ << std::endl;
}
// Remaining arguments passed on stack.
diff --git a/deps/v8/src/execution/riscv64/simulator-riscv64.h b/deps/v8/src/execution/riscv64/simulator-riscv64.h
index 2fa40cea4e..90f0edec4c 100644
--- a/deps/v8/src/execution/riscv64/simulator-riscv64.h
+++ b/deps/v8/src/execution/riscv64/simulator-riscv64.h
@@ -299,6 +299,42 @@ class Simulator : public SimulatorBase {
kNumFPURegisters
};
+ enum VRegister {
+ v0,
+ v1,
+ v2,
+ v3,
+ v4,
+ v5,
+ v6,
+ v7,
+ v8,
+ v9,
+ v10,
+ v11,
+ v12,
+ v13,
+ v14,
+ v15,
+ v16,
+ v17,
+ v18,
+ v19,
+ v20,
+ v21,
+ v22,
+ v23,
+ v24,
+ v25,
+ v26,
+ v27,
+ v28,
+ v29,
+ v30,
+ v31,
+ kNumVRegisters
+ };
+
explicit Simulator(Isolate* isolate);
~Simulator();
@@ -312,7 +348,7 @@ class Simulator : public SimulatorBase {
void set_register(int reg, int64_t value);
void set_register_word(int reg, int32_t value);
void set_dw_register(int dreg, const int* dbl);
- int64_t get_register(int reg) const;
+ V8_EXPORT_PRIVATE int64_t get_register(int reg) const;
double get_double_from_register_pair(int reg);
// Same for FPURegisters.
@@ -338,6 +374,59 @@ class Simulator : public SimulatorBase {
void set_fflags(uint32_t flags) { set_csr_bits(csr_fflags, flags); }
void clear_fflags(int32_t flags) { clear_csr_bits(csr_fflags, flags); }
+ // RVV CSR
+ __int128_t get_vregister(int vreg) const;
+ inline uint64_t rvv_vlen() const { return kRvvVLEN; }
+ inline uint64_t rvv_vtype() const { return vtype_; }
+ inline uint64_t rvv_vl() const { return vl_; }
+ inline uint64_t rvv_vstart() const { return vstart_; }
+ inline uint64_t rvv_vxsat() const { return vxsat_; }
+ inline uint64_t rvv_vxrm() const { return vxrm_; }
+ inline uint64_t rvv_vcsr() const { return vcsr_; }
+ inline uint64_t rvv_vlenb() const { return vlenb_; }
+ inline uint32_t rvv_zimm() const { return instr_.Rvvzimm(); }
+ inline uint32_t rvv_vlmul() const { return (rvv_vtype() & 0x7); }
+ inline uint32_t rvv_vsew() const { return ((rvv_vtype() >> 3) & 0x7); }
+
+ inline const char* rvv_sew_s() const {
+ uint32_t vsew = rvv_vsew();
+ switch (vsew) {
+#define CAST_VSEW(name) \
+ case name: \
+ return #name;
+ RVV_SEW(CAST_VSEW)
+ default:
+ return "unknown";
+#undef CAST_VSEW
+ }
+ }
+
+ inline const char* rvv_lmul_s() const {
+ uint32_t vlmul = rvv_vlmul();
+ switch (vlmul) {
+#define CAST_VLMUL(name) \
+ case name: \
+ return #name;
+ RVV_LMUL(CAST_VLMUL)
+ default:
+ return "unknown";
+#undef CAST_VLMUL
+ }
+ }
+
+ // Returns the lane size in bits: 8, 16, 32, or 64.
+ inline uint32_t rvv_sew() const {
+ DCHECK_EQ(rvv_vsew() & (~0x7), 0x0);
+ return (0x1 << rvv_vsew()) * 8;
+ }
+ inline uint64_t rvv_vlmax() const {
+ if ((rvv_vlmul() & 0b100) != 0) {
+ return (rvv_vlen() / rvv_sew()) >> (rvv_vlmul() & 0b11);
+ } else {
+ return ((rvv_vlen() << rvv_vlmul()) / rvv_sew());
+ }
+ }
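// VLMAX derivation in the integral-LMUL branch above: LMUL = 1 << vlmul,
// so VLMAX = (VLEN << vlmul) / SEW. Worked case with VLEN = 128: SEW = 8
// and vlmul = 0b001 (LMUL = 2) give (128 << 1) / 8 = 32 elements. vlmul
// values with bit 2 set take the fractional branch, which shifts the
// VLEN / SEW quotient right instead.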
+
inline uint32_t get_dynamic_rounding_mode();
inline bool test_fflags_bits(uint32_t mask);
@@ -354,7 +443,7 @@ class Simulator : public SimulatorBase {
// Special case of set_register and get_register to access the raw PC value.
void set_pc(int64_t value);
- int64_t get_pc() const;
+ V8_EXPORT_PRIVATE int64_t get_pc() const;
Address get_sp() const { return static_cast<Address>(get_register(sp)); }
@@ -550,6 +639,234 @@ class Simulator : public SimulatorBase {
}
}
+ // RVV
+ // The following RVV code is based on:
+ // https://github.com/riscv/riscv-isa-sim
+ // Copyright (c) 2010-2017, The Regents of the University of California
+ // (Regents). All Rights Reserved.
+
+ // Redistribution and use in source and binary forms, with or without
+ // modification, are permitted provided that the following conditions are met:
+ // 1. Redistributions of source code must retain the above copyright
+ // notice, this list of conditions and the following disclaimer.
+ // 2. Redistributions in binary form must reproduce the above copyright
+ // notice, this list of conditions and the following disclaimer in the
+ // documentation and/or other materials provided with the distribution.
+ // 3. Neither the name of the Regents nor the
+ // names of its contributors may be used to endorse or promote products
+ // derived from this software without specific prior written permission.
+
+ // IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT,
+ // SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS,
+ // ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF
+ // REGENTS HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ // REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED
+ // TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ // PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
+ // HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
+ // MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
+ template <uint64_t N>
+ struct type_usew_t;
+ template <>
+ struct type_usew_t<8> {
+ using type = uint8_t;
+ };
+
+ template <>
+ struct type_usew_t<16> {
+ using type = uint16_t;
+ };
+
+ template <>
+ struct type_usew_t<32> {
+ using type = uint32_t;
+ };
+
+ template <>
+ struct type_usew_t<64> {
+ using type = uint64_t;
+ };
+
+ template <>
+ struct type_usew_t<128> {
+ using type = __uint128_t;
+ };
+ template <uint64_t N>
+ struct type_sew_t;
+
+ template <>
+ struct type_sew_t<8> {
+ using type = int8_t;
+ };
+
+ template <>
+ struct type_sew_t<16> {
+ using type = int16_t;
+ };
+
+ template <>
+ struct type_sew_t<32> {
+ using type = int32_t;
+ };
+
+ template <>
+ struct type_sew_t<64> {
+ using type = int64_t;
+ };
+
+ template <>
+ struct type_sew_t<128> {
+ using type = __int128_t;
+ };
+
+#define VV_PARAMS(x) \
+ type_sew_t<x>::type& vd = \
+ Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
+ type_sew_t<x>::type vs1 = Rvvelt<type_sew_t<x>::type>(rvv_vs1_reg(), i); \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VV_UPARAMS(x) \
+ type_usew_t<x>::type& vd = \
+ Rvvelt<type_usew_t<x>::type>(rvv_vd_reg(), i, true); \
+ type_usew_t<x>::type vs1 = Rvvelt<type_usew_t<x>::type>(rvv_vs1_reg(), i); \
+ type_usew_t<x>::type vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VX_PARAMS(x) \
+ type_sew_t<x>::type& vd = \
+ Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
+ type_sew_t<x>::type rs1 = (type_sew_t<x>::type)(get_register(rs1_reg())); \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VX_UPARAMS(x) \
+ type_usew_t<x>::type& vd = \
+ Rvvelt<type_usew_t<x>::type>(rvv_vd_reg(), i, true); \
+ type_usew_t<x>::type rs1 = (type_usew_t<x>::type)(get_register(rs1_reg())); \
+ type_usew_t<x>::type vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VI_PARAMS(x) \
+ type_sew_t<x>::type& vd = \
+ Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
+ type_sew_t<x>::type simm5 = (type_sew_t<x>::type)(instr_.RvvSimm5()); \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VI_UPARAMS(x) \
+ type_usew_t<x>::type& vd = \
+ Rvvelt<type_usew_t<x>::type>(rvv_vd_reg(), i, true); \
+ type_usew_t<x>::type uimm5 = (type_usew_t<x>::type)(instr_.RvvUimm5()); \
+ type_usew_t<x>::type vs2 = Rvvelt<type_usew_t<x>::type>(rvv_vs2_reg(), i);
+
+#define VXI_PARAMS(x) \
+ type_sew_t<x>::type& vd = \
+ Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
+ type_sew_t<x>::type vs1 = Rvvelt<type_sew_t<x>::type>(rvv_vs1_reg(), i); \
+ type_sew_t<x>::type vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i); \
+ type_sew_t<x>::type rs1 = (type_sew_t<x>::type)(get_register(rs1_reg())); \
+ type_sew_t<x>::type simm5 = (type_sew_t<x>::type)(instr_.RvvSimm5());
+
+#define VI_XI_SLIDEDOWN_PARAMS(x, off) \
+ auto& vd = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
+ auto vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i + off);
+
+#define VI_XI_SLIDEUP_PARAMS(x, offset) \
+ auto& vd = Rvvelt<type_sew_t<x>::type>(rvv_vd_reg(), i, true); \
+ auto vs2 = Rvvelt<type_sew_t<x>::type>(rvv_vs2_reg(), i - offset);
+
+ inline void rvv_trace_vd() {
+ if (::v8::internal::FLAG_trace_sim) {
+ __int128_t value = Vregister_[rvv_vd_reg()];
+ SNPrintF(trace_buf_, "0x%016" PRIx64 "%016" PRIx64 " (%" PRId64 ")",
+ *(reinterpret_cast<int64_t*>(&value) + 1),
+ *reinterpret_cast<int64_t*>(&value), icount_);
+ }
+ }
+
+ inline void rvv_trace_vs1() {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("\t%s:0x%016" PRIx64 "%016" PRIx64 "\n",
+ v8::internal::VRegisters::Name(static_cast<int>(rvv_vs1_reg())),
+ (uint64_t)(get_vregister(static_cast<int>(rvv_vs1_reg())) >> 64),
+ (uint64_t)get_vregister(static_cast<int>(rvv_vs1_reg())));
+ }
+ }
+
+ inline void rvv_trace_vs2() {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("\t%s:0x%016" PRIx64 "%016" PRIx64 "\n",
+ v8::internal::VRegisters::Name(static_cast<int>(rvv_vs2_reg())),
+ (uint64_t)(get_vregister(static_cast<int>(rvv_vs2_reg())) >> 64),
+ (uint64_t)get_vregister(static_cast<int>(rvv_vs2_reg())));
+ }
+ }
+ inline void rvv_trace_v0() {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("\t%s:0x%016" PRIx64 "%016" PRIx64 "\n",
+ v8::internal::VRegisters::Name(v0),
+ (uint64_t)(get_vregister(v0) >> 64), (uint64_t)get_vregister(v0));
+ }
+ }
+
+ inline void rvv_trace_rs1() {
+ if (::v8::internal::FLAG_trace_sim) {
+ PrintF("\t%s:0x%016" PRIx64 "\n",
+ v8::internal::Registers::Name(static_cast<int>(rs1_reg())),
+ (uint64_t)(get_register(rs1_reg())));
+ }
+ }
+
+ inline void rvv_trace_status() {
+ if (::v8::internal::FLAG_trace_sim) {
+ int i = 0;
+ for (; i < trace_buf_.length(); i++) {
+ if (trace_buf_[i] == '\0') break;
+ }
+ SNPrintF(trace_buf_.SubVector(i, trace_buf_.length()),
+ " sew:%s lmul:%s vstart:%lu vl:%lu", rvv_sew_s(), rvv_lmul_s(),
+ rvv_vstart(), rvv_vl());
+ }
+ }
+
+ template <class T>
+ T& Rvvelt(reg_t vReg, uint64_t n, bool is_write = false) {
+ CHECK_NE(rvv_sew(), 0);
+ CHECK_GT((rvv_vlen() >> 3) / sizeof(T), 0);
+ reg_t elts_per_reg = (rvv_vlen() >> 3) / (sizeof(T));
+ vReg += n / elts_per_reg;
+ n = n % elts_per_reg;
+ T* regStart = reinterpret_cast<T*>(reinterpret_cast<char*>(Vregister_) +
+ vReg * (rvv_vlen() >> 3));
+ return regStart[n];
+ }
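
Editorial note: Rvvelt() treats the whole vector register file as one flat byte array, so an element index that runs past a single register spills into the next one; register-group (LMUL > 1) addressing falls out of the same arithmetic. A self-contained sketch of the index math, assuming the 128-bit VLEN asserted later in this diff; the helper name is hypothetical:

    // Illustration only: map (vreg, element index) -> (physical reg, slot),
    // assuming VLEN = 128 bits. Hypothetical helper, not simulator code.
    #include <cstdint>
    #include <utility>

    constexpr uint64_t kVlenBytes = 128 / 8;  // 16 bytes per vector register

    template <typename T>
    std::pair<uint64_t, uint64_t> ElementLocation(uint64_t vreg, uint64_t n) {
      const uint64_t elts_per_reg = kVlenBytes / sizeof(T);  // 4 for int32_t
      return {vreg + n / elts_per_reg,  // spill over into the next register
              n % elts_per_reg};        // slot within that register
    }
    // ElementLocation<int32_t>(8, 5) == {9, 1}: element 5 of a group
    // starting at v8 lives in slot 1 of v9.
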
+
+ inline int32_t rvv_vs1_reg() { return instr_.Vs1Value(); }
+ inline reg_t rvv_vs1() { UNIMPLEMENTED(); }
+ inline int32_t rvv_vs2_reg() { return instr_.Vs2Value(); }
+ inline reg_t rvv_vs2() { UNIMPLEMENTED(); }
+ inline int32_t rvv_vd_reg() { return instr_.VdValue(); }
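+  // Note: RVV store encodings place the store-data register vs3 in the same
+  // bit field as vd, hence the reuse of VdValue() below.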
+ inline int32_t rvv_vs3_reg() { return instr_.VdValue(); }
+ inline reg_t rvv_vd() { UNIMPLEMENTED(); }
+ inline int32_t rvv_nf() {
+ return (instr_.InstructionBits() & kRvvNfMask) >> kRvvNfShift;
+ }
+
+ inline void set_vrd() { UNIMPLEMENTED(); }
+
+ inline void set_rvv_vtype(uint64_t value, bool trace = true) {
+ vtype_ = value;
+ }
+ inline void set_rvv_vl(uint64_t value, bool trace = true) { vl_ = value; }
+ inline void set_rvv_vstart(uint64_t value, bool trace = true) {
+ vstart_ = value;
+ }
+ inline void set_rvv_vxsat(uint64_t value, bool trace = true) {
+ vxsat_ = value;
+ }
+ inline void set_rvv_vxrm(uint64_t value, bool trace = true) { vxrm_ = value; }
+ inline void set_rvv_vcsr(uint64_t value, bool trace = true) { vcsr_ = value; }
+ inline void set_rvv_vlenb(uint64_t value, bool trace = true) {
+ vlenb_ = value;
+ }
+
template <typename T, typename Func>
inline T CanonicalizeFPUOp3(Func fn) {
DCHECK(std::is_floating_point<T>::value);
@@ -634,6 +951,14 @@ class Simulator : public SimulatorBase {
void DecodeCSType();
void DecodeCJType();
void DecodeCBType();
+ void DecodeVType();
+ void DecodeRvvIVV();
+ void DecodeRvvIVI();
+ void DecodeRvvIVX();
+ void DecodeRvvMVV();
+ void DecodeRvvMVX();
+ bool DecodeRvvVL();
+ bool DecodeRvvVS();
// Used for breakpoints and traps.
void SoftwareInterrupt();
@@ -700,6 +1025,10 @@ class Simulator : public SimulatorBase {
// Floating-point control and status register.
uint32_t FCSR_;
+ // RVV registers
+ __int128_t Vregister_[kNumVRegisters];
+  static_assert(sizeof(__int128_t) == kRvvVLEN / 8, "mismatched VLEN");

+ uint64_t vstart_, vxsat_, vxrm_, vcsr_, vtype_, vl_, vlenb_;
// Simulator support.
// Allocate 1MB for stack.
size_t stack_size_;
@@ -707,7 +1036,7 @@ class Simulator : public SimulatorBase {
bool pc_modified_;
int64_t icount_;
int break_count_;
- base::EmbeddedVector<char, 128> trace_buf_;
+ base::EmbeddedVector<char, 256> trace_buf_;
// Debugger input.
char* last_debugger_input_;
@@ -820,7 +1149,6 @@ class Simulator : public SimulatorBase {
LocalMonitor local_monitor_;
GlobalMonitor::LinkedAddress global_monitor_thread_;
};
-
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/execution/runtime-profiler.cc b/deps/v8/src/execution/runtime-profiler.cc
index 5ce45f43c2..4d710c5aaa 100644
--- a/deps/v8/src/execution/runtime-profiler.cc
+++ b/deps/v8/src/execution/runtime-profiler.cc
@@ -20,24 +20,11 @@
namespace v8 {
namespace internal {
-// Number of times a function has to be seen on the stack before it is
-// optimized.
-static const int kProfilerTicksBeforeOptimization = 3;
-
-// The number of ticks required for optimizing a function increases with
-// the size of the bytecode. This is in addition to the
-// kProfilerTicksBeforeOptimization required for any function.
-static const int kBytecodeSizeAllowancePerTick = 1100;
-
 // Maximum size in bytes of generated code for a function to allow OSR.
static const int kOSRBytecodeSizeAllowanceBase = 119;
static const int kOSRBytecodeSizeAllowancePerTick = 44;
-// Maximum size in bytes of generated code for a function to be optimized
-// the very first time it is seen on the stack.
-static const int kMaxBytecodeSizeForEarlyOpt = 81;
-
#define OPTIMIZATION_REASON_LIST(V) \
V(DoNotOptimize, "do not optimize") \
V(HotAndStable, "hot and stable") \
@@ -191,7 +178,7 @@ namespace {
bool ShouldOptimizeAsSmallFunction(int bytecode_size, int ticks,
bool any_ic_changed,
bool active_tier_is_turboprop) {
- if (any_ic_changed || bytecode_size >= kMaxBytecodeSizeForEarlyOpt)
+ if (any_ic_changed || bytecode_size >= FLAG_max_bytecode_size_for_early_opt)
return false;
return true;
}
@@ -209,8 +196,8 @@ OptimizationReason RuntimeProfiler::ShouldOptimize(JSFunction function,
int ticks = function.feedback_vector().profiler_ticks();
bool active_tier_is_turboprop = function.ActiveTierIsMidtierTurboprop();
int ticks_for_optimization =
- kProfilerTicksBeforeOptimization +
- (bytecode.length() / kBytecodeSizeAllowancePerTick);
+ FLAG_ticks_before_optimization +
+ (bytecode.length() / FLAG_bytecode_size_allowance_per_tick);
if (ticks >= ticks_for_optimization) {
return OptimizationReason::kHotAndStable;
} else if (ShouldOptimizeAsSmallFunction(bytecode.length(), ticks,
@@ -227,7 +214,7 @@ OptimizationReason RuntimeProfiler::ShouldOptimize(JSFunction function,
PrintF("ICs changed]\n");
} else {
PrintF(" too large for small function optimization: %d/%d]\n",
- bytecode.length(), kMaxBytecodeSizeForEarlyOpt);
+ bytecode.length(), FLAG_max_bytecode_size_for_early_opt);
}
}
return OptimizationReason::kDoNotOptimize;
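
Editorial note: replacing the hard-coded tiering constants with flags (defined later in this diff in src/flags/flag-definitions.h) keeps the same defaults, so the threshold arithmetic is unchanged. A standalone sketch of the computation with those defaults, for illustration only:

    // Sketch of ShouldOptimize's tick threshold, using the new flag defaults
    // (3 ticks base, 1100 bytecode bytes per extra tick). Not the real code.
    int TicksForOptimization(int bytecode_length) {
      const int ticks_before_optimization = 3;
      const int bytecode_size_allowance_per_tick = 1100;
      return ticks_before_optimization +
             bytecode_length / bytecode_size_allowance_per_tick;
    }
    // TicksForOptimization(5500) == 8: a 5500-byte function must accumulate
    // eight profiler ticks before it is marked hot-and-stable.
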
@@ -250,7 +237,7 @@ void RuntimeProfiler::MarkCandidatesForOptimization(JavaScriptFrame* frame) {
MarkCandidatesForOptimizationScope scope(this);
JSFunction function = frame->function();
- CodeKind code_kind = function.GetActiveTier();
+ CodeKind code_kind = function.GetActiveTier().value();
DCHECK(function.shared().is_compiled());
DCHECK(function.shared().IsInterpreted());
diff --git a/deps/v8/src/execution/s390/simulator-s390.cc b/deps/v8/src/execution/s390/simulator-s390.cc
index 88a8cb4121..31a03eed4e 100644
--- a/deps/v8/src/execution/s390/simulator-s390.cc
+++ b/deps/v8/src/execution/s390/simulator-s390.cc
@@ -109,7 +109,6 @@ bool S390Debugger::GetValue(const char* desc, intptr_t* value) {
1;
}
}
- return false;
}
bool S390Debugger::GetFPDoubleValue(const char* desc, double* value) {
@@ -758,8 +757,14 @@ void Simulator::EvalTableInit() {
V(vlrep, VLREP, 0xE705) /* type = VRX VECTOR LOAD AND REPLICATE */ \
V(vrepi, VREPI, 0xE745) /* type = VRI_A VECTOR REPLICATE IMMEDIATE */ \
V(vlr, VLR, 0xE756) /* type = VRR_A VECTOR LOAD */ \
+ V(vsteb, VSTEB, 0xE708) /* type = VRX VECTOR STORE ELEMENT (8) */ \
+ V(vsteh, VSTEH, 0xE709) /* type = VRX VECTOR STORE ELEMENT (16) */ \
V(vstef, VSTEF, 0xE70B) /* type = VRX VECTOR STORE ELEMENT (32) */ \
+ V(vsteg, VSTEG, 0xE70A) /* type = VRX VECTOR STORE ELEMENT (64) */ \
+ V(vleb, VLEB, 0xE701) /* type = VRX VECTOR LOAD ELEMENT (8) */ \
+ V(vleh, VLEH, 0xE701) /* type = VRX VECTOR LOAD ELEMENT (16) */ \
V(vlef, VLEF, 0xE703) /* type = VRX VECTOR LOAD ELEMENT (32) */ \
+ V(vleg, VLEG, 0xE702) /* type = VRX VECTOR LOAD ELEMENT (64) */ \
V(vavgl, VAVGL, 0xE7F0) /* type = VRR_C VECTOR AVERAGE LOGICAL */ \
V(va, VA, 0xE7F3) /* type = VRR_C VECTOR ADD */ \
V(vs, VS, 0xE7F7) /* type = VRR_C VECTOR SUBTRACT */ \
@@ -1775,50 +1780,50 @@ void Simulator::TrashCallerSaveRegisters() {
#endif
}
-uint32_t Simulator::ReadWU(intptr_t addr, Instruction* instr) {
+uint32_t Simulator::ReadWU(intptr_t addr) {
uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
return *ptr;
}
-int64_t Simulator::ReadW64(intptr_t addr, Instruction* instr) {
+int64_t Simulator::ReadW64(intptr_t addr) {
int64_t* ptr = reinterpret_cast<int64_t*>(addr);
return *ptr;
}
-int32_t Simulator::ReadW(intptr_t addr, Instruction* instr) {
+int32_t Simulator::ReadW(intptr_t addr) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
return *ptr;
}
-void Simulator::WriteW(intptr_t addr, uint32_t value, Instruction* instr) {
+void Simulator::WriteW(intptr_t addr, uint32_t value) {
uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
*ptr = value;
return;
}
-void Simulator::WriteW(intptr_t addr, int32_t value, Instruction* instr) {
+void Simulator::WriteW(intptr_t addr, int32_t value) {
int32_t* ptr = reinterpret_cast<int32_t*>(addr);
*ptr = value;
return;
}
-uint16_t Simulator::ReadHU(intptr_t addr, Instruction* instr) {
+uint16_t Simulator::ReadHU(intptr_t addr) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
return *ptr;
}
-int16_t Simulator::ReadH(intptr_t addr, Instruction* instr) {
+int16_t Simulator::ReadH(intptr_t addr) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
return *ptr;
}
-void Simulator::WriteH(intptr_t addr, uint16_t value, Instruction* instr) {
+void Simulator::WriteH(intptr_t addr, uint16_t value) {
uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
*ptr = value;
return;
}
-void Simulator::WriteH(intptr_t addr, int16_t value, Instruction* instr) {
+void Simulator::WriteH(intptr_t addr, int16_t value) {
int16_t* ptr = reinterpret_cast<int16_t*>(addr);
*ptr = value;
return;
@@ -2036,7 +2041,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
if (!stack_aligned) {
PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
@@ -2076,7 +2080,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
default:
UNREACHABLE();
- break;
}
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
switch (redirection->type()) {
@@ -2090,7 +2093,6 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
default:
UNREACHABLE();
- break;
}
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
@@ -3187,12 +3189,57 @@ EVALUATE(VLR) {
return length;
}
+EVALUATE(VSTEB) {
+ DCHECK_OPCODE(VSTEB);
+ DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ int8_t value = get_simd_register_by_lane<int8_t>(r1, m3);
+ WriteB(addr, value);
+ return length;
+}
+
+EVALUATE(VSTEH) {
+ DCHECK_OPCODE(VSTEH);
+ DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ int16_t value = get_simd_register_by_lane<int16_t>(r1, m3);
+ WriteH(addr, value);
+ return length;
+}
+
EVALUATE(VSTEF) {
DCHECK_OPCODE(VSTEF);
DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
intptr_t addr = GET_ADDRESS(x2, b2, d2);
int32_t value = get_simd_register_by_lane<int32_t>(r1, m3);
- WriteW(addr, value, instr);
+ WriteW(addr, value);
+ return length;
+}
+
+EVALUATE(VSTEG) {
+ DCHECK_OPCODE(VSTEG);
+ DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ int64_t value = get_simd_register_by_lane<int64_t>(r1, m3);
+ WriteDW(addr, value);
+ return length;
+}
+
+EVALUATE(VLEB) {
+ DCHECK_OPCODE(VLEB);
+ DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ int8_t value = ReadB(addr);
+ set_simd_register_by_lane<int8_t>(r1, m3, value);
+ return length;
+}
+
+EVALUATE(VLEH) {
+ DCHECK_OPCODE(VLEH);
+ DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ int16_t value = ReadH(addr);
+ set_simd_register_by_lane<int16_t>(r1, m3, value);
return length;
}
@@ -3200,11 +3247,20 @@ EVALUATE(VLEF) {
DCHECK_OPCODE(VLEF);
DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
intptr_t addr = GET_ADDRESS(x2, b2, d2);
- int32_t value = ReadW(addr, instr);
+ int32_t value = ReadW(addr);
set_simd_register_by_lane<int32_t>(r1, m3, value);
return length;
}
+EVALUATE(VLEG) {
+ DCHECK_OPCODE(VLEG);
+ DECODE_VRX_INSTRUCTION(r1, x2, b2, d2, m3);
+ intptr_t addr = GET_ADDRESS(x2, b2, d2);
+ uint64_t value = ReadDW(addr);
+ set_simd_register_by_lane<uint64_t>(r1, m3, value);
+ return length;
+}
+
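Editorial note: the six new element ops above are instances of one VRX pattern: compute the effective address from (x2, b2, d2), then move a single lane m3 of register r1 to or from memory at the element width. A condensed, standalone sketch of the load direction, with the register file modeled as a plain array and the big-endian lane ordering that the real set_simd_register_by_lane handles left out:

    // Illustration only: shared shape of the VLE* handlers above.
    #include <cstdint>
    #include <cstring>

    alignas(16) static uint8_t simd_regs[32][16];  // 32 x 128-bit registers

    template <typename T>
    void LoadElement(int r1, int m3, const void* addr) {
      T value;
      std::memcpy(&value, addr, sizeof(T));  // stands in for ReadB/ReadH/...
      std::memcpy(&simd_regs[r1][m3 * sizeof(T)], &value, sizeof(T));
    }
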
// TODO(john): unify most fp binary operations
template <class T, class Operation>
inline static void VectorBinaryOp(Simulator* sim, int dst, int src1, int src2,
@@ -4368,20 +4424,35 @@ EVALUATE(VFMAX) {
#undef CASE
template <class S, class D, class Operation>
-void VectorFPCompare(Simulator* sim, int dst, int src1, int src2,
+void VectorFPCompare(Simulator* sim, int dst, int src1, int src2, int m6,
Operation op) {
static_assert(sizeof(S) == sizeof(D),
"Expect input type size == output type size");
+ bool some_zero = false;
+ bool all_zero = true;
FOR_EACH_LANE(i, D) {
S src1_val = sim->get_simd_register_by_lane<S>(src1, i);
S src2_val = sim->get_simd_register_by_lane<S>(src2, i);
D value = op(src1_val, src2_val);
sim->set_simd_register_by_lane<D>(dst, i, value);
+ if (value) {
+ all_zero = false;
+ } else {
+ some_zero = true;
+ }
+ }
+  // TODO(miladfarca): implement other conditions.
+ if (m6) {
+ if (all_zero) {
+ sim->condition_reg_ = CC_OF;
+ } else if (some_zero) {
+ sim->condition_reg_ = 0x04;
+ }
}
}
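
Editorial note: with m6 set, the compare now also folds the per-lane results into a condition code: no lane matching takes the CC_OF path above, a mix of matching and non-matching lanes sets 0x04, and the all-matched case is left to the TODO. A standalone sketch of just that lane-summary bookkeeping:

    // Illustration of the all_zero/some_zero logic above; condition-code
    // values are omitted since only two cases are implemented so far.
    #include <array>
    #include <cstddef>
    #include <cstdint>

    enum class CmpSummary { kAllMatched, kMixed, kNoneMatched };

    template <std::size_t N>
    CmpSummary Summarize(const std::array<int32_t, N>& lanes) {
      bool some_zero = false, all_zero = true;
      for (int32_t lane : lanes) {
        if (lane) all_zero = false;  // lane compared true (-1)
        else some_zero = true;       // lane compared false (0)
      }
      if (all_zero) return CmpSummary::kNoneMatched;
      return some_zero ? CmpSummary::kMixed : CmpSummary::kAllMatched;
    }
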
-#define VECTOR_FP_COMPARE_FOR_TYPE(S, D, op) \
- VectorFPCompare<S, D>(this, r1, r2, r3, \
+#define VECTOR_FP_COMPARE_FOR_TYPE(S, D, op) \
+ VectorFPCompare<S, D>(this, r1, r2, r3, m6, \
[](S a, S b) { return (a op b) ? -1 : 0; });
#define VECTOR_FP_COMPARE(op) \
@@ -4415,7 +4486,6 @@ void VectorFPCompare(Simulator* sim, int dst, int src1, int src2,
EVALUATE(VFCE) {
DCHECK_OPCODE(VFCE);
DECODE_VRR_C_INSTRUCTION(r1, r2, r3, m6, m5, m4);
- USE(m6);
VECTOR_FP_COMPARE(==)
return length;
}
@@ -4578,7 +4648,7 @@ EVALUATE(L) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
- int32_t mem_val = ReadW(addr, instr);
+ int32_t mem_val = ReadW(addr);
set_low_register(r1, mem_val);
return length;
}
@@ -4727,7 +4797,7 @@ EVALUATE(LGF) {
DCHECK_OPCODE(LGF);
DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
intptr_t addr = GET_ADDRESS(x2, b2, d2);
- int64_t mem_val = static_cast<int64_t>(ReadW(addr, instr));
+ int64_t mem_val = static_cast<int64_t>(ReadW(addr));
set_register(r1, mem_val);
return length;
}
@@ -4739,7 +4809,7 @@ EVALUATE(ST) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
- WriteW(addr, r1_val, instr);
+ WriteW(addr, r1_val);
return length;
}
@@ -4757,7 +4827,7 @@ EVALUATE(STY) {
DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
intptr_t addr = GET_ADDRESS(x2, b2, d2);
uint32_t value = get_low_register<uint32_t>(r1);
- WriteW(addr, value, instr);
+ WriteW(addr, value);
return length;
}
@@ -4765,7 +4835,7 @@ EVALUATE(LY) {
DCHECK_OPCODE(LY);
DECODE_RXY_A_INSTRUCTION(r1, x2, b2, d2);
intptr_t addr = GET_ADDRESS(x2, b2, d2);
- uint32_t mem_val = ReadWU(addr, instr);
+ uint32_t mem_val = ReadWU(addr);
set_low_register(r1, mem_val);
return length;
}
@@ -5166,7 +5236,7 @@ EVALUATE(STH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t mem_addr = b2_val + x2_val + d2_val;
- WriteH(mem_addr, r1_val, instr);
+ WriteH(mem_addr, r1_val);
return length;
}
@@ -5248,7 +5318,7 @@ EVALUATE(LH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t mem_addr = x2_val + b2_val + d2_val;
- int32_t result = static_cast<int32_t>(ReadH(mem_addr, instr));
+ int32_t result = static_cast<int32_t>(ReadH(mem_addr));
set_low_register(r1, result);
return length;
}
@@ -5266,7 +5336,7 @@ EVALUATE(AH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
- int32_t mem_val = static_cast<int32_t>(ReadH(addr, instr));
+ int32_t mem_val = static_cast<int32_t>(ReadH(addr));
int32_t alu_out = 0;
bool isOF = false;
isOF = CheckOverflowForIntAdd(r1_val, mem_val, int32_t);
@@ -5285,7 +5355,7 @@ EVALUATE(SH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
- int32_t mem_val = static_cast<int32_t>(ReadH(addr, instr));
+ int32_t mem_val = static_cast<int32_t>(ReadH(addr));
int32_t alu_out = 0;
bool isOF = false;
isOF = CheckOverflowForIntSub(r1_val, mem_val, int32_t);
@@ -5303,7 +5373,7 @@ EVALUATE(MH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
- int32_t mem_val = static_cast<int32_t>(ReadH(addr, instr));
+ int32_t mem_val = static_cast<int32_t>(ReadH(addr));
int32_t alu_out = 0;
alu_out = r1_val * mem_val;
set_low_register(r1, alu_out);
@@ -5341,7 +5411,7 @@ EVALUATE(N) {
int32_t r1_val = get_low_register<int32_t>(r1);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
- int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val);
int32_t alu_out = 0;
alu_out = r1_val & mem_val;
SetS390BitWiseConditionCode<uint32_t>(alu_out);
@@ -5356,7 +5426,7 @@ EVALUATE(CL) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
- int32_t mem_val = ReadW(addr, instr);
+ int32_t mem_val = ReadW(addr);
SetS390ConditionCode<uint32_t>(r1_val, mem_val);
return length;
}
@@ -5368,7 +5438,7 @@ EVALUATE(O) {
int32_t r1_val = get_low_register<int32_t>(r1);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
- int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val);
int32_t alu_out = 0;
alu_out = r1_val | mem_val;
SetS390BitWiseConditionCode<uint32_t>(alu_out);
@@ -5383,7 +5453,7 @@ EVALUATE(X) {
int32_t r1_val = get_low_register<int32_t>(r1);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
- int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val);
int32_t alu_out = 0;
alu_out = r1_val ^ mem_val;
SetS390BitWiseConditionCode<uint32_t>(alu_out);
@@ -5398,7 +5468,7 @@ EVALUATE(C) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
- int32_t mem_val = ReadW(addr, instr);
+ int32_t mem_val = ReadW(addr);
SetS390ConditionCode<int32_t>(r1_val, mem_val);
return length;
}
@@ -5410,7 +5480,7 @@ EVALUATE(A) {
int32_t r1_val = get_low_register<int32_t>(r1);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
- int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val);
int32_t alu_out = 0;
bool isOF = false;
isOF = CheckOverflowForIntAdd(r1_val, mem_val, int32_t);
@@ -5428,7 +5498,7 @@ EVALUATE(S) {
int32_t r1_val = get_low_register<int32_t>(r1);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
- int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val);
int32_t alu_out = 0;
bool isOF = false;
isOF = CheckOverflowForIntSub(r1_val, mem_val, int32_t);
@@ -5446,7 +5516,7 @@ EVALUATE(M) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
DCHECK_EQ(r1 % 2, 0);
- int32_t mem_val = ReadW(addr, instr);
+ int32_t mem_val = ReadW(addr);
int32_t r1_val = get_low_register<int32_t>(r1 + 1);
int64_t product =
static_cast<int64_t>(r1_val) * static_cast<int64_t>(mem_val);
@@ -5511,7 +5581,7 @@ EVALUATE(STE) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t addr = b2_val + x2_val + d2_val;
int32_t frs_val = get_fpr<int32_t>(r1);
- WriteW(addr, frs_val, instr);
+ WriteW(addr, frs_val);
return length;
}
@@ -5520,7 +5590,7 @@ EVALUATE(MS) {
DECODE_RX_A_INSTRUCTION(x2, b2, r1, d2_val);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
- int32_t mem_val = ReadW(b2_val + x2_val + d2_val, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2_val);
int32_t r1_val = get_low_register<int32_t>(r1);
set_low_register(r1, r1_val * mem_val);
return length;
@@ -5733,7 +5803,7 @@ EVALUATE(STM) {
// Store each register in ascending order.
for (int i = 0; i <= r3 - r1; i++) {
int32_t value = get_low_register<int32_t>((r1 + i) % 16);
- WriteW(rb_val + offset + 4 * i, value, instr);
+ WriteW(rb_val + offset + 4 * i, value);
}
return length;
}
@@ -5793,7 +5863,7 @@ EVALUATE(LM) {
   // Load each register in ascending order.
for (int i = 0; i <= r3 - r1; i++) {
- int32_t value = ReadW(rb_val + offset + 4 * i, instr);
+ int32_t value = ReadW(rb_val + offset + 4 * i);
set_low_register((r1 + i) % 16, value);
}
return length;
@@ -9254,7 +9324,7 @@ EVALUATE(LT) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t addr = x2_val + b2_val + d2;
- int32_t value = ReadW(addr, instr);
+ int32_t value = ReadW(addr);
set_low_register(r1, value);
SetS390ConditionCode<int32_t>(value, 0);
return length;
@@ -9267,7 +9337,7 @@ EVALUATE(LGH) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t addr = x2_val + b2_val + d2;
- int64_t mem_val = static_cast<int64_t>(ReadH(addr, instr));
+ int64_t mem_val = static_cast<int64_t>(ReadH(addr));
set_register(r1, mem_val);
return length;
}
@@ -9279,7 +9349,7 @@ EVALUATE(LLGF) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t addr = x2_val + b2_val + d2;
- uint64_t mem_val = static_cast<uint64_t>(ReadWU(addr, instr));
+ uint64_t mem_val = static_cast<uint64_t>(ReadWU(addr));
set_register(r1, mem_val);
return length;
}
@@ -9298,7 +9368,7 @@ EVALUATE(AGF) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
uint64_t alu_out = r1_val;
- uint32_t mem_val = ReadW(b2_val + d2_val + x2_val, instr);
+ uint32_t mem_val = ReadW(b2_val + d2_val + x2_val);
alu_out += mem_val;
SetS390ConditionCode<int64_t>(alu_out, 0);
set_register(r1, alu_out);
@@ -9313,7 +9383,7 @@ EVALUATE(SGF) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
uint64_t alu_out = r1_val;
- uint32_t mem_val = ReadW(b2_val + d2_val + x2_val, instr);
+ uint32_t mem_val = ReadW(b2_val + d2_val + x2_val);
alu_out -= mem_val;
SetS390ConditionCode<int64_t>(alu_out, 0);
set_register(r1, alu_out);
@@ -9338,8 +9408,7 @@ EVALUATE(MSGF) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
- int64_t mem_val =
- static_cast<int64_t>(ReadW(b2_val + d2_val + x2_val, instr));
+ int64_t mem_val = static_cast<int64_t>(ReadW(b2_val + d2_val + x2_val));
int64_t r1_val = get_register(r1);
int64_t product = r1_val * mem_val;
set_register(r1, product);
@@ -9353,8 +9422,7 @@ EVALUATE(DSGF) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
- int64_t mem_val =
- static_cast<int64_t>(ReadW(b2_val + d2_val + x2_val, instr));
+ int64_t mem_val = static_cast<int64_t>(ReadW(b2_val + d2_val + x2_val));
int64_t r1_val = get_register(r1 + 1);
int64_t quotient = r1_val / mem_val;
int64_t remainder = r1_val % mem_val;
@@ -9369,7 +9437,7 @@ EVALUATE(LRVG) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t mem_addr = b2_val + x2_val + d2;
- int64_t mem_val = ReadW64(mem_addr, instr);
+ int64_t mem_val = ReadW64(mem_addr);
set_register(r1, ByteReverse(mem_val));
return length;
}
@@ -9380,7 +9448,7 @@ EVALUATE(LRV) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t mem_addr = b2_val + x2_val + d2;
- int32_t mem_val = ReadW(mem_addr, instr);
+ int32_t mem_val = ReadW(mem_addr);
set_low_register(r1, ByteReverse(mem_val));
return length;
}
@@ -9392,7 +9460,7 @@ EVALUATE(LRVH) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t mem_addr = b2_val + x2_val + d2;
- int16_t mem_val = ReadH(mem_addr, instr);
+ int16_t mem_val = ReadH(mem_addr);
int32_t result = ByteReverse(mem_val) & 0x0000FFFF;
result |= r1_val & 0xFFFF0000;
set_low_register(r1, result);
@@ -9478,7 +9546,7 @@ EVALUATE(STRV) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t mem_addr = b2_val + x2_val + d2;
- WriteW(mem_addr, ByteReverse(r1_val), instr);
+ WriteW(mem_addr, ByteReverse(r1_val));
return length;
}
@@ -9501,7 +9569,7 @@ EVALUATE(STRVH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t mem_addr = b2_val + x2_val + d2;
int16_t result = static_cast<int16_t>(r1_val >> 16);
- WriteH(mem_addr, ByteReverse(result), instr);
+ WriteH(mem_addr, ByteReverse(result));
return length;
}
@@ -9517,7 +9585,7 @@ EVALUATE(MSY) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
- int32_t mem_val = ReadW(b2_val + d2_val + x2_val, instr);
+ int32_t mem_val = ReadW(b2_val + d2_val + x2_val);
int32_t r1_val = get_low_register<int32_t>(r1);
set_low_register(r1, mem_val * r1_val);
return length;
@@ -9529,7 +9597,7 @@ EVALUATE(MSC) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
- int32_t mem_val = ReadW(b2_val + d2_val + x2_val, instr);
+ int32_t mem_val = ReadW(b2_val + d2_val + x2_val);
int32_t r1_val = get_low_register<int32_t>(r1);
int64_t result64 =
static_cast<int64_t>(r1_val) * static_cast<int64_t>(mem_val);
@@ -9548,7 +9616,7 @@ EVALUATE(NY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int32_t alu_out = get_low_register<int32_t>(r1);
- int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2);
alu_out &= mem_val;
SetS390BitWiseConditionCode<uint32_t>(alu_out);
set_low_register(r1, alu_out);
@@ -9561,7 +9629,7 @@ EVALUATE(CLY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
uint32_t alu_out = get_low_register<uint32_t>(r1);
- uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+ uint32_t mem_val = ReadWU(b2_val + x2_val + d2);
SetS390ConditionCode<uint32_t>(alu_out, mem_val);
return length;
}
@@ -9572,7 +9640,7 @@ EVALUATE(OY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int32_t alu_out = get_low_register<int32_t>(r1);
- int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2);
alu_out |= mem_val;
SetS390BitWiseConditionCode<uint32_t>(alu_out);
set_low_register(r1, alu_out);
@@ -9585,7 +9653,7 @@ EVALUATE(XY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int32_t alu_out = get_low_register<int32_t>(r1);
- int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2);
alu_out ^= mem_val;
SetS390BitWiseConditionCode<uint32_t>(alu_out);
set_low_register(r1, alu_out);
@@ -9598,7 +9666,7 @@ EVALUATE(CY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int32_t alu_out = get_low_register<int32_t>(r1);
- int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2);
SetS390ConditionCode<int32_t>(alu_out, mem_val);
return length;
}
@@ -9609,7 +9677,7 @@ EVALUATE(AY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int32_t alu_out = get_low_register<int32_t>(r1);
- int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2);
bool isOF = false;
isOF = CheckOverflowForIntAdd(alu_out, mem_val, int32_t);
alu_out += mem_val;
@@ -9625,7 +9693,7 @@ EVALUATE(SY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int32_t alu_out = get_low_register<int32_t>(r1);
- int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2);
bool isOF = false;
isOF = CheckOverflowForIntSub(alu_out, mem_val, int32_t);
alu_out -= mem_val;
@@ -9641,7 +9709,7 @@ EVALUATE(MFY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
DCHECK_EQ(r1 % 2, 0);
- int32_t mem_val = ReadW(b2_val + x2_val + d2, instr);
+ int32_t mem_val = ReadW(b2_val + x2_val + d2);
int32_t r1_val = get_low_register<int32_t>(r1 + 1);
int64_t product =
static_cast<int64_t>(r1_val) * static_cast<int64_t>(mem_val);
@@ -9659,7 +9727,7 @@ EVALUATE(ALY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
uint32_t alu_out = get_low_register<uint32_t>(r1);
- uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+ uint32_t mem_val = ReadWU(b2_val + x2_val + d2);
alu_out += mem_val;
set_low_register(r1, alu_out);
SetS390ConditionCode<uint32_t>(alu_out, 0);
@@ -9672,7 +9740,7 @@ EVALUATE(SLY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
uint32_t alu_out = get_low_register<uint32_t>(r1);
- uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+ uint32_t mem_val = ReadWU(b2_val + x2_val + d2);
alu_out -= mem_val;
set_low_register(r1, alu_out);
SetS390ConditionCode<uint32_t>(alu_out, 0);
@@ -9687,7 +9755,7 @@ EVALUATE(STHY) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t addr = x2_val + b2_val + d2;
uint16_t value = get_low_register<uint32_t>(r1);
- WriteH(addr, value, instr);
+ WriteH(addr, value);
return length;
}
@@ -9759,7 +9827,7 @@ EVALUATE(LHY) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t addr = x2_val + b2_val + d2;
- int32_t result = static_cast<int32_t>(ReadH(addr, instr));
+ int32_t result = static_cast<int32_t>(ReadH(addr));
set_low_register(r1, result);
return length;
}
@@ -9777,8 +9845,7 @@ EVALUATE(AHY) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
- int32_t mem_val =
- static_cast<int32_t>(ReadH(b2_val + d2_val + x2_val, instr));
+ int32_t mem_val = static_cast<int32_t>(ReadH(b2_val + d2_val + x2_val));
int32_t alu_out = 0;
bool isOF = false;
alu_out = r1_val + mem_val;
@@ -9796,8 +9863,7 @@ EVALUATE(SHY) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
- int32_t mem_val =
- static_cast<int32_t>(ReadH(b2_val + d2_val + x2_val, instr));
+ int32_t mem_val = static_cast<int32_t>(ReadH(b2_val + d2_val + x2_val));
int32_t alu_out = 0;
bool isOF = false;
alu_out = r1_val - mem_val;
@@ -9919,7 +9985,7 @@ EVALUATE(LLGH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
- uint16_t mem_val = ReadHU(b2_val + d2_val + x2_val, instr);
+ uint16_t mem_val = ReadHU(b2_val + d2_val + x2_val);
set_register(r1, mem_val);
return length;
}
@@ -9931,7 +9997,7 @@ EVALUATE(LLH) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
intptr_t d2_val = d2;
- uint16_t mem_val = ReadHU(b2_val + d2_val + x2_val, instr);
+ uint16_t mem_val = ReadHU(b2_val + d2_val + x2_val);
set_low_register(r1, mem_val);
return length;
}
@@ -9942,7 +10008,7 @@ EVALUATE(ML) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
DCHECK_EQ(r1 % 2, 0);
- uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+ uint32_t mem_val = ReadWU(b2_val + x2_val + d2);
uint32_t r1_val = get_low_register<uint32_t>(r1 + 1);
uint64_t product =
static_cast<uint64_t>(r1_val) * static_cast<uint64_t>(mem_val);
@@ -9960,7 +10026,7 @@ EVALUATE(DL) {
int64_t x2_val = (x2 == 0) ? 0 : get_register(x2);
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
DCHECK_EQ(r1 % 2, 0);
- uint32_t mem_val = ReadWU(b2_val + x2_val + d2, instr);
+ uint32_t mem_val = ReadWU(b2_val + x2_val + d2);
uint32_t r1_val = get_low_register<uint32_t>(r1 + 1);
uint64_t quotient =
static_cast<uint64_t>(r1_val) / static_cast<uint64_t>(mem_val);
@@ -10089,7 +10155,7 @@ EVALUATE(MVHI) {
DECODE_SIL_INSTRUCTION(b1, d1, i2);
int64_t b1_val = (b1 == 0) ? 0 : get_register(b1);
intptr_t src_addr = b1_val + d1;
- WriteW(src_addr, i2, instr);
+ WriteW(src_addr, i2);
return length;
}
@@ -10461,12 +10527,12 @@ EVALUATE(ASI) {
int d1_val = d1;
intptr_t addr = b1_val + d1_val;
- int32_t mem_val = ReadW(addr, instr);
+ int32_t mem_val = ReadW(addr);
bool isOF = CheckOverflowForIntAdd(mem_val, i2, int32_t);
int32_t alu_out = mem_val + i2;
SetS390ConditionCode<int32_t>(alu_out, 0);
SetS390OverflowCode(isOF);
- WriteW(addr, alu_out, instr);
+ WriteW(addr, alu_out);
return length;
}
@@ -10545,7 +10611,7 @@ EVALUATE(STMY) {
// Store each register in ascending order.
for (int i = 0; i <= r3 - r1; i++) {
int32_t value = get_low_register<int32_t>((r1 + i) % 16);
- WriteW(b2_val + offset + 4 * i, value, instr);
+ WriteW(b2_val + offset + 4 * i, value);
}
return length;
}
@@ -10571,7 +10637,7 @@ EVALUATE(LMY) {
   // Load each register in ascending order.
for (int i = 0; i <= r3 - r1; i++) {
- int32_t value = ReadW(b2_val + offset + 4 * i, instr);
+ int32_t value = ReadW(b2_val + offset + 4 * i);
set_low_register((r1 + i) % 16, value);
}
return length;
@@ -11232,7 +11298,7 @@ EVALUATE(STEY) {
int64_t b2_val = (b2 == 0) ? 0 : get_register(b2);
intptr_t addr = x2_val + b2_val + d2;
int32_t frs_val = get_fpr<int32_t>(r1);
- WriteW(addr, frs_val, instr);
+ WriteW(addr, frs_val);
return length;
}
diff --git a/deps/v8/src/execution/s390/simulator-s390.h b/deps/v8/src/execution/s390/simulator-s390.h
index 4c1b0a4924..cbe628691c 100644
--- a/deps/v8/src/execution/s390/simulator-s390.h
+++ b/deps/v8/src/execution/s390/simulator-s390.h
@@ -280,17 +280,17 @@ class Simulator : public SimulatorBase {
inline void WriteB(intptr_t addr, uint8_t value);
inline void WriteB(intptr_t addr, int8_t value);
- inline uint16_t ReadHU(intptr_t addr, Instruction* instr);
- inline int16_t ReadH(intptr_t addr, Instruction* instr);
+ inline uint16_t ReadHU(intptr_t addr);
+ inline int16_t ReadH(intptr_t addr);
// Note: Overloaded on the sign of the value.
- inline void WriteH(intptr_t addr, uint16_t value, Instruction* instr);
- inline void WriteH(intptr_t addr, int16_t value, Instruction* instr);
-
- inline uint32_t ReadWU(intptr_t addr, Instruction* instr);
- inline int32_t ReadW(intptr_t addr, Instruction* instr);
- inline int64_t ReadW64(intptr_t addr, Instruction* instr);
- inline void WriteW(intptr_t addr, uint32_t value, Instruction* instr);
- inline void WriteW(intptr_t addr, int32_t value, Instruction* instr);
+ inline void WriteH(intptr_t addr, uint16_t value);
+ inline void WriteH(intptr_t addr, int16_t value);
+
+ inline uint32_t ReadWU(intptr_t addr);
+ inline int32_t ReadW(intptr_t addr);
+ inline int64_t ReadW64(intptr_t addr);
+ inline void WriteW(intptr_t addr, uint32_t value);
+ inline void WriteW(intptr_t addr, int32_t value);
inline int64_t ReadDW(intptr_t addr);
inline double ReadDouble(intptr_t addr);
diff --git a/deps/v8/src/execution/simulator-base.h b/deps/v8/src/execution/simulator-base.h
index 9edc60a3f3..90e9441609 100644
--- a/deps/v8/src/execution/simulator-base.h
+++ b/deps/v8/src/execution/simulator-base.h
@@ -88,9 +88,9 @@ class SimulatorBase {
static typename std::enable_if<std::is_integral<T>::value, intptr_t>::type
ConvertArg(T arg) {
static_assert(sizeof(T) <= sizeof(intptr_t), "type bigger than ptrsize");
-#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64
- // The MIPS64 and RISCV64 calling convention is to sign extend all values,
- // even unsigned ones.
+#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_LOONG64
+ // The MIPS64, LOONG64 and RISCV64 calling convention is to sign extend all
+ // values, even unsigned ones.
using signed_t = typename std::make_signed<T>::type;
return static_cast<intptr_t>(static_cast<signed_t>(arg));
#else
diff --git a/deps/v8/src/execution/simulator.h b/deps/v8/src/execution/simulator.h
index 3b824e7632..5bf9d4612e 100644
--- a/deps/v8/src/execution/simulator.h
+++ b/deps/v8/src/execution/simulator.h
@@ -24,6 +24,8 @@
#include "src/execution/mips/simulator-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/execution/mips64/simulator-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/execution/loong64/simulator-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/execution/s390/simulator-s390.h"
#elif V8_TARGET_ARCH_RISCV64
diff --git a/deps/v8/src/execution/thread-local-top.h b/deps/v8/src/execution/thread-local-top.h
index f903747aeb..236beda8a0 100644
--- a/deps/v8/src/execution/thread-local-top.h
+++ b/deps/v8/src/execution/thread-local-top.h
@@ -5,6 +5,9 @@
#ifndef V8_EXECUTION_THREAD_LOCAL_TOP_H_
#define V8_EXECUTION_THREAD_LOCAL_TOP_H_
+#include "include/v8-callbacks.h"
+#include "include/v8-exception.h"
+#include "include/v8-unwinder.h"
#include "src/common/globals.h"
#include "src/execution/thread-id.h"
#include "src/objects/contexts.h"
@@ -63,8 +66,10 @@ class ThreadLocalTop {
// corresponds to the place on the JS stack where the C++ handler
// would have been if the stack were not separate.
Address try_catch_handler_address() {
- return reinterpret_cast<Address>(
- v8::TryCatch::JSStackComparableAddress(try_catch_handler_));
+ if (try_catch_handler_) {
+ return try_catch_handler_->JSStackComparableAddressPrivate();
+ }
+ return kNullAddress;
}
// Call depth represents nested v8 api calls. Instead of storing the nesting
diff --git a/deps/v8/src/execution/v8threads.cc b/deps/v8/src/execution/v8threads.cc
index 06575e9c64..3138823f7b 100644
--- a/deps/v8/src/execution/v8threads.cc
+++ b/deps/v8/src/execution/v8threads.cc
@@ -4,6 +4,7 @@
#include "src/execution/v8threads.h"
+#include "include/v8-locker.h"
#include "src/api/api.h"
#include "src/debug/debug.h"
#include "src/execution/execution.h"
diff --git a/deps/v8/src/execution/vm-state.h b/deps/v8/src/execution/vm-state.h
index 9621bee421..d903b222ee 100644
--- a/deps/v8/src/execution/vm-state.h
+++ b/deps/v8/src/execution/vm-state.h
@@ -5,7 +5,7 @@
#ifndef V8_EXECUTION_VM_STATE_H_
#define V8_EXECUTION_VM_STATE_H_
-#include "include/v8.h"
+#include "include/v8-unwinder.h"
#include "src/common/globals.h"
#include "src/logging/counters-scopes.h"
diff --git a/deps/v8/src/extensions/cputracemark-extension.cc b/deps/v8/src/extensions/cputracemark-extension.cc
index 029ad0f3cb..881ca3b1dc 100644
--- a/deps/v8/src/extensions/cputracemark-extension.cc
+++ b/deps/v8/src/extensions/cputracemark-extension.cc
@@ -4,6 +4,9 @@
#include "src/extensions/cputracemark-extension.h"
+#include "include/v8-isolate.h"
+#include "include/v8-template.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/extensions/cputracemark-extension.h b/deps/v8/src/extensions/cputracemark-extension.h
index 362bdcebd3..4eca092d4b 100644
--- a/deps/v8/src/extensions/cputracemark-extension.h
+++ b/deps/v8/src/extensions/cputracemark-extension.h
@@ -5,10 +5,14 @@
#ifndef V8_EXTENSIONS_CPUTRACEMARK_EXTENSION_H_
#define V8_EXTENSIONS_CPUTRACEMARK_EXTENSION_H_
-#include "include/v8.h"
+#include "include/v8-extension.h"
#include "src/base/strings.h"
namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+
namespace internal {
class CpuTraceMarkExtension : public v8::Extension {
diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc
index 755023d8d6..dab8c224c4 100644
--- a/deps/v8/src/extensions/externalize-string-extension.cc
+++ b/deps/v8/src/extensions/externalize-string-extension.cc
@@ -4,6 +4,7 @@
#include "src/extensions/externalize-string-extension.h"
+#include "include/v8-template.h"
#include "src/api/api-inl.h"
#include "src/base/strings.h"
#include "src/execution/isolate.h"
diff --git a/deps/v8/src/extensions/externalize-string-extension.h b/deps/v8/src/extensions/externalize-string-extension.h
index 8d08a7474a..8fce62191d 100644
--- a/deps/v8/src/extensions/externalize-string-extension.h
+++ b/deps/v8/src/extensions/externalize-string-extension.h
@@ -5,9 +5,13 @@
#ifndef V8_EXTENSIONS_EXTERNALIZE_STRING_EXTENSION_H_
#define V8_EXTENSIONS_EXTERNALIZE_STRING_EXTENSION_H_
-#include "include/v8.h"
+#include "include/v8-extension.h"
namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+
namespace internal {
class ExternalizeStringExtension : public v8::Extension {
diff --git a/deps/v8/src/extensions/gc-extension.cc b/deps/v8/src/extensions/gc-extension.cc
index 6f1c601d8d..cda90bd507 100644
--- a/deps/v8/src/extensions/gc-extension.cc
+++ b/deps/v8/src/extensions/gc-extension.cc
@@ -4,7 +4,11 @@
#include "src/extensions/gc-extension.h"
-#include "include/v8.h"
+#include "include/v8-isolate.h"
+#include "include/v8-object.h"
+#include "include/v8-persistent-handle.h"
+#include "include/v8-primitive.h"
+#include "include/v8-template.h"
#include "src/base/platform/platform.h"
#include "src/execution/isolate.h"
#include "src/heap/heap.h"
diff --git a/deps/v8/src/extensions/gc-extension.h b/deps/v8/src/extensions/gc-extension.h
index c5750c5e80..f38a946b9f 100644
--- a/deps/v8/src/extensions/gc-extension.h
+++ b/deps/v8/src/extensions/gc-extension.h
@@ -5,10 +5,15 @@
#ifndef V8_EXTENSIONS_GC_EXTENSION_H_
#define V8_EXTENSIONS_GC_EXTENSION_H_
-#include "include/v8.h"
+#include "include/v8-extension.h"
+#include "include/v8-local-handle.h"
#include "src/base/strings.h"
namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+
namespace internal {
// Provides garbage collection on invoking |fun_name|(options), where
diff --git a/deps/v8/src/extensions/ignition-statistics-extension.cc b/deps/v8/src/extensions/ignition-statistics-extension.cc
index 93ceeeeddf..454a85f50a 100644
--- a/deps/v8/src/extensions/ignition-statistics-extension.cc
+++ b/deps/v8/src/extensions/ignition-statistics-extension.cc
@@ -4,6 +4,8 @@
#include "src/extensions/ignition-statistics-extension.h"
+#include "include/v8-template.h"
+#include "src/api/api-inl.h"
#include "src/base/logging.h"
#include "src/execution/isolate.h"
#include "src/interpreter/bytecodes.h"
@@ -27,9 +29,10 @@ const char* const IgnitionStatisticsExtension::kSource =
void IgnitionStatisticsExtension::GetIgnitionDispatchCounters(
const v8::FunctionCallbackInfo<v8::Value>& args) {
- args.GetReturnValue().Set(reinterpret_cast<Isolate*>(args.GetIsolate())
- ->interpreter()
- ->GetDispatchCountersObject());
+ args.GetReturnValue().Set(
+ Utils::ToLocal(reinterpret_cast<Isolate*>(args.GetIsolate())
+ ->interpreter()
+ ->GetDispatchCountersObject()));
}
} // namespace internal
diff --git a/deps/v8/src/extensions/ignition-statistics-extension.h b/deps/v8/src/extensions/ignition-statistics-extension.h
index fee55f6128..deffe4c915 100644
--- a/deps/v8/src/extensions/ignition-statistics-extension.h
+++ b/deps/v8/src/extensions/ignition-statistics-extension.h
@@ -5,9 +5,13 @@
#ifndef V8_EXTENSIONS_IGNITION_STATISTICS_EXTENSION_H_
#define V8_EXTENSIONS_IGNITION_STATISTICS_EXTENSION_H_
-#include "include/v8.h"
+#include "include/v8-extension.h"
namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+
namespace internal {
class IgnitionStatisticsExtension : public v8::Extension {
diff --git a/deps/v8/src/extensions/statistics-extension.cc b/deps/v8/src/extensions/statistics-extension.cc
index 1911dfc39e..976a97ad73 100644
--- a/deps/v8/src/extensions/statistics-extension.cc
+++ b/deps/v8/src/extensions/statistics-extension.cc
@@ -4,6 +4,7 @@
#include "src/extensions/statistics-extension.h"
+#include "include/v8-template.h"
#include "src/common/assert-scope.h"
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h" // crbug.com/v8/8499
diff --git a/deps/v8/src/extensions/statistics-extension.h b/deps/v8/src/extensions/statistics-extension.h
index 4c53cbfdea..f2b0256ee2 100644
--- a/deps/v8/src/extensions/statistics-extension.h
+++ b/deps/v8/src/extensions/statistics-extension.h
@@ -5,9 +5,13 @@
#ifndef V8_EXTENSIONS_STATISTICS_EXTENSION_H_
#define V8_EXTENSIONS_STATISTICS_EXTENSION_H_
-#include "include/v8.h"
+#include "include/v8-extension.h"
namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+
namespace internal {
class StatisticsExtension : public v8::Extension {
diff --git a/deps/v8/src/extensions/trigger-failure-extension.cc b/deps/v8/src/extensions/trigger-failure-extension.cc
index 44c07fbc00..2c66d036a2 100644
--- a/deps/v8/src/extensions/trigger-failure-extension.cc
+++ b/deps/v8/src/extensions/trigger-failure-extension.cc
@@ -4,6 +4,7 @@
#include "src/extensions/trigger-failure-extension.h"
+#include "include/v8-template.h"
#include "src/base/logging.h"
#include "src/common/checks.h"
diff --git a/deps/v8/src/extensions/trigger-failure-extension.h b/deps/v8/src/extensions/trigger-failure-extension.h
index e2cfac1eb3..22039ccb27 100644
--- a/deps/v8/src/extensions/trigger-failure-extension.h
+++ b/deps/v8/src/extensions/trigger-failure-extension.h
@@ -5,9 +5,13 @@
#ifndef V8_EXTENSIONS_TRIGGER_FAILURE_EXTENSION_H_
#define V8_EXTENSIONS_TRIGGER_FAILURE_EXTENSION_H_
-#include "include/v8.h"
+#include "include/v8-extension.h"
namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+
namespace internal {
class TriggerFailureExtension : public v8::Extension {
diff --git a/deps/v8/src/extensions/vtunedomain-support-extension.cc b/deps/v8/src/extensions/vtunedomain-support-extension.cc
index 9a7715bb23..fcf2aa6961 100644
--- a/deps/v8/src/extensions/vtunedomain-support-extension.cc
+++ b/deps/v8/src/extensions/vtunedomain-support-extension.cc
@@ -3,9 +3,13 @@
// found in the LICENSE file.
#include "src/extensions/vtunedomain-support-extension.h"
+
#include <string>
#include <vector>
+#include "include/v8-isolate.h"
+#include "include/v8-template.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/extensions/vtunedomain-support-extension.h b/deps/v8/src/extensions/vtunedomain-support-extension.h
index 4640d0dfa5..cccfd74223 100644
--- a/deps/v8/src/extensions/vtunedomain-support-extension.h
+++ b/deps/v8/src/extensions/vtunedomain-support-extension.h
@@ -5,7 +5,7 @@
#ifndef V8_EXTENSIONS_VTUNEDOMAIN_SUPPORT_EXTENSION_H_
#define V8_EXTENSIONS_VTUNEDOMAIN_SUPPORT_EXTENSION_H_
-#include "include/v8.h"
+#include "include/v8-extension.h"
#include "src/base/strings.h"
#include "src/base/vector.h"
#include "src/third_party/vtune/vtuneapi.h"
@@ -19,6 +19,10 @@
#define TASK_END_FAILED 1 << 6
namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+
namespace internal {
class VTuneDomainSupportExtension : public v8::Extension {
diff --git a/deps/v8/src/flags/flag-definitions.h b/deps/v8/src/flags/flag-definitions.h
index 312d17b52f..ca8ed311a8 100644
--- a/deps/v8/src/flags/flag-definitions.h
+++ b/deps/v8/src/flags/flag-definitions.h
@@ -175,6 +175,20 @@ struct MaybeBoolFlag {
#define V8_HEAP_SANDBOX_BOOL false
#endif
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+#define V8_VIRTUAL_MEMORY_CAGE_BOOL true
+#else
+#define V8_VIRTUAL_MEMORY_CAGE_BOOL false
+#endif
+
+// D8's MultiMappedAllocator is only available on Linux, and only if the virtual
+// memory cage is not enabled.
+#if V8_OS_LINUX && !V8_VIRTUAL_MEMORY_CAGE_BOOL
+#define MULTI_MAPPED_ALLOCATOR_AVAILABLE true
+#else
+#define MULTI_MAPPED_ALLOCATOR_AVAILABLE false
+#endif
+
#ifdef V8_ENABLE_CONTROL_FLOW_INTEGRITY
#define ENABLE_CONTROL_FLOW_INTEGRITY_BOOL true
#else
@@ -183,7 +197,7 @@ struct MaybeBoolFlag {
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
- V8_TARGET_ARCH_MIPS
+ V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_LOONG64
#define ENABLE_SPARKPLUG true
#else
// TODO(v8:11421): Enable Sparkplug for other architectures
@@ -299,10 +313,8 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
#define HARMONY_STAGED(V) \
HARMONY_STAGED_BASE(V) \
V(harmony_intl_best_fit_matcher, "Intl BestFitMatcher") \
- V(harmony_intl_displaynames_v2, "Intl.DisplayNames v2") \
- V(harmony_intl_locale_info, "Intl locale info") \
- V(harmony_intl_more_timezone, \
- "Extend Intl.DateTimeFormat timeZoneName Option")
+  V(harmony_intl_enumeration, "Intl Enumeration API")         \
+ V(harmony_intl_locale_info, "Intl locale info")
#else
#define HARMONY_STAGED(V) HARMONY_STAGED_BASE(V)
#endif
@@ -319,10 +331,13 @@ DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
V(harmony_class_static_blocks, "harmony static initializer blocks")
#ifdef V8_INTL_SUPPORT
-#define HARMONY_SHIPPING(V) \
- HARMONY_SHIPPING_BASE(V) \
- V(harmony_intl_dateformat_day_period, \
- "Add dayPeriod option to DateTimeFormat")
+#define HARMONY_SHIPPING(V) \
+ HARMONY_SHIPPING_BASE(V) \
+ V(harmony_intl_dateformat_day_period, \
+ "Add dayPeriod option to DateTimeFormat") \
+ V(harmony_intl_displaynames_v2, "Intl.DisplayNames v2") \
+ V(harmony_intl_more_timezone, \
+ "Extend Intl.DateTimeFormat timeZoneName Option")
#else
#define HARMONY_SHIPPING(V) HARMONY_SHIPPING_BASE(V)
#endif
@@ -490,6 +505,7 @@ DEFINE_BOOL(future, FUTURE_BOOL,
DEFINE_WEAK_IMPLICATION(future, turbo_inline_js_wasm_calls)
#if ENABLE_SPARKPLUG
DEFINE_WEAK_IMPLICATION(future, sparkplug)
+DEFINE_WEAK_IMPLICATION(future, flush_baseline_code)
#endif
#if V8_SHORT_BUILTIN_CALLS
DEFINE_WEAK_IMPLICATION(future, short_builtin_calls)
@@ -519,9 +535,9 @@ DEFINE_NEG_IMPLICATION(jitless, interpreted_frames_native_stack)
DEFINE_BOOL(assert_types, false,
"generate runtime type assertions to test the typer")
-DEFINE_BOOL(trace_code_dependencies, false, "trace code dependencies")
+DEFINE_BOOL(trace_compilation_dependencies, false, "trace code dependencies")
// Depend on --trace-deopt-verbose for reporting dependency invalidations.
-DEFINE_IMPLICATION(trace_code_dependencies, trace_deopt_verbose)
+DEFINE_IMPLICATION(trace_compilation_dependencies, trace_deopt_verbose)
#ifdef V8_ALLOCATION_SITE_TRACKING
#define V8_ALLOCATION_SITE_TRACKING_BOOL true
@@ -567,8 +583,17 @@ DEFINE_BOOL_READONLY(enable_sealed_frozen_elements_kind, true,
DEFINE_BOOL(unbox_double_arrays, true, "automatically unbox arrays of doubles")
DEFINE_BOOL_READONLY(string_slices, true, "use string slices")
+DEFINE_INT(ticks_before_optimization, 3,
+ "the number of times we have to go through the interrupt budget "
+ "before considering this function for optimization")
+DEFINE_INT(bytecode_size_allowance_per_tick, 1100,
+ "increases the number of ticks required for optimization by "
+ "bytecode.length/X")
DEFINE_INT(interrupt_budget, 132 * KB,
"interrupt budget which should be used for the profiler counter")
+DEFINE_INT(
+ max_bytecode_size_for_early_opt, 81,
+ "Maximum bytecode length for a function to be optimized on the first tick")
// Flags for inline caching and feedback vectors.
DEFINE_BOOL(use_ic, true, "use inline caching")
@@ -695,19 +720,21 @@ DEFINE_INT(concurrent_recompilation_queue_length, 8,
"the length of the concurrent compilation queue")
DEFINE_INT(concurrent_recompilation_delay, 0,
"artificial compilation delay in ms")
-DEFINE_BOOL(block_concurrent_recompilation, false,
- "block queued jobs until released")
DEFINE_BOOL(concurrent_inlining, false,
"run optimizing compiler's inlining phase on a separate thread")
-DEFINE_BOOL(stress_concurrent_inlining, false,
- "makes concurrent inlining more likely to trigger in tests")
+DEFINE_BOOL(
+ stress_concurrent_inlining, false,
+ "create additional concurrent optimization jobs but throw away result")
DEFINE_IMPLICATION(stress_concurrent_inlining, concurrent_inlining)
DEFINE_NEG_IMPLICATION(stress_concurrent_inlining, lazy_feedback_allocation)
DEFINE_WEAK_VALUE_IMPLICATION(stress_concurrent_inlining, interrupt_budget,
15 * KB)
+DEFINE_BOOL(stress_concurrent_inlining_attach_code, false,
+ "create additional concurrent optimization jobs")
+DEFINE_IMPLICATION(stress_concurrent_inlining_attach_code,
+ stress_concurrent_inlining)
DEFINE_INT(max_serializer_nesting, 25,
"maximum levels for nesting child serializers")
-DEFINE_WEAK_IMPLICATION(future, concurrent_inlining)
DEFINE_BOOL(trace_heap_broker_verbose, false,
"trace the heap broker verbosely (all reports)")
DEFINE_BOOL(trace_heap_broker_memory, false,
@@ -882,15 +909,6 @@ DEFINE_BOOL(optimize_for_size, false,
"speed")
DEFINE_VALUE_IMPLICATION(optimize_for_size, max_semi_space_size, 1)
-#ifdef DISABLE_UNTRUSTED_CODE_MITIGATIONS
-#define V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS false
-#else
-#define V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS true
-#endif
-DEFINE_BOOL(untrusted_code_mitigations, V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS,
- "Enable mitigations for executing untrusted code")
-#undef V8_DEFAULT_UNTRUSTED_CODE_MITIGATIONS
-
// Flags for WebAssembly.
#if V8_ENABLE_WEBASSEMBLY
@@ -988,7 +1006,6 @@ DEFINE_STRING(dump_wasm_module_path, nullptr,
FOREACH_WASM_FEATURE_FLAG(DECL_WASM_FLAG)
#undef DECL_WASM_FLAG
-DEFINE_IMPLICATION(experimental_wasm_gc_experiments, experimental_wasm_gc)
DEFINE_IMPLICATION(experimental_wasm_gc, experimental_wasm_typed_funcref)
DEFINE_IMPLICATION(experimental_wasm_typed_funcref, experimental_wasm_reftypes)
@@ -1015,6 +1032,9 @@ DEFINE_NEG_NEG_IMPLICATION(wasm_bounds_checks, wasm_enforce_bounds_checks)
DEFINE_BOOL(wasm_math_intrinsics, true,
"intrinsify some Math imports into wasm")
+DEFINE_BOOL(
+ wasm_inlining, false,
+ "enable inlining of wasm functions into wasm functions (experimental)")
DEFINE_BOOL(wasm_loop_unrolling, true,
"enable loop unrolling for wasm functions")
DEFINE_BOOL(wasm_fuzzer_gen_test, false,
@@ -1580,8 +1600,9 @@ DEFINE_BOOL(debug_sim, false, "Enable debugging the simulator")
DEFINE_BOOL(check_icache, false,
"Check icache flushes in ARM and MIPS simulator")
DEFINE_INT(stop_sim_at, 0, "Simulator stop after x number of instructions")
-#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_MIPS64) || \
- defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_RISCV64)
+#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_MIPS64) || \
+ defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_RISCV64) || \
+ defined(V8_TARGET_ARCH_LOONG64)
DEFINE_INT(sim_stack_alignment, 16,
"Stack alignment in bytes in simulator. This must be a power of two "
"and it must be at least 16. 16 is default.")
@@ -1796,7 +1817,7 @@ DEFINE_BOOL(mock_arraybuffer_allocator, false,
DEFINE_SIZE_T(mock_arraybuffer_allocator_limit, 0,
"Memory limit for mock ArrayBuffer allocator used to simulate "
"OOM for testing.")
-#if V8_OS_LINUX
+#if MULTI_MAPPED_ALLOCATOR_AVAILABLE
DEFINE_BOOL(multi_mapped_mock_allocator, false,
"Use a multi-mapped mock ArrayBuffer allocator for testing.")
#endif
@@ -2118,6 +2139,7 @@ DEFINE_NEG_IMPLICATION(single_threaded_gc, stress_concurrent_allocation)
DEFINE_BOOL(verify_predictable, false,
"this mode is used for checking that V8 behaves predictably")
+DEFINE_IMPLICATION(verify_predictable, predictable)
DEFINE_INT(dump_allocations_digest_at_alloc, -1,
"dump allocations digest each n-th allocation")
diff --git a/deps/v8/src/handles/DIR_METADATA b/deps/v8/src/handles/DIR_METADATA
index ff55846b31..af999da1f2 100644
--- a/deps/v8/src/handles/DIR_METADATA
+++ b/deps/v8/src/handles/DIR_METADATA
@@ -7,5 +7,5 @@
# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
monorail {
- component: "Blink>JavaScript>GC"
-}
\ No newline at end of file
+ component: "Blink>JavaScript>GarbageCollection"
+}
diff --git a/deps/v8/src/handles/global-handles.cc b/deps/v8/src/handles/global-handles.cc
index 55230a6d0b..d8d5016667 100644
--- a/deps/v8/src/handles/global-handles.cc
+++ b/deps/v8/src/handles/global-handles.cc
@@ -8,7 +8,7 @@
#include <cstdint>
#include <map>
-#include "include/v8.h"
+#include "include/v8-traced-handle.h"
#include "src/api/api-inl.h"
#include "src/base/compiler-specific.h"
#include "src/base/sanitizer/asan.h"
diff --git a/deps/v8/src/handles/global-handles.h b/deps/v8/src/handles/global-handles.h
index 237cedbbb5..d7f68e5b55 100644
--- a/deps/v8/src/handles/global-handles.h
+++ b/deps/v8/src/handles/global-handles.h
@@ -10,8 +10,9 @@
#include <utility>
#include <vector>
+#include "include/v8-callbacks.h"
+#include "include/v8-persistent-handle.h"
#include "include/v8-profiler.h"
-#include "include/v8.h"
#include "src/handles/handles.h"
#include "src/heap/heap.h"
#include "src/objects/objects.h"
diff --git a/deps/v8/src/handles/handles.h b/deps/v8/src/handles/handles.h
index 929cba0bc7..166b7ee4ab 100644
--- a/deps/v8/src/handles/handles.h
+++ b/deps/v8/src/handles/handles.h
@@ -7,7 +7,6 @@
#include <type_traits>
-#include "include/v8.h"
#include "src/base/functional.h"
#include "src/base/macros.h"
#include "src/common/checks.h"
@@ -15,6 +14,9 @@
#include "src/zone/zone.h"
namespace v8 {
+
+class HandleScope;
+
namespace internal {
// Forward declarations.
diff --git a/deps/v8/src/heap/DIR_METADATA b/deps/v8/src/heap/DIR_METADATA
index ff55846b31..af999da1f2 100644
--- a/deps/v8/src/heap/DIR_METADATA
+++ b/deps/v8/src/heap/DIR_METADATA
@@ -7,5 +7,5 @@
# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
monorail {
- component: "Blink>JavaScript>GC"
-}
\ No newline at end of file
+ component: "Blink>JavaScript>GarbageCollection"
+}
diff --git a/deps/v8/src/heap/array-buffer-sweeper.cc b/deps/v8/src/heap/array-buffer-sweeper.cc
index 597e4d0f93..2bdcec0bf7 100644
--- a/deps/v8/src/heap/array-buffer-sweeper.cc
+++ b/deps/v8/src/heap/array-buffer-sweeper.cc
@@ -71,6 +71,7 @@ size_t ArrayBufferList::BytesSlow() {
void ArrayBufferSweeper::EnsureFinished() {
if (!sweeping_in_progress_) return;
+ TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_COMPLETE_SWEEP_ARRAY_BUFFERS);
TryAbortResult abort_result =
heap_->isolate()->cancelable_task_manager()->TryAbort(job_->id_);
@@ -112,9 +113,10 @@ void ArrayBufferSweeper::MergeBackExtensionsWhenSwept() {
if (job_->state_ == SweepingState::kDone) {
Merge();
sweeping_in_progress_ = false;
- } else {
- UpdateCountersForConcurrentlySweptExtensions();
}
+ // Update the freed counters either way. The counters must be updated even
+ // when sweeping is already done, to avoid counter overflows.
+ UpdateCountersForConcurrentlySweptExtensions();
}
}
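The hunk above moves UpdateCountersForConcurrentlySweptExtensions() out of the else branch so freed-byte counters are flushed on both the done and the still-sweeping paths. A minimal sketch of that pattern (hypothetical type, not V8's ArrayBufferSweeper): bytes recorded by the concurrent sweeper are drained on every merge so the accumulator cannot grow without bound:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    class FreedBytesCounter {
     public:
      // Called from the concurrent sweeper thread.
      void RecordFreed(std::uint64_t bytes) {
        freed_.fetch_add(bytes, std::memory_order_relaxed);
      }

      // Called from the main thread on every merge, including the one that
      // observes sweeping as done, so the accumulator never grows unbounded.
      std::uint64_t Drain() {
        return freed_.exchange(0, std::memory_order_relaxed);
      }

     private:
      std::atomic<std::uint64_t> freed_{0};
    };

    int main() {
      FreedBytesCounter counter;
      counter.RecordFreed(4096);
      std::printf("drained: %llu\n",
                  static_cast<unsigned long long>(counter.Drain()));
    }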
diff --git a/deps/v8/src/heap/base/asm/loong64/push_registers_asm.cc b/deps/v8/src/heap/base/asm/loong64/push_registers_asm.cc
new file mode 100644
index 0000000000..aa8dcd356b
--- /dev/null
+++ b/deps/v8/src/heap/base/asm/loong64/push_registers_asm.cc
@@ -0,0 +1,48 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Push all callee-saved registers to get them on the stack for conservative
+// stack scanning.
+//
+// See asm/x64/push_registers_clang.cc for why the function is not generated
+// using clang.
+//
+// Do not depend on V8_TARGET_OS_* defines as some embedders may override the
+// GN toolchain (e.g. ChromeOS) and not provide them.
+asm(".text \n"
+ ".global PushAllRegistersAndIterateStack \n"
+ ".type PushAllRegistersAndIterateStack, %function \n"
+ ".hidden PushAllRegistersAndIterateStack \n"
+ "PushAllRegistersAndIterateStack: \n"
+ // Push all callee-saved registers and save return address.
+ " addi.d $sp, $sp, -96 \n"
+ " st.d $ra, $sp, 88 \n"
+ " st.d $s8, $sp, 80 \n"
+ " st.d $sp, $sp, 72 \n"
+ " st.d $fp, $sp, 64 \n"
+ " st.d $s7, $sp, 56 \n"
+ " st.d $s6, $sp, 48 \n"
+ " st.d $s5, $sp, 40 \n"
+ " st.d $s4, $sp, 32 \n"
+ " st.d $s3, $sp, 24 \n"
+ " st.d $s2, $sp, 16 \n"
+ " st.d $s1, $sp, 8 \n"
+ " st.d $s0, $sp, 0 \n"
+ // Maintain frame pointer.
+ " addi.d $s8, $sp, 0 \n"
+ // Pass 1st parameter (a0) unchanged (Stack*).
+ // Pass 2nd parameter (a1) unchanged (StackVisitor*).
+ // Save 3rd parameter (a2; IterateStackCallback).
+ " addi.d $a3, $a2, 0 \n"
+ // Call the callback.
+ // Pass 3rd parameter as sp (stack pointer).
+ " addi.d $a2, $sp, 0 \n"
+ " jirl $ra, $a3, 0 \n"
+ // Load return address.
+ " ld.d $ra, $sp, 88 \n"
+ // Restore frame pointer.
+ " ld.d $s8, $sp, 80 \n"
+ // Discard all callee-saved registers.
+ " addi.d $sp, $sp, 96 \n"
+ " jirl $zero, $ra, 0 \n");
diff --git a/deps/v8/src/heap/base/stack.cc b/deps/v8/src/heap/base/stack.cc
index fd5eab4528..8b6713e687 100644
--- a/deps/v8/src/heap/base/stack.cc
+++ b/deps/v8/src/heap/base/stack.cc
@@ -9,6 +9,7 @@
#include "src/base/platform/platform.h"
#include "src/base/sanitizer/asan.h"
#include "src/base/sanitizer/msan.h"
+#include "src/base/sanitizer/tsan.h"
#include "src/heap/cppgc/globals.h"
namespace heap {
@@ -43,6 +44,10 @@ namespace {
// No ASAN support as accessing fake frames otherwise results in
// "stack-use-after-scope" warnings.
DISABLE_ASAN
+// No TSAN support as the stack may not be exclusively owned by the current
+// thread, e.g., for interrupt handling. Atomic reads are not enough as the
+// other thread may use a lock to synchronize the access.
+DISABLE_TSAN
void IterateAsanFakeFrameIfNecessary(StackVisitor* visitor,
void* asan_fake_stack,
const void* stack_start,
@@ -103,6 +108,10 @@ void IterateSafeStackIfNecessary(StackVisitor* visitor) {
V8_NOINLINE
// No ASAN support as method accesses redzones while walking the stack.
DISABLE_ASAN
+// No TSAN support as the stack may not be exclusively owned by the current
+// thread, e.g., for interrupt handling. Atomic reads are not enough as the
+// other thread may use a lock to synchronize the access.
+DISABLE_TSAN
void IteratePointersImpl(const Stack* stack, StackVisitor* visitor,
intptr_t* stack_end) {
#ifdef V8_USE_ADDRESS_SANITIZER
@@ -133,6 +142,7 @@ void Stack::IteratePointers(StackVisitor* visitor) const {
PushAllRegistersAndIterateStack(this, visitor, &IteratePointersImpl);
// No need to deal with callee-saved registers as they will be kept alive by
// the regular conservative stack iteration.
+ // TODO(chromium:1056170): Add support for SIMD and/or filtering.
IterateSafeStackIfNecessary(visitor);
}
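The DISABLE_TSAN annotation used above excludes a function from ThreadSanitizer instrumentation: the stack words read here may be written by another thread under a lock that is invisible from this side, so any instrumented access would be reported as a race. A minimal stand-alone sketch of such an annotation, assuming a GCC/Clang toolchain (V8's real macro lives in src/base/sanitizer/tsan.h):

    #if defined(__clang__) || defined(__GNUC__)
    #define DISABLE_TSAN __attribute__((no_sanitize_thread))
    #else
    #define DISABLE_TSAN
    #endif

    // Reads a word that another thread may write under its own lock; the
    // attribute suppresses the (intentional) race report.
    DISABLE_TSAN
    long ReadRacyWord(const long* word) { return *word; }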
diff --git a/deps/v8/src/heap/basic-memory-chunk.cc b/deps/v8/src/heap/basic-memory-chunk.cc
index 6fb0467c39..0c7a8170cf 100644
--- a/deps/v8/src/heap/basic-memory-chunk.cc
+++ b/deps/v8/src/heap/basic-memory-chunk.cc
@@ -25,6 +25,26 @@ STATIC_ASSERT(BasicMemoryChunk::kFlagsOffset ==
STATIC_ASSERT(BasicMemoryChunk::kHeapOffset ==
heap_internals::MemoryChunk::kHeapOffset);
+// static
+constexpr BasicMemoryChunk::MainThreadFlags BasicMemoryChunk::kAllFlagsMask;
+// static
+constexpr BasicMemoryChunk::MainThreadFlags
+ BasicMemoryChunk::kPointersToHereAreInterestingMask;
+// static
+constexpr BasicMemoryChunk::MainThreadFlags
+ BasicMemoryChunk::kPointersFromHereAreInterestingMask;
+// static
+constexpr BasicMemoryChunk::MainThreadFlags
+ BasicMemoryChunk::kEvacuationCandidateMask;
+// static
+constexpr BasicMemoryChunk::MainThreadFlags
+ BasicMemoryChunk::kIsInYoungGenerationMask;
+// static
+constexpr BasicMemoryChunk::MainThreadFlags BasicMemoryChunk::kIsLargePageMask;
+// static
+constexpr BasicMemoryChunk::MainThreadFlags
+ BasicMemoryChunk::kSkipEvacuationSlotsRecordingMask;
+
BasicMemoryChunk::BasicMemoryChunk(size_t size, Address area_start,
Address area_end) {
size_ = size;
@@ -75,13 +95,11 @@ class BasicMemoryChunkValidator {
STATIC_ASSERT(BasicMemoryChunk::kSizeOffset ==
offsetof(BasicMemoryChunk, size_));
STATIC_ASSERT(BasicMemoryChunk::kFlagsOffset ==
- offsetof(BasicMemoryChunk, flags_));
+ offsetof(BasicMemoryChunk, main_thread_flags_));
STATIC_ASSERT(BasicMemoryChunk::kHeapOffset ==
offsetof(BasicMemoryChunk, heap_));
STATIC_ASSERT(offsetof(BasicMemoryChunk, size_) ==
MemoryChunkLayout::kSizeOffset);
- STATIC_ASSERT(offsetof(BasicMemoryChunk, flags_) ==
- MemoryChunkLayout::kFlagsOffset);
STATIC_ASSERT(offsetof(BasicMemoryChunk, heap_) ==
MemoryChunkLayout::kHeapOffset);
STATIC_ASSERT(offsetof(BasicMemoryChunk, area_start_) ==
diff --git a/deps/v8/src/heap/basic-memory-chunk.h b/deps/v8/src/heap/basic-memory-chunk.h
index 993291dc0e..de91e6ea9f 100644
--- a/deps/v8/src/heap/basic-memory-chunk.h
+++ b/deps/v8/src/heap/basic-memory-chunk.h
@@ -9,6 +9,7 @@
#include <unordered_map>
#include "src/base/atomic-utils.h"
+#include "src/base/flags.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
#include "src/heap/marking.h"
@@ -30,7 +31,7 @@ class BasicMemoryChunk {
}
};
- enum Flag {
+ enum Flag : uintptr_t {
NO_FLAGS = 0u,
IS_EXECUTABLE = 1u << 0,
POINTERS_TO_HERE_ARE_INTERESTING = 1u << 1,
@@ -44,12 +45,6 @@ class BasicMemoryChunk {
EVACUATION_CANDIDATE = 1u << 6,
NEVER_EVACUATE = 1u << 7,
- // Large objects can have a progress bar in their page header. These object
- // are scanned in increments and will be kept black while being scanned.
- // Even if the mutator writes to them they will be kept black and a white
- // to grey transition is performed in the value.
- HAS_PROGRESS_BAR = 1u << 8,
-
// |PAGE_NEW_OLD_PROMOTION|: A page tagged with this flag has been promoted
// from new to old space during evacuation.
PAGE_NEW_OLD_PROMOTION = 1u << 9,
@@ -111,6 +106,28 @@ class BasicMemoryChunk {
IN_SHARED_HEAP = 1u << 23,
};
+ using MainThreadFlags = base::Flags<Flag, uintptr_t>;
+
+ static constexpr MainThreadFlags kAllFlagsMask = ~MainThreadFlags(NO_FLAGS);
+
+ static constexpr MainThreadFlags kPointersToHereAreInterestingMask =
+ POINTERS_TO_HERE_ARE_INTERESTING;
+
+ static constexpr MainThreadFlags kPointersFromHereAreInterestingMask =
+ POINTERS_FROM_HERE_ARE_INTERESTING;
+
+ static constexpr MainThreadFlags kEvacuationCandidateMask =
+ EVACUATION_CANDIDATE;
+
+ static constexpr MainThreadFlags kIsInYoungGenerationMask =
+ MainThreadFlags(FROM_PAGE) | MainThreadFlags(TO_PAGE);
+
+ static constexpr MainThreadFlags kIsLargePageMask = LARGE_PAGE;
+
+ static constexpr MainThreadFlags kSkipEvacuationSlotsRecordingMask =
+ MainThreadFlags(kEvacuationCandidateMask) |
+ MainThreadFlags(kIsInYoungGenerationMask);
+
static const intptr_t kAlignment =
(static_cast<uintptr_t>(1) << kPageSizeBits);
@@ -157,54 +174,20 @@ class BasicMemoryChunk {
void set_owner(BaseSpace* space) { owner_ = space; }
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- void SetFlag(Flag flag) {
- if (access_mode == AccessMode::NON_ATOMIC) {
- flags_ |= flag;
- } else {
- base::AsAtomicWord::SetBits<uintptr_t>(&flags_, flag, flag);
- }
- }
-
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- bool IsFlagSet(Flag flag) const {
- return (GetFlags<access_mode>() & flag) != 0;
+ void SetFlag(Flag flag) { main_thread_flags_ |= flag; }
+ bool IsFlagSet(Flag flag) const { return main_thread_flags_ & flag; }
+ void ClearFlag(Flag flag) {
+ main_thread_flags_ = main_thread_flags_.without(flag);
}
-
- void ClearFlag(Flag flag) { flags_ &= ~flag; }
-
- // Set or clear multiple flags at a time. The flags in the mask are set to
- // the value in "flags", the rest retain the current value in |flags_|.
- void SetFlags(uintptr_t flags, uintptr_t mask) {
- flags_ = (flags_ & ~mask) | (flags & mask);
+ void ClearFlags(MainThreadFlags flags) { main_thread_flags_ &= ~flags; }
+ // Set or clear multiple flags at a time. `mask` indicates which flags
+ // should be replaced with the new `flags`.
+ void SetFlags(MainThreadFlags flags, MainThreadFlags mask) {
+ main_thread_flags_ = (main_thread_flags_ & ~mask) | (flags & mask);
}
// Return all current flags.
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- uintptr_t GetFlags() const {
- if (access_mode == AccessMode::NON_ATOMIC) {
- return flags_;
- } else {
- return base::AsAtomicWord::Relaxed_Load(&flags_);
- }
- }
-
- using Flags = uintptr_t;
-
- static const Flags kPointersToHereAreInterestingMask =
- POINTERS_TO_HERE_ARE_INTERESTING;
-
- static const Flags kPointersFromHereAreInterestingMask =
- POINTERS_FROM_HERE_ARE_INTERESTING;
-
- static const Flags kEvacuationCandidateMask = EVACUATION_CANDIDATE;
-
- static const Flags kIsInYoungGenerationMask = FROM_PAGE | TO_PAGE;
-
- static const Flags kIsLargePageMask = LARGE_PAGE;
-
- static const Flags kSkipEvacuationSlotsRecordingMask =
- kEvacuationCandidateMask | kIsInYoungGenerationMask;
+ MainThreadFlags GetFlags() const { return main_thread_flags_; }
private:
bool InReadOnlySpaceRaw() const { return IsFlagSet(READ_ONLY_HEAP); }
@@ -227,16 +210,13 @@ class BasicMemoryChunk {
return !IsEvacuationCandidate() && !IsFlagSet(NEVER_ALLOCATE_ON_PAGE);
}
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- bool IsEvacuationCandidate() {
- DCHECK(!(IsFlagSet<access_mode>(NEVER_EVACUATE) &&
- IsFlagSet<access_mode>(EVACUATION_CANDIDATE)));
- return IsFlagSet<access_mode>(EVACUATION_CANDIDATE);
+ bool IsEvacuationCandidate() const {
+ DCHECK(!(IsFlagSet(NEVER_EVACUATE) && IsFlagSet(EVACUATION_CANDIDATE)));
+ return IsFlagSet(EVACUATION_CANDIDATE);
}
- template <AccessMode access_mode = AccessMode::NON_ATOMIC>
- bool ShouldSkipEvacuationSlotRecording() {
- uintptr_t flags = GetFlags<access_mode>();
+ bool ShouldSkipEvacuationSlotRecording() const {
+ MainThreadFlags flags = GetFlags();
return ((flags & kSkipEvacuationSlotsRecordingMask) != 0) &&
((flags & COMPACTION_WAS_ABORTED) == 0);
}
@@ -360,7 +340,9 @@ class BasicMemoryChunk {
// Overall size of the chunk, including the header and guards.
size_t size_;
- uintptr_t flags_ = NO_FLAGS;
+ // Flags that are only mutable from the main thread when no concurrent
+ // component (e.g. marker, sweeper) is running.
+ MainThreadFlags main_thread_flags_{NO_FLAGS};
// TODO(v8:7464): Find a way to remove this.
// This goes against the spirit for the BasicMemoryChunk, but until C++14/17
@@ -399,6 +381,8 @@ class BasicMemoryChunk {
friend class PagedSpace;
};
+DEFINE_OPERATORS_FOR_FLAGS(BasicMemoryChunk::MainThreadFlags)
+
STATIC_ASSERT(std::is_standard_layout<BasicMemoryChunk>::value);
} // namespace internal
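The refactor above replaces raw uintptr_t flag words with a typed base::Flags wrapper while keeping the masked-update idiom in SetFlags(): bits selected by the mask take their value from the new flags, and all other bits keep their current value. A self-contained sketch of that idiom with stand-in values:

    #include <cassert>
    #include <cstdint>

    enum Flag : std::uintptr_t {
      NO_FLAGS = 0u,
      IS_EXECUTABLE = 1u << 0,
      EVACUATION_CANDIDATE = 1u << 6,
    };

    // Bits selected by `mask` take their value from `flags`; all other bits
    // are preserved.
    std::uintptr_t SetFlags(std::uintptr_t current, std::uintptr_t flags,
                            std::uintptr_t mask) {
      return (current & ~mask) | (flags & mask);
    }

    int main() {
      std::uintptr_t f = IS_EXECUTABLE;
      // Set EVACUATION_CANDIDATE without touching IS_EXECUTABLE.
      f = SetFlags(f, EVACUATION_CANDIDATE, EVACUATION_CANDIDATE);
      assert(f == (IS_EXECUTABLE | EVACUATION_CANDIDATE));
      // Clear it again with the same helper: mask selects the bit, flags is 0.
      f = SetFlags(f, NO_FLAGS, EVACUATION_CANDIDATE);
      assert(f == IS_EXECUTABLE);
    }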
diff --git a/deps/v8/src/heap/cppgc-js/DEPS b/deps/v8/src/heap/cppgc-js/DEPS
new file mode 100644
index 0000000000..37049928d5
--- /dev/null
+++ b/deps/v8/src/heap/cppgc-js/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+include/cppgc",
+]
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.cc b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
index 8c5813867f..c21d1ceb50 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.cc
@@ -10,8 +10,8 @@
#include "include/cppgc/heap-consistency.h"
#include "include/cppgc/platform.h"
+#include "include/v8-local-handle.h"
#include "include/v8-platform.h"
-#include "include/v8.h"
#include "src/base/logging.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
@@ -217,6 +217,14 @@ void UnifiedHeapMarker::AddObject(void* object) {
cppgc::internal::HeapObjectHeader::FromObject(object));
}
+void FatalOutOfMemoryHandlerImpl(const std::string& reason,
+ const SourceLocation&, HeapBase* heap) {
+ FatalProcessOutOfMemory(
+ reinterpret_cast<v8::internal::Isolate*>(
+ static_cast<v8::internal::CppHeap*>(heap)->isolate()),
+ reason.c_str());
+}
+
} // namespace
void CppHeap::MetricRecorderAdapter::AddMainThreadEvent(
@@ -355,6 +363,7 @@ void CppHeap::AttachIsolate(Isolate* isolate) {
wrapper_descriptor_);
SetMetricRecorder(std::make_unique<MetricRecorderAdapter>(*this));
SetStackStart(base::Stack::GetStackStart());
+ oom_handler().SetCustomHandler(&FatalOutOfMemoryHandlerImpl);
no_gc_scope_--;
}
@@ -376,6 +385,7 @@ void CppHeap::DetachIsolate() {
isolate_ = nullptr;
// Any future garbage collections will ignore the V8->C++ references.
isolate()->SetEmbedderHeapTracer(nullptr);
+ oom_handler().SetCustomHandler(nullptr);
// Enter no GC scope.
no_gc_scope_++;
}
@@ -483,13 +493,14 @@ void CppHeap::TraceEpilogue(TraceSummary* trace_summary) {
// The allocated bytes counter in v8 was reset to the current marked bytes, so
// any pending allocated bytes updates should be discarded.
buffered_allocated_bytes_ = 0;
- ExecutePreFinalizers();
- // TODO(chromium:1056170): replace build flag with dedicated flag.
-#if DEBUG
+ const size_t bytes_allocated_in_prefinalizers = ExecutePreFinalizers();
+#if CPPGC_VERIFY_HEAP
UnifiedHeapMarkingVerifier verifier(*this);
- verifier.Run(stack_state_of_prev_gc(), stack_end_of_current_gc(),
- stats_collector()->marked_bytes());
-#endif
+ verifier.Run(
+ stack_state_of_prev_gc(), stack_end_of_current_gc(),
+ stats_collector()->marked_bytes() + bytes_allocated_in_prefinalizers);
+#endif // CPPGC_VERIFY_HEAP
+ USE(bytes_allocated_in_prefinalizers);
{
cppgc::subtle::NoGarbageCollectionScope no_gc(*this);
diff --git a/deps/v8/src/heap/cppgc-js/cpp-heap.h b/deps/v8/src/heap/cppgc-js/cpp-heap.h
index 8e4c047d1c..a2d11bcd39 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-heap.h
+++ b/deps/v8/src/heap/cppgc-js/cpp-heap.h
@@ -10,8 +10,10 @@ static_assert(
false, "V8 targets can not be built with cppgc_is_standalone set to true.");
#endif
+#include "include/v8-callbacks.h"
#include "include/v8-cppgc.h"
-#include "include/v8.h"
+#include "include/v8-embedder-heap.h"
+#include "include/v8-metrics.h"
#include "src/base/macros.h"
#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/stats-collector.h"
diff --git a/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc b/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
index dc55753ff6..9b20b5c0a7 100644
--- a/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
+++ b/deps/v8/src/heap/cppgc-js/cpp-snapshot.cc
@@ -264,6 +264,10 @@ class State final : public StateBase {
ephemeron_edges_.insert(&value);
}
+ void AddEagerEphemeronEdge(const void* value, cppgc::TraceCallback callback) {
+ eager_ephemeron_edges_.insert({value, callback});
+ }
+
template <typename Callback>
void ForAllEphemeronEdges(Callback callback) {
for (const HeapObjectHeader* value : ephemeron_edges_) {
@@ -271,10 +275,20 @@ class State final : public StateBase {
}
}
+ template <typename Callback>
+ void ForAllEagerEphemeronEdges(Callback callback) {
+ for (const auto& pair : eager_ephemeron_edges_) {
+ callback(pair.first, pair.second);
+ }
+ }
+
private:
bool is_weak_container_ = false;
// Values that are held alive through ephemerons by this particular key.
std::unordered_set<const HeapObjectHeader*> ephemeron_edges_;
+ // Values that are eagerly traced and held alive through ephemerons by this
+ // particular key.
+ std::unordered_map<const void*, cppgc::TraceCallback> eager_ephemeron_edges_;
};
// Root states are similar to regular states with the difference that they are
@@ -404,6 +418,9 @@ class CppGraphBuilderImpl final {
void VisitForVisibility(State& parent, const TracedReferenceBase&);
void VisitEphemeronForVisibility(const HeapObjectHeader& key,
const HeapObjectHeader& value);
+ void VisitEphemeronWithNonGarbageCollectedValueForVisibility(
+ const HeapObjectHeader& key, const void* value,
+ cppgc::TraceDescriptor value_desc);
void VisitWeakContainerForVisibility(const HeapObjectHeader&);
void VisitRootForGraphBuilding(RootState&, const HeapObjectHeader&,
const cppgc::SourceLocation&);
@@ -421,7 +438,7 @@ class CppGraphBuilderImpl final {
}
void AddEdge(State& parent, const HeapObjectHeader& header,
- const std::string& edge_name = {}) {
+ const std::string& edge_name) {
DCHECK(parent.IsVisibleNotDependent());
auto& current = states_.GetExistingState(header);
if (!current.IsVisibleNotDependent()) return;
@@ -443,7 +460,8 @@ class CppGraphBuilderImpl final {
}
}
- void AddEdge(State& parent, const TracedReferenceBase& ref) {
+ void AddEdge(State& parent, const TracedReferenceBase& ref,
+ const std::string& edge_name) {
DCHECK(parent.IsVisibleNotDependent());
v8::Local<v8::Value> v8_value = ref.Get(cpp_heap_.isolate());
if (!v8_value.IsEmpty()) {
@@ -451,12 +469,19 @@ class CppGraphBuilderImpl final {
parent.set_node(AddNode(*parent.header()));
}
auto* v8_node = graph_.V8Node(v8_value);
- graph_.AddEdge(parent.get_node(), v8_node);
+ if (!edge_name.empty()) {
+ graph_.AddEdge(parent.get_node(), v8_node,
+ parent.get_node()->InternalizeEdgeName(edge_name));
+ } else {
+ graph_.AddEdge(parent.get_node(), v8_node);
+ }
// References that have a class id set may have their internal fields
// pointing back to the object. Set up a wrapper node for the graph so
// that the snapshot generator can merge the nodes appropriately.
- if (!ref.WrapperClassId()) return;
+ // Even with a set class id, do not set up a wrapper node when the edge
+ // has a specific name.
+ if (!ref.WrapperClassId() || !edge_name.empty()) return;
void* back_reference_object = ExtractEmbedderDataBackref(
reinterpret_cast<v8::internal::Isolate*>(cpp_heap_.isolate()),
@@ -598,8 +623,18 @@ class WeakVisitor : public JSVisitor {
void VisitEphemeron(const void* key, const void* value,
cppgc::TraceDescriptor value_desc) final {
// For ephemerons, the key retains the value.
+ // The key must always be a GarbageCollected object.
+ auto& key_header = HeapObjectHeader::FromObject(key);
+ if (!value_desc.base_object_payload) {
+ // The value does not represent an actual GarbageCollected object and
+ // should instead be traced eagerly.
+ graph_builder_.VisitEphemeronWithNonGarbageCollectedValueForVisibility(
+ key_header, value, value_desc);
+ return;
+ }
+ // Regular path where both key and value are GarbageCollected objects.
graph_builder_.VisitEphemeronForVisibility(
- HeapObjectHeader::FromObject(key), HeapObjectHeader::FromObject(value));
+ key_header, HeapObjectHeader::FromObject(value));
}
protected:
@@ -645,7 +680,7 @@ class GraphBuildingVisitor final : public JSVisitor {
void Visit(const void*, cppgc::TraceDescriptor desc) final {
graph_builder_.AddEdge(
parent_scope_.ParentAsRegularState(),
- HeapObjectHeader::FromObject(desc.base_object_payload));
+ HeapObjectHeader::FromObject(desc.base_object_payload), edge_name_);
}
void VisitWeakContainer(const void* object,
cppgc::TraceDescriptor strong_desc,
@@ -655,7 +690,8 @@ class GraphBuildingVisitor final : public JSVisitor {
// container itself.
graph_builder_.AddEdge(
parent_scope_.ParentAsRegularState(),
- HeapObjectHeader::FromObject(strong_desc.base_object_payload));
+ HeapObjectHeader::FromObject(strong_desc.base_object_payload),
+ edge_name_);
}
void VisitRoot(const void*, cppgc::TraceDescriptor desc,
const cppgc::SourceLocation& loc) final {
@@ -667,12 +703,18 @@ class GraphBuildingVisitor final : public JSVisitor {
const void*, const cppgc::SourceLocation&) final {}
// JS handling.
void Visit(const TracedReferenceBase& ref) final {
- graph_builder_.AddEdge(parent_scope_.ParentAsRegularState(), ref);
+ graph_builder_.AddEdge(parent_scope_.ParentAsRegularState(), ref,
+ edge_name_);
+ }
+
+ void set_edge_name(std::string edge_name) {
+ edge_name_ = std::move(edge_name);
}
private:
CppGraphBuilderImpl& graph_builder_;
const ParentScope& parent_scope_;
+ std::string edge_name_;
};
// Base class for transforming recursion into iteration. Items are processed
@@ -765,6 +807,19 @@ void CppGraphBuilderImpl::VisitForVisibility(State* parent,
}
}
+void CppGraphBuilderImpl::
+ VisitEphemeronWithNonGarbageCollectedValueForVisibility(
+ const HeapObjectHeader& key, const void* value,
+ cppgc::TraceDescriptor value_desc) {
+ auto& key_state = states_.GetOrCreateState(key);
+ // Eagerly trace the value here, effectively marking the key as visible
+ // and queuing processing for all reachable values.
+ ParentScope parent_scope(key_state);
+ VisiblityVisitor visitor(*this, parent_scope);
+ value_desc.callback(&visitor, value);
+ key_state.AddEagerEphemeronEdge(value, value_desc.callback);
+}
+
void CppGraphBuilderImpl::VisitEphemeronForVisibility(
const HeapObjectHeader& key, const HeapObjectHeader& value) {
auto& key_state = states_.GetOrCreateState(key);
@@ -820,6 +875,12 @@ void CppGraphBuilderImpl::Run() {
state.ForAllEphemeronEdges([this, &state](const HeapObjectHeader& value) {
AddEdge(state, value, "part of key -> value pair in ephemeron table");
});
+ object_visitor.set_edge_name(
+ "part of key -> value pair in ephemeron table");
+ state.ForAllEagerEphemeronEdges(
+ [&object_visitor](const void* value, cppgc::TraceCallback callback) {
+ callback(&object_visitor, value);
+ });
});
// Add roots.
{
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h
index d98e2b54bf..388fa94aab 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-state.h
@@ -6,7 +6,6 @@
#define V8_HEAP_CPPGC_JS_UNIFIED_HEAP_MARKING_STATE_H_
#include "include/v8-cppgc.h"
-#include "include/v8.h"
#include "src/base/logging.h"
#include "src/heap/heap.h"
diff --git a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
index e9da1163e4..09564055dc 100644
--- a/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
+++ b/deps/v8/src/heap/cppgc-js/unified-heap-marking-visitor.cc
@@ -4,7 +4,6 @@
#include "src/heap/cppgc-js/unified-heap-marking-visitor.h"
-#include "include/v8.h"
#include "src/heap/cppgc-js/unified-heap-marking-state.h"
#include "src/heap/cppgc/heap.h"
#include "src/heap/cppgc/marking-state.h"
diff --git a/deps/v8/src/heap/cppgc/DEPS b/deps/v8/src/heap/cppgc/DEPS
new file mode 100644
index 0000000000..37049928d5
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+ "+include/cppgc",
+]
diff --git a/deps/v8/src/heap/cppgc/caged-heap-local-data.cc b/deps/v8/src/heap/cppgc/caged-heap-local-data.cc
index 55ededdc08..b1ce0df00f 100644
--- a/deps/v8/src/heap/cppgc/caged-heap-local-data.cc
+++ b/deps/v8/src/heap/cppgc/caged-heap-local-data.cc
@@ -13,6 +13,14 @@
namespace cppgc {
namespace internal {
+CagedHeapLocalData::CagedHeapLocalData(HeapBase& heap_base,
+ PageAllocator& allocator)
+ : heap_base(heap_base) {
+#if defined(CPPGC_YOUNG_GENERATION)
+ age_table.Reset(&allocator);
+#endif // defined(CPPGC_YOUNG_GENERATION)
+}
+
#if defined(CPPGC_YOUNG_GENERATION)
static_assert(
@@ -30,7 +38,7 @@ void AgeTable::Reset(PageAllocator* allocator) {
allocator->DiscardSystemPages(reinterpret_cast<void*>(begin), end - begin);
}
-#endif
+#endif // defined(CPPGC_YOUNG_GENERATION)
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/src/heap/cppgc/caged-heap.cc b/deps/v8/src/heap/cppgc/caged-heap.cc
index c43ea6e3a5..2b5fed4af5 100644
--- a/deps/v8/src/heap/cppgc/caged-heap.cc
+++ b/deps/v8/src/heap/cppgc/caged-heap.cc
@@ -27,18 +27,17 @@ STATIC_ASSERT(api_constants::kCagedHeapReservationAlignment ==
namespace {
-VirtualMemory ReserveCagedHeap(PageAllocator* platform_allocator) {
- DCHECK_NOT_NULL(platform_allocator);
+VirtualMemory ReserveCagedHeap(PageAllocator& platform_allocator) {
DCHECK_EQ(0u,
- kCagedHeapReservationSize % platform_allocator->AllocatePageSize());
+ kCagedHeapReservationSize % platform_allocator.AllocatePageSize());
static constexpr size_t kAllocationTries = 4;
for (size_t i = 0; i < kAllocationTries; ++i) {
void* hint = reinterpret_cast<void*>(RoundDown(
- reinterpret_cast<uintptr_t>(platform_allocator->GetRandomMmapAddr()),
+ reinterpret_cast<uintptr_t>(platform_allocator.GetRandomMmapAddr()),
kCagedHeapReservationAlignment));
- VirtualMemory memory(platform_allocator, kCagedHeapReservationSize,
+ VirtualMemory memory(&platform_allocator, kCagedHeapReservationSize,
kCagedHeapReservationAlignment, hint);
if (memory.IsReserved()) return memory;
}
@@ -70,23 +69,19 @@ class CppgcBoundedPageAllocator final : public v8::base::BoundedPageAllocator {
} // namespace
-CagedHeap::CagedHeap(HeapBase* heap_base, PageAllocator* platform_allocator)
+CagedHeap::CagedHeap(HeapBase& heap_base, PageAllocator& platform_allocator)
: reserved_area_(ReserveCagedHeap(platform_allocator)) {
using CagedAddress = CagedHeap::AllocatorType::Address;
- DCHECK_NOT_NULL(heap_base);
-
- CHECK(platform_allocator->SetPermissions(
+ const bool is_not_oom = platform_allocator.SetPermissions(
reserved_area_.address(),
- RoundUp(sizeof(CagedHeapLocalData), platform_allocator->CommitPageSize()),
- PageAllocator::kReadWrite));
+ RoundUp(sizeof(CagedHeapLocalData), platform_allocator.CommitPageSize()),
+ PageAllocator::kReadWrite);
+ // Failing to commit the reservation means that we are out of memory.
+ CHECK(is_not_oom);
- auto* local_data =
- new (reserved_area_.address()) CagedHeapLocalData(heap_base);
-#if defined(CPPGC_YOUNG_GENERATION)
- local_data->age_table.Reset(platform_allocator);
-#endif
- USE(local_data);
+ new (reserved_area_.address())
+ CagedHeapLocalData(heap_base, platform_allocator);
const CagedAddress caged_heap_start =
RoundUp(reinterpret_cast<CagedAddress>(reserved_area_.address()) +
@@ -97,7 +92,7 @@ CagedHeap::CagedHeap(HeapBase* heap_base, PageAllocator* platform_allocator)
reinterpret_cast<CagedAddress>(reserved_area_.address());
bounded_allocator_ = std::make_unique<CppgcBoundedPageAllocator>(
- platform_allocator, caged_heap_start,
+ &platform_allocator, caged_heap_start,
reserved_area_.size() - local_data_size_with_padding, kPageSize);
}
diff --git a/deps/v8/src/heap/cppgc/caged-heap.h b/deps/v8/src/heap/cppgc/caged-heap.h
index 7ac34624a0..89b2f7f112 100644
--- a/deps/v8/src/heap/cppgc/caged-heap.h
+++ b/deps/v8/src/heap/cppgc/caged-heap.h
@@ -22,7 +22,17 @@ class CagedHeap final {
public:
using AllocatorType = v8::base::BoundedPageAllocator;
- CagedHeap(HeapBase* heap, PageAllocator* platform_allocator);
+ static uintptr_t OffsetFromAddress(const void* address) {
+ return reinterpret_cast<uintptr_t>(address) &
+ (kCagedHeapReservationAlignment - 1);
+ }
+
+ static uintptr_t BaseFromAddress(const void* address) {
+ return reinterpret_cast<uintptr_t>(address) &
+ ~(kCagedHeapReservationAlignment - 1);
+ }
+
+ CagedHeap(HeapBase& heap, PageAllocator& platform_allocator);
CagedHeap(const CagedHeap&) = delete;
CagedHeap& operator=(const CagedHeap&) = delete;
@@ -37,13 +47,13 @@ class CagedHeap final {
return *static_cast<CagedHeapLocalData*>(reserved_area_.address());
}
- static uintptr_t OffsetFromAddress(void* address) {
- return reinterpret_cast<uintptr_t>(address) &
- (kCagedHeapReservationAlignment - 1);
+ bool IsOnHeap(const void* address) const {
+ return reinterpret_cast<void*>(BaseFromAddress(address)) ==
+ reserved_area_.address();
}
private:
- VirtualMemory reserved_area_;
+ const VirtualMemory reserved_area_;
std::unique_ptr<AllocatorType> bounded_allocator_;
};
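OffsetFromAddress() and BaseFromAddress() above rely on the cage reservation being aligned to kCagedHeapReservationAlignment, so both reduce to a single bitmask. A sketch of the arithmetic, using an assumed 4 GiB alignment purely for illustration:

    #include <cassert>
    #include <cstdint>

    constexpr std::uint64_t kCageAlignment =
        std::uint64_t{4} * 1024 * 1024 * 1024;

    constexpr std::uint64_t OffsetFromAddress(std::uint64_t address) {
      return address & (kCageAlignment - 1);
    }

    constexpr std::uint64_t BaseFromAddress(std::uint64_t address) {
      return address & ~(kCageAlignment - 1);
    }

    int main() {
      constexpr std::uint64_t address = kCageAlignment * 3 + 0x1234;
      static_assert(BaseFromAddress(address) == kCageAlignment * 3,
                    "base strips the offset bits");
      static_assert(OffsetFromAddress(address) == 0x1234,
                    "offset strips the base bits");
      assert(BaseFromAddress(address) + OffsetFromAddress(address) == address);
    }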
diff --git a/deps/v8/src/heap/cppgc/gc-info.cc b/deps/v8/src/heap/cppgc/gc-info.cc
index de57805dcb..4c555106fd 100644
--- a/deps/v8/src/heap/cppgc/gc-info.cc
+++ b/deps/v8/src/heap/cppgc/gc-info.cc
@@ -3,19 +3,86 @@
// found in the LICENSE file.
#include "include/cppgc/internal/gc-info.h"
+
+#include "include/cppgc/internal/name-trait.h"
#include "include/v8config.h"
#include "src/heap/cppgc/gc-info-table.h"
namespace cppgc {
namespace internal {
-GCInfoIndex EnsureGCInfoIndex(std::atomic<GCInfoIndex>& registered_index,
- FinalizationCallback finalization_callback,
- TraceCallback trace_callback,
- NameCallback name_callback, bool has_v_table) {
+namespace {
+
+HeapObjectName GetHiddenName(const void*) {
+ return {NameProvider::kHiddenName, true};
+}
+
+} // namespace
+
+// static
+GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
+ std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
+ FinalizationCallback finalization_callback, NameCallback name_callback) {
return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
registered_index,
- {finalization_callback, trace_callback, name_callback, has_v_table});
+ {finalization_callback, trace_callback, name_callback, true});
+}
+
+// static
+GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
+ std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
+ FinalizationCallback finalization_callback) {
+ return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ registered_index,
+ {finalization_callback, trace_callback, GetHiddenName, true});
+}
+
+// static
+GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
+ std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
+ NameCallback name_callback) {
+ return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ registered_index, {nullptr, trace_callback, name_callback, true});
+}
+
+// static
+GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexPolymorphic(
+ std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback) {
+ return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ registered_index, {nullptr, trace_callback, GetHiddenName, true});
+}
+
+// static
+GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
+ std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
+ FinalizationCallback finalization_callback, NameCallback name_callback) {
+ return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ registered_index,
+ {finalization_callback, trace_callback, name_callback, false});
+}
+
+// static
+GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
+ std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
+ FinalizationCallback finalization_callback) {
+ return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ registered_index,
+ {finalization_callback, trace_callback, GetHiddenName, false});
+}
+
+// static
+GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
+ std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback,
+ NameCallback name_callback) {
+ return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ registered_index, {nullptr, trace_callback, name_callback, false});
+}
+
+// static
+GCInfoIndex EnsureGCInfoIndexTrait::EnsureGCInfoIndexNonPolymorphic(
+ std::atomic<GCInfoIndex>& registered_index, TraceCallback trace_callback) {
+ return GlobalGCInfoTable::GetMutable().RegisterNewGCInfo(
+ registered_index, {nullptr, trace_callback, GetHiddenName, false});
}
} // namespace internal
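The rewritten gc-info.cc replaces one entry point taking a has_v_table bool and nullable callbacks with an overload set: each combination of (polymorphic, finalizer, name callback) gets its own function, and missing callbacks fall back to nullptr or GetHiddenName. A compressed sketch of that shape, with invented stand-in types:

    #include <cstdio>

    using TraceCallback = void (*)(void*);
    using FinalizationCallback = void (*)(void*);

    struct GCInfo {
      FinalizationCallback finalize;  // nullptr when the type needs none.
      TraceCallback trace;
      bool is_polymorphic;
    };

    GCInfo Register(TraceCallback trace, FinalizationCallback finalize,
                    bool is_polymorphic) {
      return {finalize, trace, is_polymorphic};
    }

    // Overload set: the call site encodes which capabilities the type has,
    // so callers never pass placeholder arguments.
    GCInfo RegisterPolymorphic(TraceCallback trace) {
      return Register(trace, nullptr, true);
    }
    GCInfo RegisterPolymorphic(TraceCallback trace,
                               FinalizationCallback finalize) {
      return Register(trace, finalize, true);
    }

    int main() {
      GCInfo info = RegisterPolymorphic([](void*) {});
      std::printf("has finalizer: %d\n", info.finalize != nullptr);
    }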
diff --git a/deps/v8/src/heap/cppgc/heap-base.cc b/deps/v8/src/heap/cppgc/heap-base.cc
index c89c2842f9..db16019b61 100644
--- a/deps/v8/src/heap/cppgc/heap-base.cc
+++ b/deps/v8/src/heap/cppgc/heap-base.cc
@@ -17,6 +17,7 @@
#include "src/heap/cppgc/marking-verifier.h"
#include "src/heap/cppgc/object-view.h"
#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/platform.h"
#include "src/heap/cppgc/prefinalizer-handler.h"
#include "src/heap/cppgc/stats-collector.h"
@@ -56,23 +57,26 @@ HeapBase::HeapBase(
StackSupport stack_support)
: raw_heap_(this, custom_spaces),
platform_(std::move(platform)),
+ oom_handler_(std::make_unique<FatalOutOfMemoryHandler>(this)),
#if defined(LEAK_SANITIZER)
lsan_page_allocator_(std::make_unique<v8::base::LsanPageAllocator>(
platform_->GetPageAllocator())),
#endif // LEAK_SANITIZER
#if defined(CPPGC_CAGED_HEAP)
- caged_heap_(this, page_allocator()),
- page_backend_(std::make_unique<PageBackend>(&caged_heap_.allocator())),
+ caged_heap_(*this, *page_allocator()),
+ page_backend_(std::make_unique<PageBackend>(caged_heap_.allocator(),
+ *oom_handler_.get())),
#else // !CPPGC_CAGED_HEAP
- page_backend_(std::make_unique<PageBackend>(page_allocator())),
+ page_backend_(std::make_unique<PageBackend>(*page_allocator(),
+ *oom_handler_.get())),
#endif // !CPPGC_CAGED_HEAP
stats_collector_(std::make_unique<StatsCollector>(platform_.get())),
stack_(std::make_unique<heap::base::Stack>(
v8::base::Stack::GetStackStart())),
prefinalizer_handler_(std::make_unique<PreFinalizerHandler>(*this)),
compactor_(raw_heap_),
- object_allocator_(&raw_heap_, page_backend_.get(),
- stats_collector_.get()),
+ object_allocator_(raw_heap_, *page_backend_, *stats_collector_,
+ *prefinalizer_handler_),
sweeper_(*this),
stack_support_(stack_support) {
stats_collector_->RegisterObserver(
@@ -96,10 +100,17 @@ size_t HeapBase::ObjectPayloadSize() const {
void HeapBase::AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded() {
if (marker_) marker_->AdvanceMarkingOnAllocation();
}
-void HeapBase::ExecutePreFinalizers() {
+
+size_t HeapBase::ExecutePreFinalizers() {
+#ifdef CPPGC_ALLOW_ALLOCATIONS_IN_PREFINALIZERS
+ // Allocations in pre finalizers should not trigger another GC.
+ cppgc::subtle::NoGarbageCollectionScope no_gc_scope(*this);
+#else
// Pre finalizers are forbidden from allocating objects.
cppgc::subtle::DisallowGarbageCollectionScope no_gc_scope(*this);
+#endif // CPPGC_ALLOW_ALLOCATIONS_IN_PREFINALIZERS
prefinalizer_handler_->InvokePreFinalizers();
+ return prefinalizer_handler_->ExtractBytesAllocatedInPrefinalizers();
}
void HeapBase::Terminate() {
@@ -110,6 +121,7 @@ void HeapBase::Terminate() {
constexpr size_t kMaxTerminationGCs = 20;
size_t gc_count = 0;
+ bool more_termination_gcs_needed = false;
do {
CHECK_LT(gc_count++, kMaxTerminationGCs);
@@ -132,7 +144,14 @@ void HeapBase::Terminate() {
{Sweeper::SweepingConfig::SweepingType::kAtomic,
Sweeper::SweepingConfig::CompactableSpaceHandling::kSweep});
sweeper().NotifyDoneIfNeeded();
- } while (strong_persistent_region_.NodesInUse() > 0);
+ more_termination_gcs_needed =
+ strong_persistent_region_.NodesInUse() ||
+ weak_persistent_region_.NodesInUse() || [this]() {
+ PersistentRegionLock guard;
+ return strong_cross_thread_persistent_region_.NodesInUse() ||
+ weak_cross_thread_persistent_region_.NodesInUse();
+ }();
+ } while (more_termination_gcs_needed);
object_allocator().Terminate();
disallow_gc_scope_++;
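Terminate() now keeps running termination GCs while any of the four persistent regions still has live nodes, and takes PersistentRegionLock only around the cross-thread checks by scoping them inside an immediately invoked lambda. A sketch of that predicate's shape, with stand-in types:

    #include <cstddef>
    #include <mutex>

    struct Region {
      std::size_t nodes_in_use = 0;
      std::size_t NodesInUse() const { return nodes_in_use; }
    };

    std::mutex cross_thread_mutex;

    bool MoreTerminationGCsNeeded(const Region& strong, const Region& weak,
                                  const Region& cross_strong,
                                  const Region& cross_weak) {
      // Cheap main-thread checks short-circuit before the lambda takes the
      // lock that guards the cross-thread regions.
      return strong.NodesInUse() || weak.NodesInUse() || [&]() {
        std::lock_guard<std::mutex> guard(cross_thread_mutex);
        return cross_strong.NodesInUse() || cross_weak.NodesInUse();
      }();
    }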
diff --git a/deps/v8/src/heap/cppgc/heap-base.h b/deps/v8/src/heap/cppgc/heap-base.h
index 91f99b39cc..6196955a3e 100644
--- a/deps/v8/src/heap/cppgc/heap-base.h
+++ b/deps/v8/src/heap/cppgc/heap-base.h
@@ -18,6 +18,7 @@
#include "src/heap/cppgc/marker.h"
#include "src/heap/cppgc/metric-recorder.h"
#include "src/heap/cppgc/object-allocator.h"
+#include "src/heap/cppgc/platform.h"
#include "src/heap/cppgc/process-heap-statistics.h"
#include "src/heap/cppgc/process-heap.h"
#include "src/heap/cppgc/raw-heap.h"
@@ -65,6 +66,7 @@ namespace testing {
class TestWithHeap;
} // namespace testing
+class FatalOutOfMemoryHandler;
class PageBackend;
class PreFinalizerHandler;
class StatsCollector;
@@ -95,6 +97,11 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
cppgc::Platform* platform() { return platform_.get(); }
const cppgc::Platform* platform() const { return platform_.get(); }
+ FatalOutOfMemoryHandler& oom_handler() { return *oom_handler_.get(); }
+ const FatalOutOfMemoryHandler& oom_handler() const {
+ return *oom_handler_.get();
+ }
+
PageBackend* page_backend() { return page_backend_.get(); }
const PageBackend* page_backend() const { return page_backend_.get(); }
@@ -208,12 +215,14 @@ class V8_EXPORT_PRIVATE HeapBase : public cppgc::HeapHandle {
bool IsMarking() const { return marker_.get(); }
- void ExecutePreFinalizers();
+ // Returns amount of bytes allocated while executing prefinalizers.
+ size_t ExecutePreFinalizers();
PageAllocator* page_allocator() const;
RawHeap raw_heap_;
std::shared_ptr<cppgc::Platform> platform_;
+ std::unique_ptr<FatalOutOfMemoryHandler> oom_handler_;
#if defined(LEAK_SANITIZER)
std::unique_ptr<v8::base::LsanPageAllocator> lsan_page_allocator_;
diff --git a/deps/v8/src/heap/cppgc/heap-object-header.h b/deps/v8/src/heap/cppgc/heap-object-header.h
index a50d115e52..97a65fbf20 100644
--- a/deps/v8/src/heap/cppgc/heap-object-header.h
+++ b/deps/v8/src/heap/cppgc/heap-object-header.h
@@ -91,6 +91,8 @@ class HeapObjectHeader {
void Unmark();
inline bool TryMarkAtomic();
+ inline void MarkNonAtomic();
+
template <AccessMode = AccessMode::kNonAtomic>
bool IsYoung() const;
@@ -266,6 +268,11 @@ bool HeapObjectHeader::TryMarkAtomic() {
std::memory_order_relaxed);
}
+void HeapObjectHeader::MarkNonAtomic() {
+ DCHECK(!IsMarked<AccessMode::kNonAtomic>());
+ encoded_low_ |= MarkBitField::encode(true);
+}
+
template <AccessMode mode>
bool HeapObjectHeader::IsYoung() const {
return !IsMarked<mode>();
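MarkNonAtomic() added above complements TryMarkAtomic() for phases where no concurrent marker can run, such as while prefinalizers execute: the mark bit can simply be OR-ed into the encoded header field. A stand-alone sketch of that bit manipulation (MarkBitField here is a hand-rolled stand-in for V8's BitField):

    #include <cassert>
    #include <cstdint>

    struct MarkBitField {
      static constexpr std::uint16_t kMask = 1u << 0;
      static constexpr std::uint16_t encode(bool value) {
        return value ? kMask : 0;
      }
      static constexpr bool decode(std::uint16_t bits) {
        return (bits & kMask) != 0;
      }
    };

    struct HeapObjectHeader {
      std::uint16_t encoded_low_ = 0;

      bool IsMarked() const { return MarkBitField::decode(encoded_low_); }

      // Safe only when no concurrent marker may touch the header.
      void MarkNonAtomic() {
        assert(!IsMarked());
        encoded_low_ |= MarkBitField::encode(true);
      }
    };

    int main() {
      HeapObjectHeader header;
      header.MarkNonAtomic();
      assert(header.IsMarked());
    }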
diff --git a/deps/v8/src/heap/cppgc/heap.cc b/deps/v8/src/heap/cppgc/heap.cc
index 58252a20ab..a4e514a7c2 100644
--- a/deps/v8/src/heap/cppgc/heap.cc
+++ b/deps/v8/src/heap/cppgc/heap.cc
@@ -187,13 +187,17 @@ void Heap::FinalizeGarbageCollection(Config::StackState stack_state) {
marker_->FinishMarking(config_.stack_state);
}
marker_.reset();
- ExecutePreFinalizers();
- // TODO(chromium:1056170): replace build flag with dedicated flag.
-#if DEBUG
+ const size_t bytes_allocated_in_prefinalizers = ExecutePreFinalizers();
+#if CPPGC_VERIFY_HEAP
MarkingVerifier verifier(*this);
- verifier.Run(config_.stack_state, stack_end_of_current_gc(),
- stats_collector()->marked_bytes());
+ verifier.Run(
+ config_.stack_state, stack_end_of_current_gc(),
+ stats_collector()->marked_bytes() + bytes_allocated_in_prefinalizers);
+#endif // CPPGC_VERIFY_HEAP
+#ifndef CPPGC_ALLOW_ALLOCATIONS_IN_PREFINALIZERS
+ DCHECK_EQ(0u, bytes_allocated_in_prefinalizers);
#endif
+ USE(bytes_allocated_in_prefinalizers);
subtle::NoGarbageCollectionScope no_gc(*this);
const Sweeper::SweepingConfig sweeping_config{
diff --git a/deps/v8/src/heap/cppgc/marker.cc b/deps/v8/src/heap/cppgc/marker.cc
index 549a9fe1da..e290787a59 100644
--- a/deps/v8/src/heap/cppgc/marker.cc
+++ b/deps/v8/src/heap/cppgc/marker.cc
@@ -38,7 +38,7 @@ bool EnterIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater::Enter();
#if defined(CPPGC_CAGED_HEAP)
heap.caged_heap().local_data().is_incremental_marking_in_progress = true;
-#endif
+#endif // defined(CPPGC_CAGED_HEAP)
return true;
}
return false;
@@ -52,7 +52,7 @@ bool ExitIncrementalMarkingIfNeeded(Marker::MarkingConfig config,
WriteBarrier::IncrementalOrConcurrentMarkingFlagUpdater::Exit();
#if defined(CPPGC_CAGED_HEAP)
heap.caged_heap().local_data().is_incremental_marking_in_progress = false;
-#endif
+#endif // defined(CPPGC_CAGED_HEAP)
return true;
}
return false;
@@ -421,7 +421,9 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
size_t marked_bytes_deadline, v8::base::TimeTicks time_deadline) {
StatsCollector::EnabledScope stats_scope(
heap().stats_collector(), StatsCollector::kMarkTransitiveClosure);
+ bool saved_did_discover_new_ephemeron_pairs;
do {
+ mutator_marking_state_.ResetDidDiscoverNewEphemeronPairs();
if ((config_.marking_type == MarkingConfig::MarkingType::kAtomic) ||
schedule_.ShouldFlushEphemeronPairs()) {
mutator_marking_state_.FlushDiscoveredEphemeronPairs();
@@ -509,6 +511,8 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
}
}
+ saved_did_discover_new_ephemeron_pairs =
+ mutator_marking_state_.DidDiscoverNewEphemeronPairs();
{
StatsCollector::EnabledScope stats_scope(
heap().stats_collector(), StatsCollector::kMarkProcessEphemerons);
@@ -522,7 +526,8 @@ bool MarkerBase::ProcessWorklistsWithDeadline(
return false;
}
}
- } while (!mutator_marking_state_.marking_worklist().IsLocalAndGlobalEmpty());
+ } while (!mutator_marking_state_.marking_worklist().IsLocalAndGlobalEmpty() ||
+ saved_did_discover_new_ephemeron_pairs);
return true;
}
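The marker change turns ProcessWorklistsWithDeadline() into a fixed-point loop over two conditions: the worklist must be empty and the previous pass must not have discovered new ephemeron pairs. A skeletal sketch of that control flow (stand-in types, deadlines omitted):

    #include <deque>

    struct MarkingState {
      std::deque<int> worklist;
      bool discovered_new_ephemeron_pairs = false;

      void ResetDidDiscoverNewEphemeronPairs() {
        discovered_new_ephemeron_pairs = false;
      }
      bool DidDiscoverNewEphemeronPairs() const {
        return discovered_new_ephemeron_pairs;
      }
    };

    void ProcessWorklists(MarkingState& state) {
      bool saved_did_discover_new_ephemeron_pairs;
      do {
        state.ResetDidDiscoverNewEphemeronPairs();
        while (!state.worklist.empty()) {
          state.worklist.pop_front();  // Drain and trace regular entries.
        }
        // Snapshot before ephemeron processing, which may push new work and
        // set the discovery bit again for the next pass.
        saved_did_discover_new_ephemeron_pairs =
            state.DidDiscoverNewEphemeronPairs();
        // ... process discovered ephemeron pairs here ...
      } while (!state.worklist.empty() ||
               saved_did_discover_new_ephemeron_pairs);
    }

    int main() {
      MarkingState state;
      state.worklist.push_back(1);
      ProcessWorklists(state);
    }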
diff --git a/deps/v8/src/heap/cppgc/marking-state.h b/deps/v8/src/heap/cppgc/marking-state.h
index 17e64e6fbe..864c8209b7 100644
--- a/deps/v8/src/heap/cppgc/marking-state.h
+++ b/deps/v8/src/heap/cppgc/marking-state.h
@@ -115,6 +115,14 @@ class MarkingStateBase {
movable_slots_worklist_.reset();
}
+ bool DidDiscoverNewEphemeronPairs() const {
+ return discovered_new_ephemeron_pairs_;
+ }
+
+ void ResetDidDiscoverNewEphemeronPairs() {
+ discovered_new_ephemeron_pairs_ = false;
+ }
+
protected:
inline void MarkAndPush(HeapObjectHeader&, TraceDescriptor);
@@ -150,6 +158,8 @@ class MarkingStateBase {
movable_slots_worklist_;
size_t marked_bytes_ = 0;
+ bool in_ephemeron_processing_ = false;
+ bool discovered_new_ephemeron_pairs_ = false;
};
MarkingStateBase::MarkingStateBase(HeapBase& heap,
@@ -286,10 +296,16 @@ void MarkingStateBase::ProcessWeakContainer(const void* object,
void MarkingStateBase::ProcessEphemeron(const void* key, const void* value,
TraceDescriptor value_desc,
Visitor& visitor) {
+ // ProcessEphemeron is not expected to find new ephemerons recursively, which
+ // would break the main marking loop.
+ DCHECK(!in_ephemeron_processing_);
+ in_ephemeron_processing_ = true;
// Filter out already marked keys. The write barrier for WeakMember
// ensures that any newly set value after this point is kept alive and does
// not require the callback.
- if (HeapObjectHeader::FromObject(key).IsMarked<AccessMode::kAtomic>()) {
+ if (!HeapObjectHeader::FromObject(key)
+ .IsInConstruction<AccessMode::kAtomic>() &&
+ HeapObjectHeader::FromObject(key).IsMarked<AccessMode::kAtomic>()) {
if (value_desc.base_object_payload) {
MarkAndPush(value_desc.base_object_payload, value_desc);
} else {
@@ -297,9 +313,11 @@ void MarkingStateBase::ProcessEphemeron(const void* key, const void* value,
// should be immediately traced.
value_desc.callback(&visitor, value);
}
- return;
+ } else {
+ discovered_ephemeron_pairs_worklist_.Push({key, value, value_desc});
+ discovered_new_ephemeron_pairs_ = true;
}
- discovered_ephemeron_pairs_worklist_.Push({key, value, value_desc});
+ in_ephemeron_processing_ = false;
}
void MarkingStateBase::AccountMarkedBytes(const HeapObjectHeader& header) {
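ProcessEphemeron() above now traces the value immediately only when the key is fully constructed and marked; otherwise the pair is re-queued and the discovery bit is set so the marking loop (see the marker.cc hunk earlier) runs another pass. A reduced sketch of that decision, with stand-in types:

    #include <cassert>
    #include <deque>

    struct Pair {
      bool key_in_construction;
      bool key_marked;
    };

    struct State {
      std::deque<Pair> discovered;
      bool discovered_new_pairs = false;
      bool in_ephemeron_processing = false;
    };

    // Returns true if the value should be traced immediately.
    bool ProcessEphemeron(State& state, const Pair& pair) {
      // Not reentrant: recursive discovery would break the marking loop.
      assert(!state.in_ephemeron_processing);
      state.in_ephemeron_processing = true;
      const bool trace_now = !pair.key_in_construction && pair.key_marked;
      if (!trace_now) {
        state.discovered.push_back(pair);
        state.discovered_new_pairs = true;  // Forces another marking pass.
      }
      state.in_ephemeron_processing = false;
      return trace_now;
    }

    int main() {
      State state;
      ProcessEphemeron(state, Pair{false, false});  // Re-queued: key unmarked.
    }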
diff --git a/deps/v8/src/heap/cppgc/marking-verifier.cc b/deps/v8/src/heap/cppgc/marking-verifier.cc
index 4d2ebcff1d..0dbda1159c 100644
--- a/deps/v8/src/heap/cppgc/marking-verifier.cc
+++ b/deps/v8/src/heap/cppgc/marking-verifier.cc
@@ -21,9 +21,9 @@ MarkingVerifierBase::MarkingVerifierBase(
verification_state_(verification_state),
visitor_(std::move(visitor)) {}
-void MarkingVerifierBase::Run(Heap::Config::StackState stack_state,
- uintptr_t stack_end,
- size_t expected_marked_bytes) {
+void MarkingVerifierBase::Run(
+ Heap::Config::StackState stack_state, uintptr_t stack_end,
+ v8::base::Optional<size_t> expected_marked_bytes) {
Traverse(heap_.raw_heap());
if (stack_state == Heap::Config::StackState::kMayContainHeapPointers) {
in_construction_objects_ = &in_construction_objects_stack_;
@@ -38,9 +38,9 @@ void MarkingVerifierBase::Run(Heap::Config::StackState stack_state,
in_construction_objects_heap_.find(header));
}
}
-#ifdef CPPGC_VERIFY_LIVE_BYTES
- CHECK_EQ(expected_marked_bytes, found_marked_bytes_);
-#endif // CPPGC_VERIFY_LIVE_BYTES
+ if (expected_marked_bytes) {
+ CHECK_EQ(expected_marked_bytes.value(), found_marked_bytes_);
+ }
}
void VerificationState::VerifyMarked(const void* base_object_payload) const {
diff --git a/deps/v8/src/heap/cppgc/marking-verifier.h b/deps/v8/src/heap/cppgc/marking-verifier.h
index 72d49daa76..ca588f40d8 100644
--- a/deps/v8/src/heap/cppgc/marking-verifier.h
+++ b/deps/v8/src/heap/cppgc/marking-verifier.h
@@ -7,6 +7,7 @@
#include <unordered_set>
+#include "src/base/optional.h"
#include "src/heap/base/stack.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-visitor.h"
@@ -40,7 +41,7 @@ class V8_EXPORT_PRIVATE MarkingVerifierBase
MarkingVerifierBase(const MarkingVerifierBase&) = delete;
MarkingVerifierBase& operator=(const MarkingVerifierBase&) = delete;
- void Run(Heap::Config::StackState, uintptr_t, size_t);
+ void Run(Heap::Config::StackState, uintptr_t, v8::base::Optional<size_t>);
protected:
MarkingVerifierBase(HeapBase&, VerificationState&,
diff --git a/deps/v8/src/heap/cppgc/memory.cc b/deps/v8/src/heap/cppgc/memory.cc
index aa3baeaa8a..6d81957325 100644
--- a/deps/v8/src/heap/cppgc/memory.cc
+++ b/deps/v8/src/heap/cppgc/memory.cc
@@ -12,7 +12,7 @@ namespace cppgc {
namespace internal {
void NoSanitizeMemset(void* address, char c, size_t bytes) {
- volatile Address base = reinterpret_cast<Address>(address);
+ volatile uint8_t* const base = static_cast<uint8_t*>(address);
for (size_t i = 0; i < bytes; ++i) {
base[i] = c;
}
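The one-line fix above is subtle: `volatile Address base` only made the pointer variable itself volatile, so the byte stores were ordinary and the compiler could legally fuse the loop into a memset call, which sanitizers intercept. `volatile uint8_t* const base` makes every store volatile instead. The fixed function in isolation, with a tiny driver:

    #include <cstddef>
    #include <cstdint>

    // Each store is volatile, so it must be emitted byte by byte and cannot
    // be replaced with a library memset call.
    void NoSanitizeMemset(void* address, char c, std::size_t bytes) {
      volatile std::uint8_t* const base =
          static_cast<std::uint8_t*>(address);
      for (std::size_t i = 0; i < bytes; ++i) {
        base[i] = c;
      }
    }

    int main() {
      char buffer[16];
      NoSanitizeMemset(buffer, 0, sizeof(buffer));
    }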
diff --git a/deps/v8/src/heap/cppgc/memory.h b/deps/v8/src/heap/cppgc/memory.h
index adc2ce9bb3..3b9f6cb62c 100644
--- a/deps/v8/src/heap/cppgc/memory.h
+++ b/deps/v8/src/heap/cppgc/memory.h
@@ -117,7 +117,11 @@ V8_INLINE void CheckMemoryIsInaccessible(const void* address, size_t size) {
static_assert(!CheckMemoryIsInaccessibleIsNoop(),
"CheckMemoryIsInaccessibleIsNoop() needs to reflect "
"CheckMemoryIsInaccessible().");
- ASAN_CHECK_MEMORY_REGION_IS_POISONED(address, size);
+ // Only check whether memory is poisoned on 64 bit, since there we make sure
+ // that object sizes and alignments are a multiple of the shadow memory
+ // granularity.
+#if defined(V8_TARGET_ARCH_64_BIT)
+ ASAN_CHECK_WHOLE_MEMORY_REGION_IS_POISONED(address, size);
+#endif
ASAN_UNPOISON_MEMORY_REGION(address, size);
CheckMemoryIsZero(address, size);
ASAN_POISON_MEMORY_REGION(address, size);
diff --git a/deps/v8/src/heap/cppgc/object-allocator.cc b/deps/v8/src/heap/cppgc/object-allocator.cc
index 191e73e6d8..0f85d43c1c 100644
--- a/deps/v8/src/heap/cppgc/object-allocator.cc
+++ b/deps/v8/src/heap/cppgc/object-allocator.cc
@@ -16,6 +16,7 @@
#include "src/heap/cppgc/memory.h"
#include "src/heap/cppgc/object-start-bitmap.h"
#include "src/heap/cppgc/page-memory.h"
+#include "src/heap/cppgc/prefinalizer-handler.h"
#include "src/heap/cppgc/stats-collector.h"
#include "src/heap/cppgc/sweeper.h"
@@ -39,7 +40,7 @@ void MarkRangeAsYoung(BasePage* page, Address begin, Address end) {
? RoundUp(offset_end, kEntrySize)
: RoundDown(offset_end, kEntrySize);
- auto& age_table = page->heap()->caged_heap().local_data().age_table;
+ auto& age_table = page->heap().caged_heap().local_data().age_table;
for (auto offset = young_offset_begin; offset < young_offset_end;
offset += AgeTable::kEntrySizeInBytes) {
age_table[offset] = AgeTable::Age::kYoung;
@@ -82,16 +83,16 @@ void ReplaceLinearAllocationBuffer(NormalPageSpace& space,
}
}
-void* AllocateLargeObject(PageBackend* page_backend, LargePageSpace* space,
- StatsCollector* stats_collector, size_t size,
+void* AllocateLargeObject(PageBackend& page_backend, LargePageSpace& space,
+ StatsCollector& stats_collector, size_t size,
GCInfoIndex gcinfo) {
- LargePage* page = LargePage::Create(*page_backend, *space, size);
- space->AddPage(page);
+ LargePage* page = LargePage::Create(page_backend, space, size);
+ space.AddPage(page);
auto* header = new (page->ObjectHeader())
HeapObjectHeader(HeapObjectHeader::kLargeObjectSizeInHeader, gcinfo);
- stats_collector->NotifyAllocation(size);
+ stats_collector.NotifyAllocation(size);
MarkRangeAsYoung(page, page->PayloadStart(), page->PayloadEnd());
return header->ObjectStart();
@@ -101,17 +102,29 @@ void* AllocateLargeObject(PageBackend* page_backend, LargePageSpace* space,
constexpr size_t ObjectAllocator::kSmallestSpaceSize;
-ObjectAllocator::ObjectAllocator(RawHeap* heap, PageBackend* page_backend,
- StatsCollector* stats_collector)
+ObjectAllocator::ObjectAllocator(RawHeap& heap, PageBackend& page_backend,
+ StatsCollector& stats_collector,
+ PreFinalizerHandler& prefinalizer_handler)
: raw_heap_(heap),
page_backend_(page_backend),
- stats_collector_(stats_collector) {}
+ stats_collector_(stats_collector),
+ prefinalizer_handler_(prefinalizer_handler) {}
void* ObjectAllocator::OutOfLineAllocate(NormalPageSpace& space, size_t size,
GCInfoIndex gcinfo) {
void* memory = OutOfLineAllocateImpl(space, size, gcinfo);
- stats_collector_->NotifySafePointForConservativeCollection();
- raw_heap_->heap()->AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded();
+ stats_collector_.NotifySafePointForConservativeCollection();
+ raw_heap_.heap()->AdvanceIncrementalGarbageCollectionOnAllocationIfNeeded();
+ if (prefinalizer_handler_.IsInvokingPreFinalizers()) {
+ // Objects allocated during pre finalizers should be allocated as black
+ // since marking is already done. Atomics are not needed because there is
+ // no concurrent marking in the background.
+ HeapObjectHeader::FromObject(memory).MarkNonAtomic();
+ // Resetting the allocation buffer forces all further allocations in pre
+ // finalizers to go through this slow path.
+ ReplaceLinearAllocationBuffer(space, stats_collector_, nullptr, 0);
+ prefinalizer_handler_.NotifyAllocationInPrefinalizer(size);
+ }
return memory;
}
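The block added above implements black allocation during prefinalizers: marking has already finished, so new objects are marked immediately (non-atomically, since no concurrent marker runs), the linear allocation buffer is dropped so every subsequent prefinalizer allocation is observed on this slow path, and the allocated bytes are recorded for the verifier. A reduced sketch of that policy, with stand-in types:

    #include <cstddef>

    struct Header {
      bool marked = false;
      void MarkNonAtomic() { marked = true; }
    };

    struct Allocator {
      bool invoking_prefinalizers = false;
      std::size_t bytes_allocated_in_prefinalizers = 0;

      void ResetLinearAllocationBuffer() {}

      void OnSlowPathAllocation(Header& header, std::size_t size) {
        if (!invoking_prefinalizers) return;
        header.MarkNonAtomic();         // Allocate black: marking is done.
        ResetLinearAllocationBuffer();  // Keep later allocations on this path.
        bytes_allocated_in_prefinalizers += size;
      }
    };

    int main() {
      Allocator allocator;
      allocator.invoking_prefinalizers = true;
      Header header;
      allocator.OnSlowPathAllocation(header, 64);
    }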
@@ -124,8 +137,8 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace& space,
// 1. If this allocation is big enough, allocate a large object.
if (size >= kLargeObjectSizeThreshold) {
- auto* large_space = &LargePageSpace::From(
- *raw_heap_->Space(RawHeap::RegularSpaceType::kLarge));
+ auto& large_space = LargePageSpace::From(
+ *raw_heap_.Space(RawHeap::RegularSpaceType::kLarge));
return AllocateLargeObject(page_backend_, large_space, stats_collector_,
size, gcinfo);
}
@@ -137,7 +150,7 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace& space,
// 3. Lazily sweep pages of this heap until we find a freed area for
// this allocation or we finish sweeping all pages of this heap.
- Sweeper& sweeper = raw_heap_->heap()->sweeper();
+ Sweeper& sweeper = raw_heap_.heap()->sweeper();
// TODO(chromium:1056170): Investigate whether this should be a loop which
// would result in more aggressive re-use of memory at the expense of
// potentially larger allocation time.
@@ -159,11 +172,11 @@ void* ObjectAllocator::OutOfLineAllocateImpl(NormalPageSpace& space,
// TODO(chromium:1056170): Make use of the synchronously freed memory.
// 5. Add a new page to this heap.
- auto* new_page = NormalPage::Create(*page_backend_, space);
+ auto* new_page = NormalPage::Create(page_backend_, space);
space.AddPage(new_page);
// 6. Set linear allocation buffer to new page.
- ReplaceLinearAllocationBuffer(space, *stats_collector_,
+ ReplaceLinearAllocationBuffer(space, stats_collector_,
new_page->PayloadStart(),
new_page->PayloadSize());
@@ -182,13 +195,12 @@ void* ObjectAllocator::AllocateFromFreeList(NormalPageSpace& space, size_t size,
// Assume discarded memory on that page is now zero.
auto& page = *NormalPage::From(BasePage::FromPayload(entry.address));
if (page.discarded_memory()) {
- stats_collector_->DecrementDiscardedMemory(page.discarded_memory());
+ stats_collector_.DecrementDiscardedMemory(page.discarded_memory());
page.ResetDiscardedMemory();
}
- ReplaceLinearAllocationBuffer(space, *stats_collector_,
- static_cast<Address>(entry.address),
- entry.size);
+ ReplaceLinearAllocationBuffer(
+ space, stats_collector_, static_cast<Address>(entry.address), entry.size);
return AllocateObjectOnSpace(space, size, gcinfo);
}
@@ -196,20 +208,20 @@ void* ObjectAllocator::AllocateFromFreeList(NormalPageSpace& space, size_t size,
void ObjectAllocator::ResetLinearAllocationBuffers() {
class Resetter : public HeapVisitor<Resetter> {
public:
- explicit Resetter(StatsCollector* stats) : stats_collector_(stats) {}
+ explicit Resetter(StatsCollector& stats) : stats_collector_(stats) {}
bool VisitLargePageSpace(LargePageSpace&) { return true; }
bool VisitNormalPageSpace(NormalPageSpace& space) {
- ReplaceLinearAllocationBuffer(space, *stats_collector_, nullptr, 0);
+ ReplaceLinearAllocationBuffer(space, stats_collector_, nullptr, 0);
return true;
}
private:
- StatsCollector* stats_collector_;
+ StatsCollector& stats_collector_;
} visitor(stats_collector_);
- visitor.Traverse(*raw_heap_);
+ visitor.Traverse(raw_heap_);
}
void ObjectAllocator::Terminate() {
@@ -217,7 +229,7 @@ void ObjectAllocator::Terminate() {
}
bool ObjectAllocator::in_disallow_gc_scope() const {
- return raw_heap_->heap()->in_disallow_gc_scope();
+ return raw_heap_.heap()->in_disallow_gc_scope();
}
} // namespace internal
diff --git a/deps/v8/src/heap/cppgc/object-allocator.h b/deps/v8/src/heap/cppgc/object-allocator.h
index dd0035cfe9..c02115b667 100644
--- a/deps/v8/src/heap/cppgc/object-allocator.h
+++ b/deps/v8/src/heap/cppgc/object-allocator.h
@@ -20,6 +20,7 @@ namespace cppgc {
namespace internal {
class ObjectAllocator;
+class PreFinalizerHandler;
} // namespace internal
class V8_EXPORT AllocationHandle {
@@ -37,8 +38,9 @@ class V8_EXPORT_PRIVATE ObjectAllocator final : public cppgc::AllocationHandle {
public:
static constexpr size_t kSmallestSpaceSize = 32;
- ObjectAllocator(RawHeap* heap, PageBackend* page_backend,
- StatsCollector* stats_collector);
+ ObjectAllocator(RawHeap& heap, PageBackend& page_backend,
+ StatsCollector& stats_collector,
+ PreFinalizerHandler& prefinalizer_handler);
inline void* AllocateObject(size_t size, GCInfoIndex gcinfo);
inline void* AllocateObject(size_t size, GCInfoIndex gcinfo,
@@ -63,9 +65,10 @@ class V8_EXPORT_PRIVATE ObjectAllocator final : public cppgc::AllocationHandle {
void* OutOfLineAllocateImpl(NormalPageSpace&, size_t, GCInfoIndex);
void* AllocateFromFreeList(NormalPageSpace&, size_t, GCInfoIndex);
- RawHeap* raw_heap_;
- PageBackend* page_backend_;
- StatsCollector* stats_collector_;
+ RawHeap& raw_heap_;
+ PageBackend& page_backend_;
+ StatsCollector& stats_collector_;
+ PreFinalizerHandler& prefinalizer_handler_;
};
void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo) {
@@ -74,7 +77,7 @@ void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo) {
RoundUp<kAllocationGranularity>(size + sizeof(HeapObjectHeader));
const RawHeap::RegularSpaceType type =
GetInitialSpaceIndexForSize(allocation_size);
- return AllocateObjectOnSpace(NormalPageSpace::From(*raw_heap_->Space(type)),
+ return AllocateObjectOnSpace(NormalPageSpace::From(*raw_heap_.Space(type)),
allocation_size, gcinfo);
}
@@ -84,7 +87,7 @@ void* ObjectAllocator::AllocateObject(size_t size, GCInfoIndex gcinfo,
const size_t allocation_size =
RoundUp<kAllocationGranularity>(size + sizeof(HeapObjectHeader));
return AllocateObjectOnSpace(
- NormalPageSpace::From(*raw_heap_->CustomSpace(space_index)),
+ NormalPageSpace::From(*raw_heap_.CustomSpace(space_index)),
allocation_size, gcinfo);
}
diff --git a/deps/v8/src/heap/cppgc/page-memory.cc b/deps/v8/src/heap/cppgc/page-memory.cc
index 49b44aff91..ed76f903e8 100644
--- a/deps/v8/src/heap/cppgc/page-memory.cc
+++ b/deps/v8/src/heap/cppgc/page-memory.cc
@@ -6,17 +6,21 @@
#include "src/base/macros.h"
#include "src/base/sanitizer/asan.h"
+#include "src/heap/cppgc/platform.h"
namespace cppgc {
namespace internal {
namespace {
-void Unprotect(PageAllocator* allocator, const PageMemory& page_memory) {
+void Unprotect(PageAllocator& allocator, FatalOutOfMemoryHandler& oom_handler,
+ const PageMemory& page_memory) {
if (SupportsCommittingGuardPages(allocator)) {
- CHECK(allocator->SetPermissions(page_memory.writeable_region().base(),
- page_memory.writeable_region().size(),
- PageAllocator::Permission::kReadWrite));
+ if (!allocator.SetPermissions(page_memory.writeable_region().base(),
+ page_memory.writeable_region().size(),
+ PageAllocator::Permission::kReadWrite)) {
+ oom_handler("Oilpan: Unprotecting memory.");
+ }
} else {
// No protection in case the allocator cannot commit at the required
// granularity. Only protect if the allocator supports committing at that
@@ -24,53 +28,66 @@ void Unprotect(PageAllocator* allocator, const PageMemory& page_memory) {
//
// The allocator needs to support committing the overall range.
CHECK_EQ(0u,
- page_memory.overall_region().size() % allocator->CommitPageSize());
- CHECK(allocator->SetPermissions(page_memory.overall_region().base(),
- page_memory.overall_region().size(),
- PageAllocator::Permission::kReadWrite));
+ page_memory.overall_region().size() % allocator.CommitPageSize());
+ if (!allocator.SetPermissions(page_memory.overall_region().base(),
+ page_memory.overall_region().size(),
+ PageAllocator::Permission::kReadWrite)) {
+ oom_handler("Oilpan: Unprotecting memory.");
+ }
}
}
-void Protect(PageAllocator* allocator, const PageMemory& page_memory) {
+void Protect(PageAllocator& allocator, FatalOutOfMemoryHandler& oom_handler,
+ const PageMemory& page_memory) {
if (SupportsCommittingGuardPages(allocator)) {
// Swap the same region, providing the OS with a chance for fast lookup and
// change.
- CHECK(allocator->SetPermissions(page_memory.writeable_region().base(),
- page_memory.writeable_region().size(),
- PageAllocator::Permission::kNoAccess));
+ if (!allocator.SetPermissions(page_memory.writeable_region().base(),
+ page_memory.writeable_region().size(),
+ PageAllocator::Permission::kNoAccess)) {
+ oom_handler("Oilpan: Protecting memory.");
+ }
} else {
// See Unprotect().
CHECK_EQ(0u,
- page_memory.overall_region().size() % allocator->CommitPageSize());
- CHECK(allocator->SetPermissions(page_memory.overall_region().base(),
- page_memory.overall_region().size(),
- PageAllocator::Permission::kNoAccess));
+ page_memory.overall_region().size() % allocator.CommitPageSize());
+ if (!allocator.SetPermissions(page_memory.overall_region().base(),
+ page_memory.overall_region().size(),
+ PageAllocator::Permission::kNoAccess)) {
+ oom_handler("Oilpan: Protecting memory.");
+ }
}
}
-MemoryRegion ReserveMemoryRegion(PageAllocator* allocator,
+MemoryRegion ReserveMemoryRegion(PageAllocator& allocator,
+ FatalOutOfMemoryHandler& oom_handler,
size_t allocation_size) {
void* region_memory =
- allocator->AllocatePages(nullptr, allocation_size, kPageSize,
- PageAllocator::Permission::kNoAccess);
+ allocator.AllocatePages(nullptr, allocation_size, kPageSize,
+ PageAllocator::Permission::kNoAccess);
+ if (!region_memory) {
+ oom_handler("Oilpan: Reserving memory.");
+ }
const MemoryRegion reserved_region(static_cast<Address>(region_memory),
allocation_size);
DCHECK_EQ(reserved_region.base() + allocation_size, reserved_region.end());
return reserved_region;
}
-void FreeMemoryRegion(PageAllocator* allocator,
+void FreeMemoryRegion(PageAllocator& allocator,
const MemoryRegion& reserved_region) {
// Make sure pages returned to OS are unpoisoned.
ASAN_UNPOISON_MEMORY_REGION(reserved_region.base(), reserved_region.size());
- allocator->FreePages(reserved_region.base(), reserved_region.size());
+ allocator.FreePages(reserved_region.base(), reserved_region.size());
}
} // namespace
-PageMemoryRegion::PageMemoryRegion(PageAllocator* allocator,
+PageMemoryRegion::PageMemoryRegion(PageAllocator& allocator,
+ FatalOutOfMemoryHandler& oom_handler,
MemoryRegion reserved_region, bool is_large)
: allocator_(allocator),
+ oom_handler_(oom_handler),
reserved_region_(reserved_region),
is_large_(is_large) {}
@@ -81,12 +98,14 @@ PageMemoryRegion::~PageMemoryRegion() {
// static
constexpr size_t NormalPageMemoryRegion::kNumPageRegions;
-NormalPageMemoryRegion::NormalPageMemoryRegion(PageAllocator* allocator)
- : PageMemoryRegion(allocator,
- ReserveMemoryRegion(
- allocator, RoundUp(kPageSize * kNumPageRegions,
- allocator->AllocatePageSize())),
- false) {
+NormalPageMemoryRegion::NormalPageMemoryRegion(
+ PageAllocator& allocator, FatalOutOfMemoryHandler& oom_handler)
+ : PageMemoryRegion(
+ allocator, oom_handler,
+ ReserveMemoryRegion(allocator, oom_handler,
+ RoundUp(kPageSize * kNumPageRegions,
+ allocator.AllocatePageSize())),
+ false) {
#ifdef DEBUG
for (size_t i = 0; i < kNumPageRegions; ++i) {
DCHECK_EQ(false, page_memories_in_use_[i]);
@@ -99,33 +118,35 @@ NormalPageMemoryRegion::~NormalPageMemoryRegion() = default;
void NormalPageMemoryRegion::Allocate(Address writeable_base) {
const size_t index = GetIndex(writeable_base);
ChangeUsed(index, true);
- Unprotect(allocator_, GetPageMemory(index));
+ Unprotect(allocator_, oom_handler_, GetPageMemory(index));
}
void NormalPageMemoryRegion::Free(Address writeable_base) {
const size_t index = GetIndex(writeable_base);
ChangeUsed(index, false);
- Protect(allocator_, GetPageMemory(index));
+ Protect(allocator_, oom_handler_, GetPageMemory(index));
}
void NormalPageMemoryRegion::UnprotectForTesting() {
for (size_t i = 0; i < kNumPageRegions; ++i) {
- Unprotect(allocator_, GetPageMemory(i));
+ Unprotect(allocator_, oom_handler_, GetPageMemory(i));
}
}
-LargePageMemoryRegion::LargePageMemoryRegion(PageAllocator* allocator,
- size_t length)
- : PageMemoryRegion(allocator,
- ReserveMemoryRegion(
- allocator, RoundUp(length + 2 * kGuardPageSize,
- allocator->AllocatePageSize())),
- true) {}
+LargePageMemoryRegion::LargePageMemoryRegion(
+ PageAllocator& allocator, FatalOutOfMemoryHandler& oom_handler,
+ size_t length)
+ : PageMemoryRegion(
+ allocator, oom_handler,
+ ReserveMemoryRegion(allocator, oom_handler,
+ RoundUp(length + 2 * kGuardPageSize,
+ allocator.AllocatePageSize())),
+ true) {}
LargePageMemoryRegion::~LargePageMemoryRegion() = default;
void LargePageMemoryRegion::UnprotectForTesting() {
- Unprotect(allocator_, GetPageMemory());
+ Unprotect(allocator_, oom_handler_, GetPageMemory());
}
PageMemoryRegionTree::PageMemoryRegionTree() = default;
@@ -165,27 +186,33 @@ std::pair<NormalPageMemoryRegion*, Address> NormalPageMemoryPool::Take(
return pair;
}
-PageBackend::PageBackend(PageAllocator* allocator) : allocator_(allocator) {}
+PageBackend::PageBackend(PageAllocator& allocator,
+ FatalOutOfMemoryHandler& oom_handler)
+ : allocator_(allocator), oom_handler_(oom_handler) {}
PageBackend::~PageBackend() = default;
Address PageBackend::AllocateNormalPageMemory(size_t bucket) {
+ v8::base::MutexGuard guard(&mutex_);
std::pair<NormalPageMemoryRegion*, Address> result = page_pool_.Take(bucket);
if (!result.first) {
- auto pmr = std::make_unique<NormalPageMemoryRegion>(allocator_);
+ auto pmr =
+ std::make_unique<NormalPageMemoryRegion>(allocator_, oom_handler_);
for (size_t i = 0; i < NormalPageMemoryRegion::kNumPageRegions; ++i) {
page_pool_.Add(bucket, pmr.get(),
pmr->GetPageMemory(i).writeable_region().base());
}
page_memory_region_tree_.Add(pmr.get());
normal_page_memory_regions_.push_back(std::move(pmr));
- return AllocateNormalPageMemory(bucket);
+ result = page_pool_.Take(bucket);
+ DCHECK(result.first);
}
result.first->Allocate(result.second);
return result.second;
}
void PageBackend::FreeNormalPageMemory(size_t bucket, Address writeable_base) {
+ v8::base::MutexGuard guard(&mutex_);
auto* pmr = static_cast<NormalPageMemoryRegion*>(
page_memory_region_tree_.Lookup(writeable_base));
pmr->Free(writeable_base);
@@ -193,15 +220,18 @@ void PageBackend::FreeNormalPageMemory(size_t bucket, Address writeable_base) {
}
Address PageBackend::AllocateLargePageMemory(size_t size) {
- auto pmr = std::make_unique<LargePageMemoryRegion>(allocator_, size);
+ v8::base::MutexGuard guard(&mutex_);
+ auto pmr =
+ std::make_unique<LargePageMemoryRegion>(allocator_, oom_handler_, size);
const PageMemory pm = pmr->GetPageMemory();
- Unprotect(allocator_, pm);
+ Unprotect(allocator_, oom_handler_, pm);
page_memory_region_tree_.Add(pmr.get());
large_page_memory_regions_.insert(std::make_pair(pmr.get(), std::move(pmr)));
return pm.writeable_region().base();
}
void PageBackend::FreeLargePageMemory(Address writeable_base) {
+ v8::base::MutexGuard guard(&mutex_);
PageMemoryRegion* pmr = page_memory_region_tree_.Lookup(writeable_base);
page_memory_region_tree_.Remove(pmr);
auto size = large_page_memory_regions_.erase(pmr);
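Throughout page-memory.cc, CHECK(...) on permission changes and reservations is replaced by routing the failure into a FatalOutOfMemoryHandler, which attaches a reason string before the process dies. A minimal sketch of the call-site pattern, assuming a hypothetical SetPermissions() stand-in for the allocator call:

    #include <cstdio>
    #include <cstdlib>
    #include <string>

    struct OomHandler {
      [[noreturn]] void operator()(const std::string& reason) const {
        std::fprintf(stderr, "Out of memory: %s\n", reason.c_str());
        std::abort();
      }
    };

    bool SetPermissions() { return true; }  // hypothetical allocator call

    void Unprotect(OomHandler& oom) {
      // Instead of CHECK(SetPermissions()), failure takes a named path that
      // can carry context about what the heap was doing when it ran out.
      if (!SetPermissions()) {
        oom("Oilpan: Unprotecting memory.");
      }
    }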
diff --git a/deps/v8/src/heap/cppgc/page-memory.h b/deps/v8/src/heap/cppgc/page-memory.h
index 51b2b61f7d..e5b73318f7 100644
--- a/deps/v8/src/heap/cppgc/page-memory.h
+++ b/deps/v8/src/heap/cppgc/page-memory.h
@@ -13,11 +13,14 @@
#include "include/cppgc/platform.h"
#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
#include "src/heap/cppgc/globals.h"
namespace cppgc {
namespace internal {
+class FatalOutOfMemoryHandler;
+
class V8_EXPORT_PRIVATE MemoryRegion final {
public:
MemoryRegion() = default;
@@ -79,9 +82,11 @@ class V8_EXPORT_PRIVATE PageMemoryRegion {
virtual void UnprotectForTesting() = 0;
protected:
- PageMemoryRegion(PageAllocator*, MemoryRegion, bool);
+ PageMemoryRegion(PageAllocator&, FatalOutOfMemoryHandler&, MemoryRegion,
+ bool);
- PageAllocator* const allocator_;
+ PageAllocator& allocator_;
+ FatalOutOfMemoryHandler& oom_handler_;
const MemoryRegion reserved_region_;
const bool is_large_;
};
@@ -91,7 +96,7 @@ class V8_EXPORT_PRIVATE NormalPageMemoryRegion final : public PageMemoryRegion {
public:
static constexpr size_t kNumPageRegions = 10;
- explicit NormalPageMemoryRegion(PageAllocator*);
+ NormalPageMemoryRegion(PageAllocator&, FatalOutOfMemoryHandler&);
~NormalPageMemoryRegion() override;
const PageMemory GetPageMemory(size_t index) const {
@@ -133,7 +138,7 @@ class V8_EXPORT_PRIVATE NormalPageMemoryRegion final : public PageMemoryRegion {
// LargePageMemoryRegion serves a single large PageMemory object.
class V8_EXPORT_PRIVATE LargePageMemoryRegion final : public PageMemoryRegion {
public:
- LargePageMemoryRegion(PageAllocator*, size_t);
+ LargePageMemoryRegion(PageAllocator&, FatalOutOfMemoryHandler&, size_t);
~LargePageMemoryRegion() override;
const PageMemory GetPageMemory() const {
@@ -193,7 +198,7 @@ class V8_EXPORT_PRIVATE NormalPageMemoryPool final {
// regions alive.
class V8_EXPORT_PRIVATE PageBackend final {
public:
- explicit PageBackend(PageAllocator*);
+ PageBackend(PageAllocator&, FatalOutOfMemoryHandler&);
~PageBackend();
// Allocates a normal page from the backend.
@@ -223,7 +228,10 @@ class V8_EXPORT_PRIVATE PageBackend final {
PageBackend& operator=(const PageBackend&) = delete;
private:
- PageAllocator* allocator_;
+ // Guards against concurrent uses of `Lookup()`.
+ mutable v8::base::Mutex mutex_;
+ PageAllocator& allocator_;
+ FatalOutOfMemoryHandler& oom_handler_;
NormalPageMemoryPool page_pool_;
PageMemoryRegionTree page_memory_region_tree_;
std::vector<std::unique_ptr<PageMemoryRegion>> normal_page_memory_regions_;
@@ -233,8 +241,8 @@ class V8_EXPORT_PRIVATE PageBackend final {
// Returns true if the provided allocator supports committing at the required
// granularity.
-inline bool SupportsCommittingGuardPages(PageAllocator* allocator) {
- return kGuardPageSize % allocator->CommitPageSize() == 0;
+inline bool SupportsCommittingGuardPages(PageAllocator& allocator) {
+ return kGuardPageSize % allocator.CommitPageSize() == 0;
}
Address NormalPageMemoryRegion::Lookup(ConstAddress address) const {
@@ -268,6 +276,7 @@ PageMemoryRegion* PageMemoryRegionTree::Lookup(ConstAddress address) const {
}
Address PageBackend::Lookup(ConstAddress address) const {
+ v8::base::MutexGuard guard(&mutex_);
PageMemoryRegion* pmr = page_memory_region_tree_.Lookup(address);
return pmr ? pmr->Lookup(address) : nullptr;
}
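PageBackend now takes a v8::base::MutexGuard in every mutating entry point and in the logically-const Lookup(), which is why the mutex member is declared mutable. A stand-alone sketch of that pattern using only the standard library:

    #include <map>
    #include <mutex>

    class RegionIndex {
     public:
      void Add(int key, int value) {
        std::lock_guard<std::mutex> guard(mutex_);
        tree_[key] = value;
      }

      // A const method may still lock because mutex_ is mutable.
      int Lookup(int key) const {
        std::lock_guard<std::mutex> guard(mutex_);
        auto it = tree_.find(key);
        return it == tree_.end() ? -1 : it->second;
      }

     private:
      mutable std::mutex mutex_;  // guards tree_ against concurrent lookups
      std::map<int, int> tree_;
    };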
diff --git a/deps/v8/src/heap/cppgc/platform.cc b/deps/v8/src/heap/cppgc/platform.cc
index 90516d6065..fd769ae469 100644
--- a/deps/v8/src/heap/cppgc/platform.cc
+++ b/deps/v8/src/heap/cppgc/platform.cc
@@ -5,10 +5,38 @@
#include "include/cppgc/platform.h"
#include "src/base/lazy-instance.h"
+#include "src/base/logging.h"
+#include "src/base/macros.h"
#include "src/base/platform/platform.h"
+#include "src/base/sanitizer/asan.h"
#include "src/heap/cppgc/gc-info-table.h"
+#include "src/heap/cppgc/globals.h"
+#include "src/heap/cppgc/platform.h"
namespace cppgc {
+namespace internal {
+
+void Abort() { v8::base::OS::Abort(); }
+
+void FatalOutOfMemoryHandler::operator()(const std::string& reason,
+ const SourceLocation& loc) const {
+ if (custom_handler_) {
+ (*custom_handler_)(reason, loc, heap_);
+ FATAL("Custom out of memory handler should not have returned");
+ }
+#ifdef DEBUG
+ V8_Fatal(loc.FileName(), static_cast<int>(loc.Line()),
+ "Oilpan: Out of memory (%s)", reason.c_str());
+#else // !DEBUG
+ V8_Fatal("Oilpan: Out of memory");
+#endif // !DEBUG
+}
+
+void FatalOutOfMemoryHandler::SetCustomHandler(Callback* callback) {
+ custom_handler_ = callback;
+}
+
+} // namespace internal
namespace {
PageAllocator* g_page_allocator = nullptr;
@@ -20,6 +48,17 @@ TracingController* Platform::GetTracingController() {
}
void InitializeProcess(PageAllocator* page_allocator) {
+#if defined(V8_USE_ADDRESS_SANITIZER) && defined(V8_TARGET_ARCH_64_BIT)
+  // Retrieve asan's internal shadow memory granularity and check that Oilpan's
+  // object alignment/sizes are a multiple of this granularity. This is needed
+  // to perform poisoning checks.
+ size_t shadow_scale;
+ __asan_get_shadow_mapping(&shadow_scale, nullptr);
+ DCHECK(shadow_scale);
+ const size_t poisoning_granularity = 1 << shadow_scale;
+ CHECK_EQ(0u, internal::kAllocationGranularity % poisoning_granularity);
+#endif
+
CHECK(!g_page_allocator);
internal::GlobalGCInfoTable::Initialize(page_allocator);
g_page_allocator = page_allocator;
@@ -27,9 +66,4 @@ void InitializeProcess(PageAllocator* page_allocator) {
void ShutdownProcess() { g_page_allocator = nullptr; }
-namespace internal {
-
-void Abort() { v8::base::OS::Abort(); }
-
-} // namespace internal
} // namespace cppgc
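The new InitializeProcess() check reads ASan's shadow scale to derive the poisoning granularity and asserts Oilpan's allocation granularity is compatible. A sketch of the same query, only meaningful in builds compiled with -fsanitize=address:

    #include <cstddef>

    #if defined(__SANITIZE_ADDRESS__) || defined(ADDRESS_SANITIZER)
    #include <sanitizer/asan_interface.h>

    size_t PoisoningGranularity() {
      size_t shadow_scale = 0;
      __asan_get_shadow_mapping(&shadow_scale, nullptr);
      // One shadow byte covers 2^shadow_scale application bytes, so memory
      // can only be poisoned/unpoisoned at that granularity.
      return static_cast<size_t>(1) << shadow_scale;
    }
    #endif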
diff --git a/deps/v8/src/heap/cppgc/platform.h b/deps/v8/src/heap/cppgc/platform.h
new file mode 100644
index 0000000000..2fba1ada1b
--- /dev/null
+++ b/deps/v8/src/heap/cppgc/platform.h
@@ -0,0 +1,43 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_CPPGC_PLATFORM_H_
+#define V8_HEAP_CPPGC_PLATFORM_H_
+
+#include <string>
+
+#include "include/cppgc/source-location.h"
+#include "src/base/macros.h"
+
+namespace cppgc {
+namespace internal {
+
+class HeapBase;
+
+class V8_EXPORT_PRIVATE FatalOutOfMemoryHandler final {
+ public:
+ using Callback = void(const std::string&, const SourceLocation&, HeapBase*);
+
+ FatalOutOfMemoryHandler() = default;
+ explicit FatalOutOfMemoryHandler(HeapBase* heap) : heap_(heap) {}
+
+ [[noreturn]] void operator()(
+ const std::string& reason = std::string(),
+ const SourceLocation& = SourceLocation::Current()) const;
+
+ void SetCustomHandler(Callback*);
+
+ // Disallow copy/move.
+ FatalOutOfMemoryHandler(const FatalOutOfMemoryHandler&) = delete;
+ FatalOutOfMemoryHandler& operator=(const FatalOutOfMemoryHandler&) = delete;
+
+ private:
+ HeapBase* heap_ = nullptr;
+ Callback* custom_handler_ = nullptr;
+};
+
+} // namespace internal
+} // namespace cppgc
+
+#endif // V8_HEAP_CPPGC_PLATFORM_H_
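The handler's contract is worth spelling out: operator() is [[noreturn]], and a registered custom callback is expected to terminate as well; the FATAL(...) after the call in platform.cc enforces that. A small sketch of the contract with hypothetical names, not the real registration path:

    #include <cstdio>
    #include <cstdlib>
    #include <string>

    using OomCallback = void(const std::string& reason);

    OomCallback* g_custom_handler = nullptr;

    [[noreturn]] void HandleOom(const std::string& reason) {
      if (g_custom_handler) {
        (*g_custom_handler)(reason);
        // By contract the callback must not return; falling through here
        // would be treated as fatal in the real handler.
      }
      std::fprintf(stderr, "Oilpan: Out of memory (%s)\n", reason.c_str());
      std::abort();
    }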
diff --git a/deps/v8/src/heap/cppgc/prefinalizer-handler.cc b/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
index c05f06f6b0..9f641d6f4b 100644
--- a/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
+++ b/deps/v8/src/heap/cppgc/prefinalizer-handler.cc
@@ -31,7 +31,8 @@ bool PreFinalizerRegistrationDispatcher::PreFinalizer::operator==(
}
PreFinalizerHandler::PreFinalizerHandler(HeapBase& heap)
- : heap_(heap)
+ : current_ordered_pre_finalizers_(&ordered_pre_finalizers_),
+ heap_(heap)
#ifdef DEBUG
,
creation_thread_id_(v8::base::OS::GetCurrentThreadId())
@@ -44,7 +45,10 @@ void PreFinalizerHandler::RegisterPrefinalizer(PreFinalizer pre_finalizer) {
DCHECK_EQ(ordered_pre_finalizers_.end(),
std::find(ordered_pre_finalizers_.begin(),
ordered_pre_finalizers_.end(), pre_finalizer));
- ordered_pre_finalizers_.push_back(pre_finalizer);
+ DCHECK_EQ(current_ordered_pre_finalizers_->end(),
+ std::find(current_ordered_pre_finalizers_->begin(),
+ current_ordered_pre_finalizers_->end(), pre_finalizer));
+ current_ordered_pre_finalizers_->push_back(pre_finalizer);
}
void PreFinalizerHandler::InvokePreFinalizers() {
@@ -54,6 +58,13 @@ void PreFinalizerHandler::InvokePreFinalizers() {
DCHECK(CurrentThreadIsCreationThread());
LivenessBroker liveness_broker = LivenessBrokerFactory::Create();
is_invoking_ = true;
+ DCHECK_EQ(0u, bytes_allocated_in_prefinalizers);
+ // Reset all LABs to force allocations to the slow path for black allocation.
+ heap_.object_allocator().ResetLinearAllocationBuffers();
+ // Prefinalizers can allocate other objects with prefinalizers, which will
+ // modify ordered_pre_finalizers_ and break iterators.
+ std::vector<PreFinalizer> new_ordered_pre_finalizers;
+ current_ordered_pre_finalizers_ = &new_ordered_pre_finalizers;
ordered_pre_finalizers_.erase(
ordered_pre_finalizers_.begin(),
std::remove_if(ordered_pre_finalizers_.rbegin(),
@@ -62,6 +73,12 @@ void PreFinalizerHandler::InvokePreFinalizers() {
return (pf.callback)(liveness_broker, pf.object);
})
.base());
+ // Newly added objects with prefinalizers will always survive the current GC
+ // cycle, so it's safe to add them after clearing out the older prefinalizers.
+ ordered_pre_finalizers_.insert(ordered_pre_finalizers_.end(),
+ new_ordered_pre_finalizers.begin(),
+ new_ordered_pre_finalizers.end());
+ current_ordered_pre_finalizers_ = &ordered_pre_finalizers_;
is_invoking_ = false;
ordered_pre_finalizers_.shrink_to_fit();
}
@@ -74,5 +91,11 @@ bool PreFinalizerHandler::CurrentThreadIsCreationThread() {
#endif
}
+void PreFinalizerHandler::NotifyAllocationInPrefinalizer(size_t size) {
+ DCHECK_GT(bytes_allocated_in_prefinalizers + size,
+ bytes_allocated_in_prefinalizers);
+ bytes_allocated_in_prefinalizers += size;
+}
+
} // namespace internal
} // namespace cppgc
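The scratch-vector dance above exists because a prefinalizer may register new prefinalizers while ordered_pre_finalizers_ is being iterated, which would invalidate the iterators. A simplified sketch of the same technique, ignoring the liveness filtering the real code performs:

    #include <functional>
    #include <utility>
    #include <vector>

    std::vector<std::function<void()>> registered;
    std::vector<std::function<void()>>* current = &registered;

    void Register(std::function<void()> fn) {
      current->push_back(std::move(fn));
    }

    void InvokeAll() {
      // Redirect registrations made during invocation into a scratch vector
      // so the loop below iterates over a stable container.
      std::vector<std::function<void()>> added_during_invocation;
      current = &added_during_invocation;
      for (auto& fn : registered) fn();
      // Callbacks registered mid-invocation survive to the next round.
      registered = std::move(added_during_invocation);
      current = &registered;
    }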
diff --git a/deps/v8/src/heap/cppgc/prefinalizer-handler.h b/deps/v8/src/heap/cppgc/prefinalizer-handler.h
index e91931bf6f..bc17c99b18 100644
--- a/deps/v8/src/heap/cppgc/prefinalizer-handler.h
+++ b/deps/v8/src/heap/cppgc/prefinalizer-handler.h
@@ -27,6 +27,11 @@ class PreFinalizerHandler final {
bool IsInvokingPreFinalizers() const { return is_invoking_; }
+ void NotifyAllocationInPrefinalizer(size_t);
+ size_t ExtractBytesAllocatedInPrefinalizers() {
+ return std::exchange(bytes_allocated_in_prefinalizers, 0);
+ }
+
private:
// Checks that the current thread is the thread that created the heap.
bool CurrentThreadIsCreationThread();
@@ -36,12 +41,16 @@ class PreFinalizerHandler final {
// objects) for an object, by processing the ordered_pre_finalizers_
// back-to-front.
std::vector<PreFinalizer> ordered_pre_finalizers_;
+ std::vector<PreFinalizer>* current_ordered_pre_finalizers_;
HeapBase& heap_;
bool is_invoking_ = false;
#ifdef DEBUG
int creation_thread_id_;
#endif
+
+  // Counts bytes allocated while prefinalizers are running.
+ size_t bytes_allocated_in_prefinalizers = 0u;
};
} // namespace internal
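ExtractBytesAllocatedInPrefinalizers() uses std::exchange to read the counter and reset it in a single expression. A one-struct sketch of that read-and-reset idiom:

    #include <cstddef>
    #include <utility>

    struct AllocationCounter {
      size_t bytes = 0;
      void Add(size_t n) { bytes += n; }
      // Returns the accumulated value and leaves the counter at zero.
      size_t Extract() { return std::exchange(bytes, 0); }
    };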
diff --git a/deps/v8/src/heap/cppgc/stats-collector.cc b/deps/v8/src/heap/cppgc/stats-collector.cc
index 54b68f4c28..ce74fe53c8 100644
--- a/deps/v8/src/heap/cppgc/stats-collector.cc
+++ b/deps/v8/src/heap/cppgc/stats-collector.cc
@@ -41,19 +41,19 @@ void StatsCollector::NotifyAllocation(size_t bytes) {
// The current GC may not have been started. This is ok as recording considers
// the whole time range between garbage collections.
allocated_bytes_since_safepoint_ += bytes;
-#ifdef CPPGC_VERIFY_LIVE_BYTES
- DCHECK_GE(live_bytes_ + bytes, live_bytes_);
- live_bytes_ += bytes;
-#endif // CPPGC_VERIFY_LIVE_BYTES
+#ifdef CPPGC_VERIFY_HEAP
+ DCHECK_GE(tracked_live_bytes_ + bytes, tracked_live_bytes_);
+ tracked_live_bytes_ += bytes;
+#endif // CPPGC_VERIFY_HEAP
}
void StatsCollector::NotifyExplicitFree(size_t bytes) {
// See IncreaseAllocatedObjectSize for lifetime of the counter.
explicitly_freed_bytes_since_safepoint_ += bytes;
-#ifdef CPPGC_VERIFY_LIVE_BYTES
- DCHECK_GE(live_bytes_, bytes);
- live_bytes_ -= bytes;
-#endif // CPPGC_VERIFY_LIVE_BYTES
+#ifdef CPPGC_VERIFY_HEAP
+ DCHECK_GE(tracked_live_bytes_, bytes);
+ tracked_live_bytes_ -= bytes;
+#endif // CPPGC_VERIFY_HEAP
}
void StatsCollector::NotifySafePointForConservativeCollection() {
@@ -124,9 +124,9 @@ void StatsCollector::NotifyMarkingCompleted(size_t marked_bytes) {
explicitly_freed_bytes_since_safepoint_;
allocated_bytes_since_safepoint_ = 0;
explicitly_freed_bytes_since_safepoint_ = 0;
-#ifdef CPPGC_VERIFY_LIVE_BYTES
- live_bytes_ = marked_bytes;
-#endif // CPPGC_VERIFY_LIVE_BYTES
+#ifdef CPPGC_VERIFY_HEAP
+ tracked_live_bytes_ = marked_bytes;
+#endif // CPPGC_VERIFY_HEAP
DCHECK_LE(memory_freed_bytes_since_end_of_marking_, memory_allocated_bytes_);
memory_allocated_bytes_ -= memory_freed_bytes_since_end_of_marking_;
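The DCHECK_GE(tracked_live_bytes_ + bytes, tracked_live_bytes_) pattern above is an unsigned-wraparound check: for a size_t, x + n < x holds exactly when the addition overflowed. A minimal sketch of both directions using plain asserts:

    #include <cassert>
    #include <cstddef>

    void AddLiveBytes(size_t& tracked, size_t bytes) {
      assert(tracked + bytes >= tracked);  // fails only on overflow
      tracked += bytes;
    }

    void SubLiveBytes(size_t& tracked, size_t bytes) {
      assert(tracked >= bytes);  // fails only on underflow
      tracked -= bytes;
    }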
diff --git a/deps/v8/src/heap/cppgc/stats-collector.h b/deps/v8/src/heap/cppgc/stats-collector.h
index d63d297c77..c3d8dbbfc0 100644
--- a/deps/v8/src/heap/cppgc/stats-collector.h
+++ b/deps/v8/src/heap/cppgc/stats-collector.h
@@ -334,9 +334,10 @@ class V8_EXPORT_PRIVATE StatsCollector final {
// arithmetic for simplicity.
int64_t allocated_bytes_since_safepoint_ = 0;
int64_t explicitly_freed_bytes_since_safepoint_ = 0;
-#ifdef CPPGC_VERIFY_LIVE_BYTES
- size_t live_bytes_ = 0;
-#endif // CPPGC_VERIFY_LIVE_BYTES
+#ifdef CPPGC_VERIFY_HEAP
+  // Tracks live bytes to check counters for over- and underflow.
+ size_t tracked_live_bytes_ = 0;
+#endif // CPPGC_VERIFY_HEAP
int64_t memory_allocated_bytes_ = 0;
int64_t memory_freed_bytes_since_end_of_marking_ = 0;
diff --git a/deps/v8/src/heap/cppgc/visitor.cc b/deps/v8/src/heap/cppgc/visitor.cc
index e871159b7b..2f786b99ac 100644
--- a/deps/v8/src/heap/cppgc/visitor.cc
+++ b/deps/v8/src/heap/cppgc/visitor.cc
@@ -5,7 +5,9 @@
#include "src/heap/cppgc/visitor.h"
#include "src/base/sanitizer/msan.h"
+#include "src/heap/cppgc/caged-heap.h"
#include "src/heap/cppgc/gc-info-table.h"
+#include "src/heap/cppgc/heap-base.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/heap-page.h"
#include "src/heap/cppgc/object-view.h"
@@ -50,6 +52,11 @@ void TraceConservatively(ConservativeTracingVisitor* conservative_visitor,
void ConservativeTracingVisitor::TraceConservativelyIfNeeded(
const void* address) {
+#if defined(CPPGC_CAGED_HEAP)
+ // TODO(chromium:1056170): Add support for SIMD in stack scanning.
+ if (V8_LIKELY(!heap_.caged_heap().IsOnHeap(address))) return;
+#endif
+
const BasePage* page = reinterpret_cast<const BasePage*>(
page_backend_.Lookup(static_cast<ConstAddress>(address)));
diff --git a/deps/v8/src/heap/cppgc/write-barrier.cc b/deps/v8/src/heap/cppgc/write-barrier.cc
index 6980e4c893..007abe3005 100644
--- a/deps/v8/src/heap/cppgc/write-barrier.cc
+++ b/deps/v8/src/heap/cppgc/write-barrier.cc
@@ -132,12 +132,12 @@ void WriteBarrier::GenerationalBarrierSlow(const CagedHeapLocalData& local_data,
// A write during atomic pause (e.g. pre-finalizer) may trigger the slow path
// of the barrier. This is a result of the order of bailouts where not marking
// results in applying the generational barrier.
- if (local_data.heap_base->in_atomic_pause()) return;
+ if (local_data.heap_base.in_atomic_pause()) return;
if (value_offset > 0 && age_table[value_offset] == AgeTable::Age::kOld)
return;
// Record slot.
- local_data.heap_base->remembered_slots().insert(const_cast<void*>(slot));
+ local_data.heap_base.remembered_slots().insert(const_cast<void*>(slot));
}
#endif // CPPGC_YOUNG_GENERATION
diff --git a/deps/v8/src/heap/embedder-tracing.h b/deps/v8/src/heap/embedder-tracing.h
index befb1a7e7a..1f15a7e826 100644
--- a/deps/v8/src/heap/embedder-tracing.h
+++ b/deps/v8/src/heap/embedder-tracing.h
@@ -6,7 +6,8 @@
#define V8_HEAP_EMBEDDER_TRACING_H_
#include "include/v8-cppgc.h"
-#include "include/v8.h"
+#include "include/v8-embedder-heap.h"
+#include "include/v8-traced-handle.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
diff --git a/deps/v8/src/heap/factory-base.cc b/deps/v8/src/heap/factory-base.cc
index 1e197b9302..2547d40f0c 100644
--- a/deps/v8/src/heap/factory-base.cc
+++ b/deps/v8/src/heap/factory-base.cc
@@ -809,8 +809,7 @@ HeapObject FactoryBase<Impl>::AllocateRawArray(int size,
(size >
isolate()->heap()->AsHeap()->MaxRegularHeapObjectSize(allocation)) &&
FLAG_use_marking_progress_bar) {
- BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(result);
- chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
+ LargePage::FromHeapObject(result)->ProgressBar().Enable();
}
return result;
}
diff --git a/deps/v8/src/heap/factory-inl.h b/deps/v8/src/heap/factory-inl.h
index 72d53014fd..b64db0abf9 100644
--- a/deps/v8/src/heap/factory-inl.h
+++ b/deps/v8/src/heap/factory-inl.h
@@ -71,6 +71,15 @@ ReadOnlyRoots Factory::read_only_roots() const {
return ReadOnlyRoots(isolate());
}
+Factory::CodeBuilder& Factory::CodeBuilder::set_interpreter_data(
+ Handle<HeapObject> interpreter_data) {
+ // This DCHECK requires this function to be in -inl.h.
+ DCHECK(interpreter_data->IsInterpreterData() ||
+ interpreter_data->IsBytecodeArray());
+ interpreter_data_ = interpreter_data;
+ return *this;
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/heap/factory.cc b/deps/v8/src/heap/factory.cc
index 0c80e81f51..e995a49897 100644
--- a/deps/v8/src/heap/factory.cc
+++ b/deps/v8/src/heap/factory.cc
@@ -100,14 +100,15 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
kind_specific_flags_ == 0
? roots.trampoline_trivial_code_data_container_handle()
: roots.trampoline_promise_rejection_code_data_container_handle());
- DCHECK_EQ(canonical_code_data_container->kind_specific_flags(),
+ DCHECK_EQ(canonical_code_data_container->kind_specific_flags(kRelaxedLoad),
kind_specific_flags_);
data_container = canonical_code_data_container;
} else {
data_container = factory->NewCodeDataContainer(
0, read_only_data_container_ ? AllocationType::kReadOnly
: AllocationType::kOld);
- data_container->set_kind_specific_flags(kind_specific_flags_);
+ data_container->set_kind_specific_flags(kind_specific_flags_,
+ kRelaxedStore);
}
// Basic block profiling data for builtins is stored in the JS heap rather
@@ -161,10 +162,11 @@ MaybeHandle<Code> Factory::CodeBuilder::BuildInternal(
// passing IsPendingAllocation).
raw_code.set_inlined_bytecode_size(inlined_bytecode_size_);
raw_code.set_code_data_container(*data_container, kReleaseStore);
- raw_code.set_deoptimization_data(*deoptimization_data_);
if (kind_ == CodeKind::BASELINE) {
+ raw_code.set_bytecode_or_interpreter_data(*interpreter_data_);
raw_code.set_bytecode_offset_table(*position_table_);
} else {
+ raw_code.set_deoptimization_data(*deoptimization_data_);
raw_code.set_source_position_table(*position_table_);
}
raw_code.set_handler_table_offset(
@@ -312,7 +314,8 @@ void Factory::CodeBuilder::FinalizeOnHeapCode(Handle<Code> code,
Code::SizeFor(code_desc_.instruction_size() + code_desc_.metadata_size());
int size_to_trim = old_object_size - new_object_size;
DCHECK_GE(size_to_trim, 0);
- heap->UndoLastAllocationAt(code->address() + new_object_size, size_to_trim);
+ heap->CreateFillerObjectAt(code->address() + new_object_size, size_to_trim,
+ ClearRecordedSlots::kNo);
}
MaybeHandle<Code> Factory::NewEmptyCode(CodeKind kind, int buffer_size) {
@@ -456,16 +459,6 @@ Handle<Tuple2> Factory::NewTuple2(Handle<Object> value1, Handle<Object> value2,
return handle(result, isolate());
}
-Handle<BaselineData> Factory::NewBaselineData(
- Handle<Code> code, Handle<HeapObject> function_data) {
- auto baseline_data =
- NewStructInternal<BaselineData>(BASELINE_DATA_TYPE, AllocationType::kOld);
- DisallowGarbageCollection no_gc;
- baseline_data.set_baseline_code(*code);
- baseline_data.set_data(*function_data);
- return handle(baseline_data, isolate());
-}
-
Handle<Oddball> Factory::NewOddball(Handle<Map> map, const char* to_string,
Handle<Object> to_number,
const char* type_of, byte kind) {
@@ -512,8 +505,7 @@ MaybeHandle<FixedArray> Factory::TryNewFixedArray(
if (!allocation.To(&result)) return MaybeHandle<FixedArray>();
if ((size > heap->MaxRegularHeapObjectSize(allocation_type)) &&
FLAG_use_marking_progress_bar) {
- BasicMemoryChunk* chunk = BasicMemoryChunk::FromHeapObject(result);
- chunk->SetFlag<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR);
+ LargePage::FromHeapObject(result)->ProgressBar().Enable();
}
DisallowGarbageCollection no_gc;
result.set_map_after_allocation(*fixed_array_map(), SKIP_WRITE_BARRIER);
@@ -2178,7 +2170,7 @@ Handle<CodeDataContainer> Factory::NewCodeDataContainer(
CodeDataContainer::cast(New(code_data_container_map(), allocation));
DisallowGarbageCollection no_gc;
data_container.set_next_code_link(*undefined_value(), SKIP_WRITE_BARRIER);
- data_container.set_kind_specific_flags(flags);
+ data_container.set_kind_specific_flags(flags, kRelaxedStore);
if (V8_EXTERNAL_CODE_SPACE_BOOL) {
data_container.AllocateExternalPointerEntries(isolate());
data_container.set_raw_code(Smi::zero(), SKIP_WRITE_BARRIER);
@@ -2198,7 +2190,7 @@ Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
Builtins::CodeObjectIsExecutable(code->builtin_id());
Handle<Code> result = Builtins::GenerateOffHeapTrampolineFor(
isolate(), off_heap_entry,
- code->code_data_container(kAcquireLoad).kind_specific_flags(),
+ code->code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad),
generate_jump_to_instruction_stream);
// Trampolines may not contain any metadata since all metadata offsets,
@@ -2256,7 +2248,7 @@ Handle<Code> Factory::NewOffHeapTrampolineFor(Handle<Code> code,
Handle<Code> Factory::CopyCode(Handle<Code> code) {
Handle<CodeDataContainer> data_container = NewCodeDataContainer(
- code->code_data_container(kAcquireLoad).kind_specific_flags(),
+ code->code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad),
AllocationType::kOld);
Heap* heap = isolate()->heap();
@@ -2872,7 +2864,6 @@ Handle<JSTypedArray> Factory::NewJSTypedArray(ExternalArrayType type,
map, empty_byte_array(), buffer, byte_offset, byte_length));
JSTypedArray raw = *typed_array;
DisallowGarbageCollection no_gc;
- raw.AllocateExternalPointerEntries(isolate());
raw.set_length(length);
raw.SetOffHeapDataPtr(isolate(), buffer->backing_store(), byte_offset);
raw.set_is_length_tracking(false);
@@ -2887,7 +2878,6 @@ Handle<JSDataView> Factory::NewJSDataView(Handle<JSArrayBuffer> buffer,
isolate());
Handle<JSDataView> obj = Handle<JSDataView>::cast(NewJSArrayBufferView(
map, empty_fixed_array(), buffer, byte_offset, byte_length));
- obj->AllocateExternalPointerEntries(isolate());
obj->set_data_pointer(
isolate(), static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
return obj;
diff --git a/deps/v8/src/heap/factory.h b/deps/v8/src/heap/factory.h
index 1acf9a65c2..355a8d5d6e 100644
--- a/deps/v8/src/heap/factory.h
+++ b/deps/v8/src/heap/factory.h
@@ -116,9 +116,6 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
return handle(obj, isolate());
}
- Handle<BaselineData> NewBaselineData(Handle<Code> code,
- Handle<HeapObject> function_data);
-
Handle<Oddball> NewOddball(Handle<Map> map, const char* to_string,
Handle<Object> to_number, const char* type_of,
byte kind);
@@ -884,11 +881,15 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
CodeBuilder& set_deoptimization_data(
Handle<DeoptimizationData> deopt_data) {
+ DCHECK_NE(kind_, CodeKind::BASELINE);
DCHECK(!deopt_data.is_null());
deoptimization_data_ = deopt_data;
return *this;
}
+ inline CodeBuilder& set_interpreter_data(
+ Handle<HeapObject> interpreter_data);
+
CodeBuilder& set_is_turbofanned() {
DCHECK(!CodeKindIsUnoptimizedJSFunction(kind_));
is_turbofanned_ = true;
@@ -943,6 +944,7 @@ class V8_EXPORT_PRIVATE Factory : public FactoryBase<Factory> {
Handle<ByteArray> position_table_;
Handle<DeoptimizationData> deoptimization_data_ =
DeoptimizationData::Empty(isolate_);
+ Handle<HeapObject> interpreter_data_;
BasicBlockProfilerData* profiler_data_ = nullptr;
bool is_executable_ = true;
bool read_only_data_container_ = false;
diff --git a/deps/v8/src/heap/heap.cc b/deps/v8/src/heap/heap.cc
index 982b80bb89..0d0c4935a3 100644
--- a/deps/v8/src/heap/heap.cc
+++ b/deps/v8/src/heap/heap.cc
@@ -653,8 +653,8 @@ void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
// clang-format off
#define DICT(s) "{" << s << "}"
#define LIST(s) "[" << s << "]"
-#define ESCAPE(s) "\"" << s << "\""
-#define MEMBER(s) ESCAPE(s) << ":"
+#define QUOTE(s) "\"" << s << "\""
+#define MEMBER(s) QUOTE(s) << ":"
auto SpaceStatistics = [this](int space_index) {
HeapSpaceStatistics space_stats;
@@ -663,7 +663,7 @@ void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
std::stringstream stream;
stream << DICT(
MEMBER("name")
- << ESCAPE(BaseSpace::GetSpaceName(
+ << QUOTE(BaseSpace::GetSpaceName(
static_cast<AllocationSpace>(space_index)))
<< ","
MEMBER("size") << space_stats.space_size() << ","
@@ -674,7 +674,7 @@ void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
};
stream << DICT(
- MEMBER("isolate") << ESCAPE(reinterpret_cast<void*>(isolate())) << ","
+ MEMBER("isolate") << QUOTE(reinterpret_cast<void*>(isolate())) << ","
MEMBER("id") << gc_count() << ","
MEMBER("time_ms") << isolate()->time_millis_since_init() << ","
MEMBER("total_heap_size") << stats.total_heap_size() << ","
@@ -699,7 +699,7 @@ void Heap::DumpJSONHeapStatistics(std::stringstream& stream) {
#undef DICT
#undef LIST
-#undef ESCAPE
+#undef QUOTE
#undef MEMBER
// clang-format on
}
@@ -1929,14 +1929,7 @@ void Heap::StartIncrementalMarking(int gc_flags,
}
void Heap::CompleteSweepingFull() {
- TRACE_GC_EPOCH(tracer(), GCTracer::Scope::MC_COMPLETE_SWEEPING,
- ThreadKind::kMain);
-
- {
- TRACE_GC(tracer(), GCTracer::Scope::MC_COMPLETE_SWEEP_ARRAY_BUFFERS);
- array_buffer_sweeper()->EnsureFinished();
- }
-
+ array_buffer_sweeper()->EnsureFinished();
mark_compact_collector()->EnsureSweepingCompleted();
DCHECK(!mark_compact_collector()->sweeping_in_progress());
}
@@ -3476,15 +3469,6 @@ void Heap::RightTrimWeakFixedArray(WeakFixedArray object,
elements_to_trim * kTaggedSize);
}
-void Heap::UndoLastAllocationAt(Address addr, int size) {
- DCHECK_LE(0, size);
- if (size == 0) return;
- if (code_space_->TryFreeLast(addr, size)) {
- return;
- }
- CreateFillerObjectAt(addr, size, ClearRecordedSlots::kNo);
-}
-
template <typename T>
void Heap::CreateFillerForArray(T object, int elements_to_trim,
int bytes_to_trim) {
@@ -7171,7 +7155,7 @@ void Heap::WriteBarrierForRange(HeapObject object, TSlot start_slot,
if (incremental_marking()->IsMarking()) {
mode |= kDoMarking;
- if (!source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
+ if (!source_page->ShouldSkipEvacuationSlotRecording()) {
mode |= kDoEvacuationSlotRecording;
}
}
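The ESCAPE-to-QUOTE rename earlier in this heap.cc diff touches a small stream-macro JSON builder. One non-obvious detail of that code: "," MEMBER(...) works without an intervening << because MEMBER expands to begin with a string literal, and adjacent string literals concatenate at compile time. A compilable sketch:

    #include <iostream>
    #include <sstream>

    #define DICT(s) "{" << s << "}"
    #define QUOTE(s) "\"" << s << "\""
    #define MEMBER(s) QUOTE(s) << ":"

    int main() {
      std::stringstream stream;
      stream << DICT(MEMBER("name") << QUOTE("old_space") << ","
                     MEMBER("size") << 4096);
      std::cout << stream.str() << "\n";  // {"name":"old_space","size":4096}
    }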
diff --git a/deps/v8/src/heap/heap.h b/deps/v8/src/heap/heap.h
index 61dea819f0..e2e6316ef5 100644
--- a/deps/v8/src/heap/heap.h
+++ b/deps/v8/src/heap/heap.h
@@ -15,8 +15,10 @@
// Clients of this interface shouldn't depend on lots of heap internals.
// Do not include anything from src/heap here!
+#include "include/v8-callbacks.h"
+#include "include/v8-embedder-heap.h"
#include "include/v8-internal.h"
-#include "include/v8.h"
+#include "include/v8-isolate.h"
#include "src/base/atomic-utils.h"
#include "src/base/enum-set.h"
#include "src/base/platform/condition-variable.h"
@@ -577,8 +579,6 @@ class Heap {
int elements_to_trim);
void RightTrimWeakFixedArray(WeakFixedArray obj, int elements_to_trim);
- void UndoLastAllocationAt(Address addr, int size);
-
// Converts the given boolean condition to JavaScript boolean value.
inline Oddball ToBoolean(bool condition);
diff --git a/deps/v8/src/heap/large-spaces.cc b/deps/v8/src/heap/large-spaces.cc
index 1736fee60d..6cc5a4a868 100644
--- a/deps/v8/src/heap/large-spaces.cc
+++ b/deps/v8/src/heap/large-spaces.cc
@@ -230,7 +230,7 @@ void OldLargeObjectSpace::ClearMarkingStateOfLiveObjects() {
Marking::MarkWhite(marking_state->MarkBitFrom(obj));
MemoryChunk* chunk = MemoryChunk::FromHeapObject(obj);
RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
- chunk->ResetProgressBar();
+ chunk->ProgressBar().ResetIfEnabled();
marking_state->SetLiveBytes(chunk, 0);
}
DCHECK(marking_state->IsWhite(obj));
diff --git a/deps/v8/src/heap/mark-compact-inl.h b/deps/v8/src/heap/mark-compact-inl.h
index 2210c73958..47865a6cc7 100644
--- a/deps/v8/src/heap/mark-compact-inl.h
+++ b/deps/v8/src/heap/mark-compact-inl.h
@@ -68,7 +68,7 @@ void MarkCompactCollector::RecordSlot(HeapObject object, ObjectSlot slot,
void MarkCompactCollector::RecordSlot(HeapObject object, HeapObjectSlot slot,
HeapObject target) {
MemoryChunk* source_page = MemoryChunk::FromHeapObject(object);
- if (!source_page->ShouldSkipEvacuationSlotRecording<AccessMode::ATOMIC>()) {
+ if (!source_page->ShouldSkipEvacuationSlotRecording()) {
RecordSlot(source_page, slot, target);
}
}
@@ -76,7 +76,7 @@ void MarkCompactCollector::RecordSlot(HeapObject object, HeapObjectSlot slot,
void MarkCompactCollector::RecordSlot(MemoryChunk* source_page,
HeapObjectSlot slot, HeapObject target) {
BasicMemoryChunk* target_page = BasicMemoryChunk::FromHeapObject(target);
- if (target_page->IsEvacuationCandidate<AccessMode::ATOMIC>()) {
+ if (target_page->IsEvacuationCandidate()) {
if (V8_EXTERNAL_CODE_SPACE_BOOL &&
target_page->IsFlagSet(MemoryChunk::IS_EXECUTABLE)) {
RememberedSet<OLD_TO_CODE>::Insert<AccessMode::ATOMIC>(source_page,
diff --git a/deps/v8/src/heap/mark-compact.cc b/deps/v8/src/heap/mark-compact.cc
index 0fffb4ea45..83983ae820 100644
--- a/deps/v8/src/heap/mark-compact.cc
+++ b/deps/v8/src/heap/mark-compact.cc
@@ -646,6 +646,9 @@ void MarkCompactCollector::VerifyMarkbitsAreClean() {
void MarkCompactCollector::EnsureSweepingCompleted() {
if (!sweeper()->sweeping_in_progress()) return;
+ TRACE_GC_EPOCH(heap()->tracer(), GCTracer::Scope::MC_COMPLETE_SWEEPING,
+ ThreadKind::kMain);
+
sweeper()->EnsureCompleted();
heap()->old_space()->RefillFreeList();
heap()->code_space()->RefillFreeList();
@@ -1707,9 +1710,8 @@ void MarkCompactCollector::VisitObject(HeapObject obj) {
void MarkCompactCollector::RevisitObject(HeapObject obj) {
DCHECK(marking_state()->IsBlack(obj));
- DCHECK_IMPLIES(MemoryChunk::FromHeapObject(obj)->IsFlagSet(
- MemoryChunk::HAS_PROGRESS_BAR),
- 0u == MemoryChunk::FromHeapObject(obj)->ProgressBar());
+ DCHECK_IMPLIES(MemoryChunk::FromHeapObject(obj)->ProgressBar().IsEnabled(),
+ 0u == MemoryChunk::FromHeapObject(obj)->ProgressBar().Value());
MarkingVisitor::RevisitScope revisit(marking_visitor_.get());
marking_visitor_->Visit(obj.map(), obj);
}
@@ -2368,23 +2370,6 @@ void MarkCompactCollector::FlushBytecodeFromSFI(
DCHECK(!shared_info.is_compiled());
}
-void MarkCompactCollector::MarkBaselineDataAsLive(BaselineData baseline_data) {
- if (non_atomic_marking_state()->IsBlackOrGrey(baseline_data)) return;
-
- // Mark baseline data as live.
- non_atomic_marking_state()->WhiteToBlack(baseline_data);
-
- // Record object slots.
- DCHECK(
- non_atomic_marking_state()->IsBlackOrGrey(baseline_data.baseline_code()));
- ObjectSlot code = baseline_data.RawField(BaselineData::kBaselineCodeOffset);
- RecordSlot(baseline_data, code, HeapObject::cast(*code));
-
- DCHECK(non_atomic_marking_state()->IsBlackOrGrey(baseline_data.data()));
- ObjectSlot data = baseline_data.RawField(BaselineData::kDataOffset);
- RecordSlot(baseline_data, data, HeapObject::cast(*data));
-}
-
void MarkCompactCollector::ProcessOldCodeCandidates() {
DCHECK(FLAG_flush_bytecode || FLAG_flush_baseline_code ||
weak_objects_.code_flushing_candidates.IsEmpty());
@@ -2393,10 +2378,12 @@ void MarkCompactCollector::ProcessOldCodeCandidates() {
&flushing_candidate)) {
bool is_bytecode_live = non_atomic_marking_state()->IsBlackOrGrey(
flushing_candidate.GetBytecodeArray(isolate()));
- if (FLAG_flush_baseline_code && flushing_candidate.HasBaselineData()) {
- BaselineData baseline_data = flushing_candidate.baseline_data();
- if (non_atomic_marking_state()->IsBlackOrGrey(
- baseline_data.baseline_code())) {
+ if (FLAG_flush_baseline_code && flushing_candidate.HasBaselineCode()) {
+ CodeT baseline_codet =
+ CodeT::cast(flushing_candidate.function_data(kAcquireLoad));
+ // Safe to do a relaxed load here since the CodeT was acquire-loaded.
+ Code baseline_code = FromCodeT(baseline_codet, kRelaxedLoad);
+ if (non_atomic_marking_state()->IsBlackOrGrey(baseline_code)) {
// Currently baseline code holds bytecode array strongly and it is
// always ensured that bytecode is live if baseline code is live. Hence
// baseline code can safely load bytecode array without any additional
@@ -2404,19 +2391,23 @@ void MarkCompactCollector::ProcessOldCodeCandidates() {
// flush code if the bytecode is not live and also update baseline code
// to bailout if there is no bytecode.
DCHECK(is_bytecode_live);
- MarkBaselineDataAsLive(baseline_data);
+
+ // Regardless of whether the CodeT is a CodeDataContainer or the Code
+ // itself, if the Code is live then the CodeT has to be live and will
+ // have been marked via the owning JSFunction.
+ DCHECK(non_atomic_marking_state()->IsBlackOrGrey(baseline_codet));
} else if (is_bytecode_live) {
// If baseline code is flushed but we have a valid bytecode array reset
- // the function_data field to BytecodeArray.
- flushing_candidate.set_function_data(baseline_data.data(),
- kReleaseStore);
+ // the function_data field to the BytecodeArray/InterpreterData.
+ flushing_candidate.set_function_data(
+ baseline_code.bytecode_or_interpreter_data(), kReleaseStore);
}
}
if (!is_bytecode_live) {
// If baseline code flushing is disabled we should only flush bytecode
// from functions that don't have baseline data.
- DCHECK(FLAG_flush_baseline_code || !flushing_candidate.HasBaselineData());
+ DCHECK(FLAG_flush_baseline_code || !flushing_candidate.HasBaselineCode());
// If the BytecodeArray is dead, flush it, which will replace the field
// with an uncompiled data object.
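Both here and in the marking visitor below, the comment "Safe to do a relaxed load here since the CodeT was acquire-loaded" relies on release/acquire ordering covering subsequent relaxed reads. A minimal illustration with std::atomic; the real code uses V8's tagged-field accessors, not std::atomic:

    #include <atomic>
    #include <cassert>

    std::atomic<int> payload{0};
    std::atomic<bool> published{false};

    void Writer() {
      payload.store(42, std::memory_order_relaxed);   // kRelaxedStore
      published.store(true, std::memory_order_release);
    }

    void Reader() {
      if (published.load(std::memory_order_acquire)) {  // kAcquireLoad
        // The acquire load synchronizes with the release store, so even a
        // relaxed read of payload is guaranteed to observe 42 here.
        assert(payload.load(std::memory_order_relaxed) == 42);
      }
    }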
diff --git a/deps/v8/src/heap/mark-compact.h b/deps/v8/src/heap/mark-compact.h
index 9ce993898c..8be25e0914 100644
--- a/deps/v8/src/heap/mark-compact.h
+++ b/deps/v8/src/heap/mark-compact.h
@@ -670,10 +670,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// Flushes a weakly held bytecode array from a shared function info.
void FlushBytecodeFromSFI(SharedFunctionInfo shared_info);
- // Marks the BaselineData as live and records the slots of baseline data
- // fields. This assumes that the objects in the data fields are alive.
- void MarkBaselineDataAsLive(BaselineData baseline_data);
-
// Clears bytecode arrays / baseline code that have not been executed for
// multiple collections.
void ProcessOldCodeCandidates();
diff --git a/deps/v8/src/heap/marking-visitor-inl.h b/deps/v8/src/heap/marking-visitor-inl.h
index fe8661c516..39d446aa3a 100644
--- a/deps/v8/src/heap/marking-visitor-inl.h
+++ b/deps/v8/src/heap/marking-visitor-inl.h
@@ -8,6 +8,7 @@
#include "src/heap/marking-visitor.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
+#include "src/heap/progress-bar.h"
#include "src/heap/spaces.h"
#include "src/objects/objects.h"
#include "src/objects/smi.h"
@@ -185,11 +186,13 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitSharedFunctionInfo(
// If bytecode flushing is disabled but baseline code flushing is enabled
// then we have to visit the bytecode but not the baseline code.
DCHECK(IsBaselineCodeFlushingEnabled(code_flush_mode_));
- BaselineData baseline_data =
- BaselineData::cast(shared_info.function_data(kAcquireLoad));
- // Visit the bytecode hanging off baseline data.
- VisitPointer(baseline_data,
- baseline_data.RawField(BaselineData::kDataOffset));
+ CodeT baseline_codet = CodeT::cast(shared_info.function_data(kAcquireLoad));
+ // Safe to do a relaxed load here since the CodeT was acquire-loaded.
+ Code baseline_code = FromCodeT(baseline_codet, kRelaxedLoad);
+ // Visit the bytecode hanging off baseline code.
+ VisitPointer(baseline_code,
+ baseline_code.RawField(
+ Code::kDeoptimizationDataOrInterpreterDataOffset));
weak_objects_->code_flushing_candidates.Push(task_id_, shared_info);
} else {
// In other cases, record as a flushing candidate since we have old
@@ -206,13 +209,13 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitSharedFunctionInfo(
template <typename ConcreteVisitor, typename MarkingState>
int MarkingVisitorBase<ConcreteVisitor, MarkingState>::
VisitFixedArrayWithProgressBar(Map map, FixedArray object,
- MemoryChunk* chunk) {
+ ProgressBar& progress_bar) {
const int kProgressBarScanningChunk = kMaxRegularHeapObjectSize;
STATIC_ASSERT(kMaxRegularHeapObjectSize % kTaggedSize == 0);
DCHECK(concrete_visitor()->marking_state()->IsBlackOrGrey(object));
concrete_visitor()->marking_state()->GreyToBlack(object);
int size = FixedArray::BodyDescriptor::SizeOf(map, object);
- size_t current_progress_bar = chunk->ProgressBar();
+ size_t current_progress_bar = progress_bar.Value();
int start = static_cast<int>(current_progress_bar);
if (start == 0) {
this->VisitMapPointer(object);
@@ -221,7 +224,7 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::
int end = std::min(size, start + kProgressBarScanningChunk);
if (start < end) {
VisitPointers(object, object.RawField(start), object.RawField(end));
- bool success = chunk->TrySetProgressBar(current_progress_bar, end);
+ bool success = progress_bar.TrySetNewValue(current_progress_bar, end);
CHECK(success);
if (end < size) {
// The object can be pushed back onto the marking worklist only after
@@ -237,9 +240,10 @@ int MarkingVisitorBase<ConcreteVisitor, MarkingState>::VisitFixedArray(
Map map, FixedArray object) {
// Arrays with the progress bar are not left-trimmable because they reside
// in the large object space.
- MemoryChunk* chunk = MemoryChunk::FromHeapObject(object);
- return chunk->IsFlagSet<AccessMode::ATOMIC>(MemoryChunk::HAS_PROGRESS_BAR)
- ? VisitFixedArrayWithProgressBar(map, object, chunk)
+ ProgressBar& progress_bar =
+ MemoryChunk::FromHeapObject(object)->ProgressBar();
+ return progress_bar.IsEnabled()
+ ? VisitFixedArrayWithProgressBar(map, object, progress_bar)
: concrete_visitor()->VisitLeftTrimmableArray(map, object);
}
diff --git a/deps/v8/src/heap/marking-visitor.h b/deps/v8/src/heap/marking-visitor.h
index 555b2e8118..6a016a143e 100644
--- a/deps/v8/src/heap/marking-visitor.h
+++ b/deps/v8/src/heap/marking-visitor.h
@@ -193,7 +193,7 @@ class MarkingVisitorBase : public HeapVisitor<int, ConcreteVisitor> {
template <typename T>
int VisitEmbedderTracingSubclass(Map map, T object);
V8_INLINE int VisitFixedArrayWithProgressBar(Map map, FixedArray object,
- MemoryChunk* chunk);
+ ProgressBar& progress_bar);
// Marks the descriptor array black without pushing it on the marking work
// list and visits its header. Returns the size of the descriptor array
// if it was successfully marked as black.
diff --git a/deps/v8/src/heap/memory-chunk-layout.h b/deps/v8/src/heap/memory-chunk-layout.h
index f37583ab42..1b958f0cbf 100644
--- a/deps/v8/src/heap/memory-chunk-layout.h
+++ b/deps/v8/src/heap/memory-chunk-layout.h
@@ -7,6 +7,7 @@
#include "src/heap/heap.h"
#include "src/heap/list.h"
+#include "src/heap/progress-bar.h"
#include "src/heap/slot-set.h"
#ifdef V8_ENABLE_CONSERVATIVE_STACK_SCANNING
@@ -50,7 +51,7 @@ class V8_EXPORT_PRIVATE MemoryChunkLayout {
FIELD(VirtualMemory, Reservation),
// MemoryChunk fields:
FIELD(SlotSet* [kNumSets], SlotSet),
- FIELD(std::atomic<size_t>, ProgressBar),
+ FIELD(ProgressBar, ProgressBar),
FIELD(std::atomic<intptr_t>, LiveByteCount),
FIELD(SlotSet*, SweepingSlotSet),
FIELD(TypedSlotsSet* [kNumSets], TypedSlotSet),
diff --git a/deps/v8/src/heap/memory-chunk.cc b/deps/v8/src/heap/memory-chunk.cc
index 0d9afdb1c7..29dbf74934 100644
--- a/deps/v8/src/heap/memory-chunk.cc
+++ b/deps/v8/src/heap/memory-chunk.cc
@@ -130,7 +130,7 @@ MemoryChunk* MemoryChunk::Initialize(BasicMemoryChunk* basic_chunk, Heap* heap,
// Not actually used but initialize anyway for predictability.
chunk->invalidated_slots_[OLD_TO_CODE] = nullptr;
}
- chunk->progress_bar_ = 0;
+ chunk->progress_bar_.Initialize();
chunk->set_concurrent_sweeping_state(ConcurrentSweepingState::kDone);
chunk->page_protection_change_mutex_ = new base::Mutex();
chunk->write_unprotect_counter_ = 0;
diff --git a/deps/v8/src/heap/memory-chunk.h b/deps/v8/src/heap/memory-chunk.h
index 66196c1f13..ad9ac72f83 100644
--- a/deps/v8/src/heap/memory-chunk.h
+++ b/deps/v8/src/heap/memory-chunk.h
@@ -162,22 +162,10 @@ class MemoryChunk : public BasicMemoryChunk {
// Approximate amount of physical memory committed for this chunk.
V8_EXPORT_PRIVATE size_t CommittedPhysicalMemory();
- size_t ProgressBar() {
- DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
- return progress_bar_.load(std::memory_order_acquire);
- }
-
- bool TrySetProgressBar(size_t old_value, size_t new_value) {
- DCHECK(IsFlagSet<AccessMode::ATOMIC>(HAS_PROGRESS_BAR));
- return progress_bar_.compare_exchange_strong(old_value, new_value,
- std::memory_order_acq_rel);
- }
-
- void ResetProgressBar() {
- if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
- progress_bar_.store(0, std::memory_order_release);
- }
+ class ProgressBar& ProgressBar() {
+ return progress_bar_;
}
+ const class ProgressBar& ProgressBar() const { return progress_bar_; }
inline void IncrementExternalBackingStoreBytes(ExternalBackingStoreType type,
size_t amount);
@@ -256,9 +244,9 @@ class MemoryChunk : public BasicMemoryChunk {
// is ceil(size() / kPageSize).
SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
- // Used by the incremental marker to keep track of the scanning progress in
- // large objects that have a progress bar and are scanned in increments.
- std::atomic<size_t> progress_bar_;
+ // Used by the marker to keep track of the scanning progress in large objects
+ // that have a progress bar and are scanned in increments.
+ class ProgressBar progress_bar_;
// Count of bytes marked black on page.
std::atomic<intptr_t> live_byte_count_;
diff --git a/deps/v8/src/heap/memory-measurement.cc b/deps/v8/src/heap/memory-measurement.cc
index a29ffb10e1..87cfb06faf 100644
--- a/deps/v8/src/heap/memory-measurement.cc
+++ b/deps/v8/src/heap/memory-measurement.cc
@@ -4,7 +4,7 @@
#include "src/heap/memory-measurement.h"
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
#include "src/api/api-inl.h"
#include "src/execution/isolate-inl.h"
#include "src/heap/factory-inl.h"
diff --git a/deps/v8/src/heap/memory-measurement.h b/deps/v8/src/heap/memory-measurement.h
index cf72c57abd..2b5377943c 100644
--- a/deps/v8/src/heap/memory-measurement.h
+++ b/deps/v8/src/heap/memory-measurement.h
@@ -8,6 +8,7 @@
#include <list>
#include <unordered_map>
+#include "include/v8-statistics.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/base/utils/random-number-generator.h"
#include "src/common/globals.h"
diff --git a/deps/v8/src/heap/new-spaces.cc b/deps/v8/src/heap/new-spaces.cc
index d08fe48f23..b935a585bc 100644
--- a/deps/v8/src/heap/new-spaces.cc
+++ b/deps/v8/src/heap/new-spaces.cc
@@ -57,7 +57,7 @@ bool SemiSpace::EnsureCurrentCapacity() {
memory_chunk_list_.Remove(current_page);
// Clear new space flags to avoid this page being treated as a new
// space page that is potentially being swept.
- current_page->SetFlags(0, Page::kIsInYoungGenerationMask);
+ current_page->ClearFlags(Page::kIsInYoungGenerationMask);
heap()->memory_allocator()->Free<MemoryAllocator::kPooledAndQueue>(
current_page);
current_page = next_current;
@@ -76,8 +76,7 @@ bool SemiSpace::EnsureCurrentCapacity() {
DCHECK_NOT_NULL(current_page);
memory_chunk_list_.PushBack(current_page);
marking_state->ClearLiveness(current_page);
- current_page->SetFlags(first_page()->GetFlags(),
- static_cast<uintptr_t>(Page::kCopyAllFlags));
+ current_page->SetFlags(first_page()->GetFlags(), Page::kAllFlagsMask);
heap()->CreateFillerObjectAt(current_page->area_start(),
static_cast<int>(current_page->area_size()),
ClearRecordedSlots::kNo);
@@ -214,7 +213,8 @@ void SemiSpace::ShrinkTo(size_t new_capacity) {
target_capacity_ = new_capacity;
}
-void SemiSpace::FixPagesFlags(intptr_t flags, intptr_t mask) {
+void SemiSpace::FixPagesFlags(Page::MainThreadFlags flags,
+ Page::MainThreadFlags mask) {
for (Page* page : *this) {
page->set_owner(this);
page->SetFlags(flags, mask);
@@ -253,8 +253,7 @@ void SemiSpace::RemovePage(Page* page) {
}
void SemiSpace::PrependPage(Page* page) {
- page->SetFlags(current_page()->GetFlags(),
- static_cast<uintptr_t>(Page::kCopyAllFlags));
+ page->SetFlags(current_page()->GetFlags(), Page::kAllFlagsMask);
page->set_owner(this);
memory_chunk_list_.PushFront(page);
current_capacity_ += Page::kPageSize;
@@ -276,7 +275,7 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
DCHECK(from->first_page());
DCHECK(to->first_page());
- intptr_t saved_to_space_flags = to->current_page()->GetFlags();
+ auto saved_to_space_flags = to->current_page()->GetFlags();
// We swap all properties but id_.
std::swap(from->target_capacity_, to->target_capacity_);
@@ -289,7 +288,7 @@ void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
to->external_backing_store_bytes_);
to->FixPagesFlags(saved_to_space_flags, Page::kCopyOnFlipFlagsMask);
- from->FixPagesFlags(0, 0);
+ from->FixPagesFlags(Page::NO_FLAGS, Page::NO_FLAGS);
}
void SemiSpace::set_age_mark(Address mark) {
diff --git a/deps/v8/src/heap/new-spaces.h b/deps/v8/src/heap/new-spaces.h
index 7f6f46c78b..45129acea1 100644
--- a/deps/v8/src/heap/new-spaces.h
+++ b/deps/v8/src/heap/new-spaces.h
@@ -173,7 +173,7 @@ class SemiSpace : public Space {
void RewindPages(int num_pages);
// Copies the flags into the masked positions on all pages in the space.
- void FixPagesFlags(intptr_t flags, intptr_t flag_mask);
+ void FixPagesFlags(Page::MainThreadFlags flags, Page::MainThreadFlags mask);
// The currently committed space capacity.
size_t current_capacity_;
diff --git a/deps/v8/src/heap/objects-visiting.cc b/deps/v8/src/heap/objects-visiting.cc
index a33844743f..e3514c51fa 100644
--- a/deps/v8/src/heap/objects-visiting.cc
+++ b/deps/v8/src/heap/objects-visiting.cc
@@ -26,7 +26,7 @@ struct WeakListVisitor;
template <class T>
Object VisitWeakList(Heap* heap, Object list, WeakObjectRetainer* retainer) {
- Object undefined = ReadOnlyRoots(heap).undefined_value();
+ HeapObject undefined = ReadOnlyRoots(heap).undefined_value();
Object head = undefined;
T tail;
bool record_slots = MustRecordSlots(heap);
@@ -47,7 +47,7 @@ Object VisitWeakList(Heap* heap, Object list, WeakObjectRetainer* retainer) {
} else {
// Subsequent elements in the list.
DCHECK(!tail.is_null());
- WeakListVisitor<T>::SetWeakNext(tail, retained);
+ WeakListVisitor<T>::SetWeakNext(tail, HeapObject::cast(retained));
if (record_slots) {
HeapObject slot_holder = WeakListVisitor<T>::WeakNextHolder(tail);
int slot_offset = WeakListVisitor<T>::WeakNextOffset();
@@ -187,7 +187,7 @@ struct WeakListVisitor<AllocationSite> {
template <>
struct WeakListVisitor<JSFinalizationRegistry> {
- static void SetWeakNext(JSFinalizationRegistry obj, Object next) {
+ static void SetWeakNext(JSFinalizationRegistry obj, HeapObject next) {
obj.set_next_dirty(next, UPDATE_WEAK_WRITE_BARRIER);
}
diff --git a/deps/v8/src/heap/progress-bar.h b/deps/v8/src/heap/progress-bar.h
new file mode 100644
index 0000000000..b00558b684
--- /dev/null
+++ b/deps/v8/src/heap/progress-bar.h
@@ -0,0 +1,61 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_PROGRESS_BAR_H_
+#define V8_HEAP_PROGRESS_BAR_H_
+
+#include <atomic>
+#include <cstdint>
+
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace internal {
+
+// The progress bar keeps track of how many bytes of a single object have been
+// processed. The progress bar itself must be enabled before it's used.
+//
+// Only large objects use the progress bar which is stored in their page header.
+// These objects are scanned in increments and will be kept black while being
+// scanned. Even if the mutator writes to them, they are kept black and a
+// white-to-grey transition is performed on the written value instead.
+//
+// The progress bar starts as disabled. After enabling (through `Enable()`), it
+// can never be disabled again.
+class ProgressBar final {
+ public:
+ void Initialize() { value_ = kDisabledSentinel; }
+ void Enable() { value_ = 0; }
+ bool IsEnabled() const {
+ return value_.load(std::memory_order_acquire) != kDisabledSentinel;
+ }
+
+ size_t Value() const {
+ DCHECK(IsEnabled());
+ return value_.load(std::memory_order_acquire);
+ }
+
+ bool TrySetNewValue(size_t old_value, size_t new_value) {
+ DCHECK(IsEnabled());
+ DCHECK_NE(kDisabledSentinel, new_value);
+ return value_.compare_exchange_strong(old_value, new_value,
+ std::memory_order_acq_rel);
+ }
+
+ void ResetIfEnabled() {
+ if (IsEnabled()) {
+ value_.store(0, std::memory_order_release);
+ }
+ }
+
+ private:
+ static constexpr size_t kDisabledSentinel = SIZE_MAX;
+
+ std::atomic<size_t> value_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_HEAP_PROGRESS_BAR_H_
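A sketch of how a marker might drive this class for chunked scanning, mirroring VisitFixedArrayWithProgressBar() later in this patch. VisitRange() is a hypothetical stand-in for the real pointer-visiting step, the include path assumes the file above is available, and the bar is assumed to have been Enable()d when the object was allocated:

    #include <algorithm>
    #include <cassert>
    #include <cstddef>

    #include "src/heap/progress-bar.h"

    void VisitRange(size_t, size_t) {}  // hypothetical visiting step

    // Scans [0, size) in bounded increments, resuming where the bar left
    // off. Callers re-queue the object while the returned value < size.
    size_t ScanIncrement(v8::internal::ProgressBar& bar, size_t size,
                         size_t chunk) {
      const size_t start = bar.Value();
      const size_t end = std::min(size, start + chunk);
      if (start < end) {
        VisitRange(start, end);
        // Only one thread advances the bar in this sketch, so the CAS holds.
        bool ok = bar.TrySetNewValue(start, end);
        assert(ok);
        (void)ok;
      }
      return end;
    }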
diff --git a/deps/v8/src/heap/setup-heap-internal.cc b/deps/v8/src/heap/setup-heap-internal.cc
index 08488eacd0..31e8c92258 100644
--- a/deps/v8/src/heap/setup-heap-internal.cc
+++ b/deps/v8/src/heap/setup-heap-internal.cc
@@ -59,7 +59,7 @@ Handle<SharedFunctionInfo> CreateSharedFunctionInfo(
Handle<SharedFunctionInfo> shared =
isolate->factory()->NewSharedFunctionInfoForBuiltin(
isolate->factory()->empty_string(), builtin, kind);
- shared->set_internal_formal_parameter_count(len);
+ shared->set_internal_formal_parameter_count(JSParameterCount(len));
shared->set_length(len);
return shared;
}
diff --git a/deps/v8/src/heap/spaces.cc b/deps/v8/src/heap/spaces.cc
index 4d3fd9411f..a1992c3e5e 100644
--- a/deps/v8/src/heap/spaces.cc
+++ b/deps/v8/src/heap/spaces.cc
@@ -45,6 +45,9 @@ namespace internal {
STATIC_ASSERT(kClearedWeakHeapObjectLower32 > 0);
STATIC_ASSERT(kClearedWeakHeapObjectLower32 < Page::kHeaderSize);
+// static
+constexpr Page::MainThreadFlags Page::kCopyOnFlipFlagsMask;
+
void Page::AllocateFreeListCategories() {
DCHECK_NULL(categories_);
categories_ =
@@ -82,7 +85,7 @@ Page* Page::ConvertNewToOld(Page* old_page) {
DCHECK(old_page->InNewSpace());
OldSpace* old_space = old_page->heap()->old_space();
old_page->set_owner(old_space);
- old_page->SetFlags(0, static_cast<uintptr_t>(~0));
+ old_page->ClearFlags(Page::kAllFlagsMask);
Page* new_page = old_space->InitializePage(old_page);
old_space->AddPage(new_page);
return new_page;
diff --git a/deps/v8/src/heap/spaces.h b/deps/v8/src/heap/spaces.h
index 6a047fd375..eb71467f78 100644
--- a/deps/v8/src/heap/spaces.h
+++ b/deps/v8/src/heap/spaces.h
@@ -211,13 +211,11 @@ STATIC_ASSERT(sizeof(std::atomic<intptr_t>) == kSystemPointerSize);
// Page* p = Page::FromAllocationAreaAddress(address);
class Page : public MemoryChunk {
public:
- static const intptr_t kCopyAllFlags = ~0;
-
// Page flags copied from from-space to to-space when flipping semispaces.
- static const intptr_t kCopyOnFlipFlagsMask =
- static_cast<intptr_t>(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
- static_cast<intptr_t>(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
- static_cast<intptr_t>(MemoryChunk::INCREMENTAL_MARKING);
+ static constexpr MainThreadFlags kCopyOnFlipFlagsMask =
+ MainThreadFlags(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
+ MainThreadFlags(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
+ MainThreadFlags(MemoryChunk::INCREMENTAL_MARKING);
// Returns the page containing a given address. The address ranges
// from [page_addr .. page_addr + kPageSize[. This only works if the object
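
Replacing the raw intptr_t masks with the typed Page::MainThreadFlags means a plain integer can no longer be passed where a flag set is expected. A simplified sketch of that type-safe flags pattern (illustrative only, not V8's actual base::Flags implementation):

#include <cstdint>

// Bitwise operations stay within the wrapper type, so masks compose without
// ever decaying to a bare integer.
enum class Flag : uintptr_t {
  kPointersToHere = 1 << 0,
  kPointersFromHere = 1 << 1,
  kIncrementalMarking = 1 << 2,
};

class MainThreadFlags {
 public:
  constexpr MainThreadFlags() = default;
  constexpr explicit MainThreadFlags(Flag f)
      : bits_(static_cast<uintptr_t>(f)) {}
  constexpr MainThreadFlags operator|(MainThreadFlags other) const {
    MainThreadFlags result;
    result.bits_ = bits_ | other.bits_;
    return result;
  }
  constexpr bool contains(Flag f) const {
    return (bits_ & static_cast<uintptr_t>(f)) != 0;
  }

 private:
  uintptr_t bits_ = 0;
};

// Masks compose like kCopyOnFlipFlagsMask in the patch:
constexpr MainThreadFlags kCopyOnFlip = MainThreadFlags(Flag::kPointersToHere) |
                                        MainThreadFlags(Flag::kPointersFromHere) |
                                        MainThreadFlags(Flag::kIncrementalMarking);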
diff --git a/deps/v8/src/heap/third-party/heap-api.h b/deps/v8/src/heap/third-party/heap-api.h
index 9354c7bca8..2a7409040b 100644
--- a/deps/v8/src/heap/third-party/heap-api.h
+++ b/deps/v8/src/heap/third-party/heap-api.h
@@ -5,11 +5,13 @@
#ifndef V8_HEAP_THIRD_PARTY_HEAP_API_H_
#define V8_HEAP_THIRD_PARTY_HEAP_API_H_
-#include "include/v8.h"
#include "src/base/address-region.h"
#include "src/heap/heap.h"
namespace v8 {
+
+class Isolate;
+
namespace internal {
namespace third_party_heap {
diff --git a/deps/v8/src/ic/OWNERS b/deps/v8/src/ic/OWNERS
index 3c99566e98..369dfdf31b 100644
--- a/deps/v8/src/ic/OWNERS
+++ b/deps/v8/src/ic/OWNERS
@@ -1,5 +1,4 @@
ishell@chromium.org
jkummerow@chromium.org
mvstanton@chromium.org
-mythria@chromium.org
verwaest@chromium.org
diff --git a/deps/v8/src/init/bootstrapper.cc b/deps/v8/src/init/bootstrapper.cc
index 326944e13e..e81b74d440 100644
--- a/deps/v8/src/init/bootstrapper.cc
+++ b/deps/v8/src/init/bootstrapper.cc
@@ -368,7 +368,8 @@ void Bootstrapper::DetachGlobal(Handle<Context> env) {
// causing a map change.
JSObject::ForceSetPrototype(isolate_, global_proxy,
isolate_->factory()->null_value());
- global_proxy->map().SetConstructor(roots.null_value());
+ global_proxy->map().set_constructor_or_back_pointer(roots.null_value(),
+ kRelaxedStore);
if (FLAG_track_detached_contexts) {
isolate_->AddDetachedContext(env);
}
@@ -551,7 +552,7 @@ V8_NOINLINE Handle<JSFunction> SimpleCreateFunction(Isolate* isolate,
fun->shared().set_native(true);
if (adapt) {
- fun->shared().set_internal_formal_parameter_count(len);
+ fun->shared().set_internal_formal_parameter_count(JSParameterCount(len));
} else {
fun->shared().DontAdaptArguments();
}
@@ -1548,9 +1549,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate_, object_function, "seal",
Builtin::kObjectSeal, 1, false);
- Handle<JSFunction> object_create = SimpleInstallFunction(
- isolate_, object_function, "create", Builtin::kObjectCreate, 2, false);
- native_context()->set_object_create(*object_create);
+ SimpleInstallFunction(isolate_, object_function, "create",
+ Builtin::kObjectCreate, 2, false);
SimpleInstallFunction(isolate_, object_function, "defineProperties",
Builtin::kObjectDefineProperties, 2, true);
@@ -2375,7 +2375,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Context::PROMISE_FUNCTION_INDEX);
Handle<SharedFunctionInfo> shared(promise_fun->shared(), isolate_);
- shared->set_internal_formal_parameter_count(1);
+ shared->set_internal_formal_parameter_count(JSParameterCount(1));
shared->set_length(1);
InstallSpeciesGetter(isolate_, promise_fun);
@@ -2438,7 +2438,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
InstallWithIntrinsicDefaultProto(isolate_, regexp_fun,
Context::REGEXP_FUNCTION_INDEX);
Handle<SharedFunctionInfo> shared(regexp_fun->shared(), isolate_);
- shared->set_internal_formal_parameter_count(2);
+ shared->set_internal_formal_parameter_count(JSParameterCount(2));
shared->set_length(2);
{
@@ -2462,7 +2462,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
Builtin::kRegExpPrototypeFlagsGetter, true);
SimpleInstallGetter(isolate_, prototype, factory->global_string(),
Builtin::kRegExpPrototypeGlobalGetter, true);
- SimpleInstallGetter(isolate(), prototype, factory->has_indices_string(),
+ SimpleInstallGetter(isolate(), prototype, factory->hasIndices_string(),
Builtin::kRegExpPrototypeHasIndicesGetter, true);
SimpleInstallGetter(isolate_, prototype, factory->ignoreCase_string(),
Builtin::kRegExpPrototypeIgnoreCaseGetter, true);
@@ -2746,9 +2746,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
SimpleInstallFunction(isolate_, math, "cos", Builtin::kMathCos, 1, true);
SimpleInstallFunction(isolate_, math, "cosh", Builtin::kMathCosh, 1, true);
SimpleInstallFunction(isolate_, math, "exp", Builtin::kMathExp, 1, true);
- Handle<JSFunction> math_floor = SimpleInstallFunction(
- isolate_, math, "floor", Builtin::kMathFloor, 1, true);
- native_context()->set_math_floor(*math_floor);
+ SimpleInstallFunction(isolate_, math, "floor", Builtin::kMathFloor, 1,
+ true);
SimpleInstallFunction(isolate_, math, "fround", Builtin::kMathFround, 1,
true);
SimpleInstallFunction(isolate_, math, "hypot", Builtin::kMathHypot, 2,
@@ -2762,9 +2761,7 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
true);
SimpleInstallFunction(isolate_, math, "max", Builtin::kMathMax, 2, false);
SimpleInstallFunction(isolate_, math, "min", Builtin::kMathMin, 2, false);
- Handle<JSFunction> math_pow = SimpleInstallFunction(
- isolate_, math, "pow", Builtin::kMathPow, 2, true);
- native_context()->set_math_pow(*math_pow);
+ SimpleInstallFunction(isolate_, math, "pow", Builtin::kMathPow, 2, true);
SimpleInstallFunction(isolate_, math, "random", Builtin::kMathRandom, 0,
true);
SimpleInstallFunction(isolate_, math, "round", Builtin::kMathRound, 1,
@@ -3780,7 +3777,8 @@ void Genesis::InitializeGlobal(Handle<JSGlobalObject> global_object,
isolate_->proxy_map()->SetConstructor(*proxy_function);
- proxy_function->shared().set_internal_formal_parameter_count(2);
+ proxy_function->shared().set_internal_formal_parameter_count(
+ JSParameterCount(2));
proxy_function->shared().set_length(2);
native_context()->set_proxy_function(*proxy_function);
@@ -4129,10 +4127,9 @@ bool Genesis::CompileExtension(Isolate* isolate, v8::Extension* extension) {
Handle<String> script_name =
factory->NewStringFromUtf8(name).ToHandleChecked();
MaybeHandle<SharedFunctionInfo> maybe_function_info =
- Compiler::GetSharedFunctionInfoForScript(
- isolate, source, ScriptDetails(script_name), extension, nullptr,
- ScriptCompiler::kNoCompileOptions,
- ScriptCompiler::kNoCacheBecauseV8Extension, EXTENSION_CODE);
+ Compiler::GetSharedFunctionInfoForScriptWithExtension(
+ isolate, source, ScriptDetails(script_name), extension,
+ ScriptCompiler::kNoCompileOptions, EXTENSION_CODE);
if (!maybe_function_info.ToHandle(&function_info)) return false;
cache->Add(isolate, name, function_info);
}
@@ -4594,6 +4591,20 @@ void Genesis::InitializeGlobal_harmony_intl_locale_info() {
Builtin::kLocalePrototypeWeekInfo, true);
}
+void Genesis::InitializeGlobal_harmony_intl_enumeration() {
+ if (!FLAG_harmony_intl_enumeration) return;
+
+ Handle<JSObject> intl = Handle<JSObject>::cast(
+ JSReceiver::GetProperty(
+ isolate(),
+ Handle<JSReceiver>(native_context()->global_object(), isolate()),
+ factory()->InternalizeUtf8String("Intl"))
+ .ToHandleChecked());
+
+ SimpleInstallFunction(isolate(), intl, "supportedValuesOf",
+ Builtin::kIntlSupportedValuesOf, 0, false);
+}
+
#endif // V8_INTL_SUPPORT
Handle<JSFunction> Genesis::CreateArrayBuffer(
diff --git a/deps/v8/src/init/bootstrapper.h b/deps/v8/src/init/bootstrapper.h
index 19f028048e..b92e755c93 100644
--- a/deps/v8/src/init/bootstrapper.h
+++ b/deps/v8/src/init/bootstrapper.h
@@ -5,6 +5,9 @@
#ifndef V8_INIT_BOOTSTRAPPER_H_
#define V8_INIT_BOOTSTRAPPER_H_
+#include "include/v8-context.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-snapshot.h"
#include "src/heap/factory.h"
#include "src/objects/fixed-array.h"
#include "src/objects/shared-function-info.h"
diff --git a/deps/v8/src/init/heap-symbols.h b/deps/v8/src/init/heap-symbols.h
index d4737bf331..f30192526e 100644
--- a/deps/v8/src/init/heap-symbols.h
+++ b/deps/v8/src/init/heap-symbols.h
@@ -198,13 +198,14 @@
V(_, dot_string, ".") \
V(_, dot_switch_tag_string, ".switch_tag") \
V(_, dotAll_string, "dotAll") \
- V(_, enumerable_string, "enumerable") \
- V(_, element_string, "element") \
V(_, Error_string, "Error") \
- V(_, errors_string, "errors") \
+ V(_, EvalError_string, "EvalError") \
+ V(_, element_string, "element") \
+ V(_, enumerable_string, "enumerable") \
V(_, error_to_string, "[object Error]") \
+ V(_, errors_string, "errors") \
V(_, eval_string, "eval") \
- V(_, EvalError_string, "EvalError") \
+ V(_, exception_string, "exception") \
V(_, exec_string, "exec") \
V(_, false_string, "false") \
V(_, FinalizationRegistry_string, "FinalizationRegistry") \
@@ -226,7 +227,7 @@
V(_, groups_string, "groups") \
V(_, growable_string, "growable") \
V(_, has_string, "has") \
- V(_, has_indices_string, "hasIndices") \
+ V(_, hasIndices_string, "hasIndices") \
V(_, ignoreCase_string, "ignoreCase") \
V(_, illegal_access_string, "illegal access") \
V(_, illegal_argument_string, "illegal argument") \
diff --git a/deps/v8/src/init/isolate-allocator.cc b/deps/v8/src/init/isolate-allocator.cc
index a479f1ab94..34a24a348f 100644
--- a/deps/v8/src/init/isolate-allocator.cc
+++ b/deps/v8/src/init/isolate-allocator.cc
@@ -8,6 +8,7 @@
#include "src/common/ptr-compr.h"
#include "src/execution/isolate.h"
#include "src/heap/code-range.h"
+#include "src/init/vm-cage.h"
#include "src/utils/memcopy.h"
#include "src/utils/utils.h"
@@ -74,7 +75,28 @@ void IsolateAllocator::FreeProcessWidePtrComprCageForTesting() {
void IsolateAllocator::InitializeOncePerProcess() {
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
PtrComprCageReservationParams params;
- if (!GetProcessWidePtrComprCage()->InitReservation(params)) {
+ base::AddressRegion existing_reservation;
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ // TODO(chromium:1218005) avoid the name collision with
+ // v8::internal::VirtualMemoryCage and ideally figure out a clear naming
+ // scheme for the different types of virtual memory cages.
+
+ // For now, we allow the virtual memory cage to be disabled even when
+ // compiling with v8_enable_virtual_memory_cage. This fallback will be
+ // disallowed in the future, at the latest once ArrayBuffers are referenced
+ // through an offset rather than a raw pointer.
+ if (GetProcessWideVirtualMemoryCage()->is_disabled()) {
+ CHECK(kAllowBackingStoresOutsideDataCage);
+ } else {
+ auto cage = GetProcessWideVirtualMemoryCage();
+ CHECK(cage->is_initialized());
+ DCHECK_EQ(params.reservation_size, cage->pointer_cage_size());
+ existing_reservation = base::AddressRegion(cage->pointer_cage_base(),
+ cage->pointer_cage_size());
+ }
+#endif
+ if (!GetProcessWidePtrComprCage()->InitReservation(params,
+ existing_reservation)) {
V8::FatalProcessOutOfMemory(
nullptr,
"Failed to reserve virtual memory for process-wide V8 "
diff --git a/deps/v8/src/init/startup-data-util.cc b/deps/v8/src/init/startup-data-util.cc
index d480e3dcc2..ba3a123651 100644
--- a/deps/v8/src/init/startup-data-util.cc
+++ b/deps/v8/src/init/startup-data-util.cc
@@ -7,6 +7,8 @@
#include <stdlib.h>
#include <string.h>
+#include "include/v8-initialization.h"
+#include "include/v8-snapshot.h"
#include "src/base/file-utils.h"
#include "src/base/logging.h"
#include "src/base/platform/platform.h"
@@ -76,11 +78,6 @@ void LoadFromFile(const char* snapshot_blob) {
void InitializeExternalStartupData(const char* directory_path) {
#ifdef V8_USE_EXTERNAL_STARTUP_DATA
const char* snapshot_name = "snapshot_blob.bin";
-#ifdef V8_MULTI_SNAPSHOTS
- if (!FLAG_untrusted_code_mitigations) {
- snapshot_name = "snapshot_blob_trusted.bin";
- }
-#endif
std::unique_ptr<char[]> snapshot =
base::RelativePath(directory_path, snapshot_name);
LoadFromFile(snapshot.get());
diff --git a/deps/v8/src/init/startup-data-util.h b/deps/v8/src/init/startup-data-util.h
index 5d49b0b1a1..90751e558e 100644
--- a/deps/v8/src/init/startup-data-util.h
+++ b/deps/v8/src/init/startup-data-util.h
@@ -5,8 +5,6 @@
#ifndef V8_INIT_STARTUP_DATA_UTIL_H_
#define V8_INIT_STARTUP_DATA_UTIL_H_
-#include "include/v8.h"
-
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/init/v8.cc b/deps/v8/src/init/v8.cc
index 70367d0697..7258ba8d93 100644
--- a/deps/v8/src/init/v8.cc
+++ b/deps/v8/src/init/v8.cc
@@ -20,6 +20,7 @@
#include "src/execution/runtime-profiler.h"
#include "src/execution/simulator.h"
#include "src/init/bootstrapper.h"
+#include "src/init/vm-cage.h"
#include "src/libsampler/sampler.h"
#include "src/objects/elements.h"
#include "src/objects/objects-inl.h"
@@ -73,6 +74,17 @@ void V8::TearDown() {
}
void V8::InitializeOncePerProcessImpl() {
+ CHECK(platform_);
+
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ if (!GetProcessWideVirtualMemoryCage()->is_initialized()) {
+ // For now, we still allow the cage to be disabled even if V8 was compiled
+ // with V8_VIRTUAL_MEMORY_CAGE. This will eventually be forbidden.
+ CHECK(kAllowBackingStoresOutsideDataCage);
+ GetProcessWideVirtualMemoryCage()->Disable();
+ }
+#endif
+
// Update logging information before enforcing flag implications.
bool* log_all_flags[] = {&FLAG_turbo_profiling_log_builtins,
&FLAG_log_all,
@@ -207,6 +219,15 @@ void V8::InitializePlatform(v8::Platform* platform) {
#endif
}
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+bool V8::InitializeVirtualMemoryCage() {
+ // Platform must have been initialized already.
+ CHECK(platform_);
+ v8::PageAllocator* page_allocator = GetPlatformPageAllocator();
+ return GetProcessWideVirtualMemoryCage()->Initialize(page_allocator);
+}
+#endif
+
void V8::ShutdownPlatform() {
CHECK(platform_);
#if defined(V8_OS_WIN) && defined(V8_ENABLE_SYSTEM_INSTRUMENTATION)
@@ -216,6 +237,13 @@ void V8::ShutdownPlatform() {
#endif
v8::tracing::TracingCategoryObserver::TearDown();
v8::base::SetPrintStackTrace(nullptr);
+
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ // TODO(chromium:1218005) alternatively, this could move to its own
+ // public TearDownVirtualMemoryCage function.
+ GetProcessWideVirtualMemoryCage()->TearDown();
+#endif
+
platform_ = nullptr;
}
diff --git a/deps/v8/src/init/v8.h b/deps/v8/src/init/v8.h
index a8cd6832cd..bbde9bfd13 100644
--- a/deps/v8/src/init/v8.h
+++ b/deps/v8/src/init/v8.h
@@ -29,6 +29,10 @@ class V8 : public AllStatic {
const char* location,
bool is_heap_oom = false);
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ static bool InitializeVirtualMemoryCage();
+#endif
+
static void InitializePlatform(v8::Platform* platform);
static void ShutdownPlatform();
V8_EXPORT_PRIVATE static v8::Platform* GetCurrentPlatform();
diff --git a/deps/v8/src/init/vm-cage.cc b/deps/v8/src/init/vm-cage.cc
new file mode 100644
index 0000000000..9d88e4085b
--- /dev/null
+++ b/deps/v8/src/init/vm-cage.cc
@@ -0,0 +1,81 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/init/vm-cage.h"
+
+#include "include/v8-internal.h"
+#include "src/base/bounded-page-allocator.h"
+#include "src/base/lazy-instance.h"
+#include "src/utils/allocation.h"
+
+namespace v8 {
+namespace internal {
+
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+
+bool V8VirtualMemoryCage::Initialize(PageAllocator* page_allocator) {
+ constexpr bool use_guard_regions = true;
+ return Initialize(page_allocator, kVirtualMemoryCageSize, use_guard_regions);
+}
+
+bool V8VirtualMemoryCage::Initialize(v8::PageAllocator* page_allocator,
+ size_t size, bool use_guard_regions) {
+ CHECK(!initialized_);
+ CHECK(!disabled_);
+ CHECK_GE(size, kVirtualMemoryCageMinimumSize);
+
+ size_t reservation_size = size;
+ if (use_guard_regions) {
+ reservation_size += 2 * kVirtualMemoryCageGuardRegionSize;
+ }
+
+ base_ = reinterpret_cast<Address>(page_allocator->AllocatePages(
+ nullptr, reservation_size, kVirtualMemoryCageAlignment,
+ PageAllocator::kNoAccess));
+ if (!base_) return false;
+
+ if (use_guard_regions) {
+ base_ += kVirtualMemoryCageGuardRegionSize;
+ has_guard_regions_ = true;
+ }
+
+ page_allocator_ = page_allocator;
+ size_ = size;
+
+ data_cage_page_allocator_ = std::make_unique<base::BoundedPageAllocator>(
+ page_allocator_, data_cage_base(), data_cage_size(),
+ page_allocator_->AllocatePageSize());
+
+ initialized_ = true;
+
+ return true;
+}
+
+void V8VirtualMemoryCage::TearDown() {
+ if (initialized_) {
+ data_cage_page_allocator_.reset();
+ Address reservation_base = base_;
+ size_t reservation_size = size_;
+ if (has_guard_regions_) {
+ reservation_base -= kVirtualMemoryCageGuardRegionSize;
+ reservation_size += 2 * kVirtualMemoryCageGuardRegionSize;
+ }
+ CHECK(page_allocator_->FreePages(reinterpret_cast<void*>(reservation_base),
+ reservation_size));
+ page_allocator_ = nullptr;
+ base_ = kNullAddress;
+ size_ = 0;
+ initialized_ = false;
+ has_guard_regions_ = false;
+ }
+ disabled_ = false;
+}
+
+DEFINE_LAZY_LEAKY_OBJECT_GETTER(V8VirtualMemoryCage,
+ GetProcessWideVirtualMemoryCage)
+
+#endif
+
+} // namespace internal
+} // namespace v8
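
The address arithmetic in Initialize and TearDown reduces to a fixed layout: one guard region, the usable cage, and a second guard region. A sketch of that layout (symbolic sizes; the concrete constants live in include/v8-internal.h):

#include <cstddef>
#include <cstdint>

using Address = uintptr_t;

// Mirrors the regions reserved by V8VirtualMemoryCage::Initialize when
// use_guard_regions is true; TearDown recomputes reservation_base and
// reservation_size the same way before freeing.
struct CageLayout {
  Address reservation_base;  // What AllocatePages returned.
  size_t cage_size;          // Usable cage: pointer cage + data cage.
  size_t guard_size;         // One guard region (kNoAccess on both sides).

  size_t reservation_size() const { return cage_size + 2 * guard_size; }
  Address front_guard() const { return reservation_base; }
  Address cage_base() const { return reservation_base + guard_size; }
  Address back_guard() const { return cage_base() + cage_size; }
};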
diff --git a/deps/v8/src/init/vm-cage.h b/deps/v8/src/init/vm-cage.h
new file mode 100644
index 0000000000..5fdd2ad6e0
--- /dev/null
+++ b/deps/v8/src/init/vm-cage.h
@@ -0,0 +1,130 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_INIT_VM_CAGE_H_
+#define V8_INIT_VM_CAGE_H_
+
+#include "include/v8-internal.h"
+#include "src/common/globals.h"
+
+namespace v8 {
+
+class PageAllocator;
+
+namespace internal {
+
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+
+/**
+ * V8 Virtual Memory Cage.
+ *
+ * When the virtual memory cage is enabled, v8 will place most of its objects
+ * inside a dedicated region of virtual address space. In particular, all v8
+ * heaps, inside which objects reference themselves using compressed (32-bit)
+ * pointers, are located at the start of the virtual memory cage (the "pointer
+ * cage") and pure memory buffers like ArrayBuffer backing stores, which
+ * themselves do not contain any pointers, are located in the remaining part of
+ * the cage (the "data cage"). These buffers will eventually be referenced from
+ * inside the v8 heap using offsets rather than pointers. It should then be
+ * assumed that an attacker is able to corrupt data arbitrarily and concurrently
+ * inside the virtual memory cage.
+ *
+ * As the embedder is responsible for providing ArrayBuffer allocators, v8
+ * exposes a page allocator for the data cage to the embedder.
+ *
+ * TODO(chromium:1218005) Maybe don't call the sub-regions "cages" as well to
+ * avoid confusion? In any case, the names should probably be identical to the
+ * internal names for these virtual memory regions (where they are currently
+ * called cages).
+ * TODO(chromium:1218005) come up with a coherent naming scheme for this class
+ * and the other "cages" in v8.
+ */
+class V8VirtualMemoryCage {
+ public:
+ // +- ~~~ -+----------------+----------------------- ~~~ -+- ~~~ -+
+ // | 32 GB | 4 GB | | 32 GB |
+ // +- ~~~ -+----------------+----------------------- ~~~ -+- ~~~ -+
+ // ^ ^ ^ ^
+ // Guard Pointer Cage Data Cage Guard
+ // Region (contains all (contains all ArrayBuffer and Region
+ // (front) V8 heaps) WASM memory backing stores) (back)
+ //
+ // | base ---------------- size ------------------> |
+
+ V8VirtualMemoryCage() = default;
+
+ V8VirtualMemoryCage(const V8VirtualMemoryCage&) = delete;
+ V8VirtualMemoryCage& operator=(const V8VirtualMemoryCage&) = delete;
+
+ bool is_initialized() const { return initialized_; }
+ bool is_disabled() const { return disabled_; }
+ bool is_enabled() const { return !disabled_; }
+
+ bool Initialize(v8::PageAllocator* page_allocator);
+ void Disable() {
+ CHECK(!initialized_);
+ disabled_ = true;
+ }
+
+ void TearDown();
+
+ Address base() const { return base_; }
+ size_t size() const { return size_; }
+
+ Address pointer_cage_base() const { return base_; }
+ size_t pointer_cage_size() const { return kVirtualMemoryCagePointerCageSize; }
+
+ Address data_cage_base() const {
+ return pointer_cage_base() + pointer_cage_size();
+ }
+ size_t data_cage_size() const { return size_ - pointer_cage_size(); }
+
+ bool Contains(Address addr) const {
+ return addr >= base_ && addr < base_ + size_;
+ }
+
+ bool Contains(void* ptr) const {
+ return Contains(reinterpret_cast<Address>(ptr));
+ }
+
+ v8::PageAllocator* GetDataCagePageAllocator() {
+ return data_cage_page_allocator_.get();
+ }
+
+ private:
+ friend class SequentialUnmapperTest;
+
+ // We allow tests to disable the guard regions around the cage. This is
+ // useful for tests like the SequentialUnmapperTest, which track page
+ // allocations and would otherwise incur a large overhead from the guards.
+ bool Initialize(v8::PageAllocator* page_allocator, size_t total_size,
+ bool use_guard_regions);
+
+ Address base_ = kNullAddress;
+ size_t size_ = 0;
+ bool has_guard_regions_ = false;
+ bool initialized_ = false;
+ bool disabled_ = false;
+ v8::PageAllocator* page_allocator_ = nullptr;
+ std::unique_ptr<v8::PageAllocator> data_cage_page_allocator_;
+};
+
+V8VirtualMemoryCage* GetProcessWideVirtualMemoryCage();
+
+#endif // V8_VIRTUAL_MEMORY_CAGE
+
+V8_INLINE bool IsValidBackingStorePointer(void* ptr) {
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ Address addr = reinterpret_cast<Address>(ptr);
+ return kAllowBackingStoresOutsideDataCage || addr == kNullAddress ||
+ GetProcessWideVirtualMemoryCage()->Contains(addr);
+#else
+ return true;
+#endif
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_INIT_VM_CAGE_H_
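
A sketch of how the data-cage allocator is meant to be consumed (a hypothetical helper, not part of this patch; real ArrayBuffer allocation is wired up elsewhere). It allocates pages from the data cage and checks the invariant that IsValidBackingStorePointer expresses:

#include <cassert>
#include <cstdlib>

#include "include/v8-platform.h"
#include "src/init/vm-cage.h"

namespace v8 {
namespace internal {

// Hypothetical helper; `length` is assumed to be a multiple of the
// allocator's page size.
void* AllocateCagedBackingStore(size_t length) {
#ifdef V8_VIRTUAL_MEMORY_CAGE
  v8::PageAllocator* allocator =
      GetProcessWideVirtualMemoryCage()->GetDataCagePageAllocator();
  void* memory = allocator->AllocatePages(nullptr, length,
                                          allocator->AllocatePageSize(),
                                          v8::PageAllocator::kReadWrite);
  assert(IsValidBackingStorePointer(memory));
  return memory;
#else
  return std::malloc(length);  // No cage: any pointer is acceptable.
#endif
}

}  // namespace internal
}  // namespace v8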
diff --git a/deps/v8/src/inspector/DEPS b/deps/v8/src/inspector/DEPS
index 1c3ef43314..08b97ea3e9 100644
--- a/deps/v8/src/inspector/DEPS
+++ b/deps/v8/src/inspector/DEPS
@@ -13,7 +13,6 @@ include_rules = [
"+src/base/safe_conversions.h",
"+src/base/template-utils.h",
"+src/base/v8-fallthrough.h",
- "+src/logging/tracing-flags.h",
"+src/numbers/conversions.h",
"+src/inspector",
"+src/tracing",
diff --git a/deps/v8/src/inspector/custom-preview.cc b/deps/v8/src/inspector/custom-preview.cc
index d8e88861cb..97b0a07210 100644
--- a/deps/v8/src/inspector/custom-preview.cc
+++ b/deps/v8/src/inspector/custom-preview.cc
@@ -5,6 +5,11 @@
#include "src/inspector/custom-preview.h"
#include "../../third_party/inspector_protocol/crdtp/json.h"
+#include "include/v8-container.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
+#include "include/v8-json.h"
+#include "include/v8-microtask-queue.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/injected-script.h"
#include "src/inspector/inspected-context.h"
diff --git a/deps/v8/src/inspector/injected-script.cc b/deps/v8/src/inspector/injected-script.cc
index fc029e937a..e927c1cc40 100644
--- a/deps/v8/src/inspector/injected-script.cc
+++ b/deps/v8/src/inspector/injected-script.cc
@@ -34,7 +34,11 @@
#include <unordered_set>
#include "../../third_party/inspector_protocol/crdtp/json.h"
+#include "include/v8-container.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
#include "include/v8-inspector.h"
+#include "include/v8-microtask-queue.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/custom-preview.h"
#include "src/inspector/inspected-context.h"
@@ -354,8 +358,8 @@ class PropertyAccumulator : public ValueMirror::PropertyAccumulator {
Response InjectedScript::getProperties(
v8::Local<v8::Object> object, const String16& groupName, bool ownProperties,
- bool accessorPropertiesOnly, WrapMode wrapMode,
- std::unique_ptr<Array<PropertyDescriptor>>* properties,
+ bool accessorPropertiesOnly, bool nonIndexedPropertiesOnly,
+ WrapMode wrapMode, std::unique_ptr<Array<PropertyDescriptor>>* properties,
Maybe<protocol::Runtime::ExceptionDetails>* exceptionDetails) {
v8::HandleScope handles(m_context->isolate());
v8::Local<v8::Context> context = m_context->context();
@@ -367,7 +371,8 @@ Response InjectedScript::getProperties(
std::vector<PropertyMirror> mirrors;
PropertyAccumulator accumulator(&mirrors);
if (!ValueMirror::getProperties(context, object, ownProperties,
- accessorPropertiesOnly, &accumulator)) {
+ accessorPropertiesOnly,
+ nonIndexedPropertiesOnly, &accumulator)) {
return createExceptionDetails(tryCatch, groupName, exceptionDetails);
}
for (const PropertyMirror& mirror : mirrors) {
diff --git a/deps/v8/src/inspector/injected-script.h b/deps/v8/src/inspector/injected-script.h
index 9971d7da3a..86bcf60b17 100644
--- a/deps/v8/src/inspector/injected-script.h
+++ b/deps/v8/src/inspector/injected-script.h
@@ -35,6 +35,9 @@
#include <unordered_map>
#include <unordered_set>
+#include "include/v8-exception.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-persistent-handle.h"
#include "src/base/macros.h"
#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Forward.h"
@@ -42,8 +45,6 @@
#include "src/inspector/v8-console.h"
#include "src/inspector/v8-debugger.h"
-#include "include/v8.h"
-
namespace v8_inspector {
class RemoteObjectId;
@@ -76,7 +77,8 @@ class InjectedScript final {
Response getProperties(
v8::Local<v8::Object>, const String16& groupName, bool ownProperties,
- bool accessorPropertiesOnly, WrapMode wrapMode,
+ bool accessorPropertiesOnly, bool nonIndexedPropertiesOnly,
+ WrapMode wrapMode,
std::unique_ptr<protocol::Array<protocol::Runtime::PropertyDescriptor>>*
result,
Maybe<protocol::Runtime::ExceptionDetails>*);
diff --git a/deps/v8/src/inspector/inspected-context.cc b/deps/v8/src/inspector/inspected-context.cc
index a47df1ef12..6786f06b2f 100644
--- a/deps/v8/src/inspector/inspected-context.cc
+++ b/deps/v8/src/inspector/inspected-context.cc
@@ -4,14 +4,14 @@
#include "src/inspector/inspected-context.h"
+#include "include/v8-context.h"
+#include "include/v8-inspector.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/injected-script.h"
#include "src/inspector/string-util.h"
#include "src/inspector/v8-console.h"
#include "src/inspector/v8-inspector-impl.h"
-#include "include/v8-inspector.h"
-
namespace v8_inspector {
class InspectedContext::WeakCallbackData {
diff --git a/deps/v8/src/inspector/inspected-context.h b/deps/v8/src/inspector/inspected-context.h
index d3f0fe012b..f8811d0469 100644
--- a/deps/v8/src/inspector/inspected-context.h
+++ b/deps/v8/src/inspector/inspected-context.h
@@ -9,12 +9,18 @@
#include <unordered_map>
#include <unordered_set>
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-persistent-handle.h"
#include "src/base/macros.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/string-16.h"
#include "src/inspector/v8-debugger-id.h"
+namespace v8 {
+class Context;
+class Object;
+} // namespace v8
+
namespace v8_inspector {
class InjectedScript;
diff --git a/deps/v8/src/inspector/test-interface.h b/deps/v8/src/inspector/test-interface.h
index cf16c6936e..406ba02fa9 100644
--- a/deps/v8/src/inspector/test-interface.h
+++ b/deps/v8/src/inspector/test-interface.h
@@ -5,7 +5,7 @@
#ifndef V8_INSPECTOR_TEST_INTERFACE_H_
#define V8_INSPECTOR_TEST_INTERFACE_H_
-#include "include/v8.h"
+#include "include/v8config.h"
namespace v8_inspector {
diff --git a/deps/v8/src/inspector/v8-console-message.cc b/deps/v8/src/inspector/v8-console-message.cc
index 78622aa8d3..2734c67876 100644
--- a/deps/v8/src/inspector/v8-console-message.cc
+++ b/deps/v8/src/inspector/v8-console-message.cc
@@ -4,7 +4,11 @@
#include "src/inspector/v8-console-message.h"
+#include "include/v8-container.h"
+#include "include/v8-context.h"
#include "include/v8-inspector.h"
+#include "include/v8-microtask-queue.h"
+#include "include/v8-primitive-object.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Protocol.h"
diff --git a/deps/v8/src/inspector/v8-console-message.h b/deps/v8/src/inspector/v8-console-message.h
index 4dc521ee1c..cd960cf797 100644
--- a/deps/v8/src/inspector/v8-console-message.h
+++ b/deps/v8/src/inspector/v8-console-message.h
@@ -10,7 +10,8 @@
#include <memory>
#include <set>
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-persistent-handle.h"
#include "src/inspector/protocol/Console.h"
#include "src/inspector/protocol/Forward.h"
#include "src/inspector/protocol/Runtime.h"
diff --git a/deps/v8/src/inspector/v8-console.cc b/deps/v8/src/inspector/v8-console.cc
index 93a73f2580..55b620b0fc 100644
--- a/deps/v8/src/inspector/v8-console.cc
+++ b/deps/v8/src/inspector/v8-console.cc
@@ -4,6 +4,11 @@
#include "src/inspector/v8-console.h"
+#include "include/v8-container.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
+#include "include/v8-inspector.h"
+#include "include/v8-microtask-queue.h"
#include "src/base/macros.h"
#include "src/inspector/injected-script.h"
#include "src/inspector/inspected-context.h"
@@ -17,8 +22,6 @@
#include "src/inspector/v8-stack-trace-impl.h"
#include "src/inspector/v8-value-utils.h"
-#include "include/v8-inspector.h"
-
namespace v8_inspector {
namespace {
diff --git a/deps/v8/src/inspector/v8-console.h b/deps/v8/src/inspector/v8-console.h
index 59d7a8152f..cd10f11a8a 100644
--- a/deps/v8/src/inspector/v8-console.h
+++ b/deps/v8/src/inspector/v8-console.h
@@ -5,11 +5,16 @@
#ifndef V8_INSPECTOR_V8_CONSOLE_H_
#define V8_INSPECTOR_V8_CONSOLE_H_
+#include "include/v8-array-buffer.h"
+#include "include/v8-external.h"
+#include "include/v8-local-handle.h"
#include "src/base/macros.h"
-
-#include "include/v8.h"
#include "src/debug/interface-types.h"
+namespace v8 {
+class Set;
+} // namespace v8
+
namespace v8_inspector {
class InspectedContext;
diff --git a/deps/v8/src/inspector/v8-debugger-agent-impl.cc b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
index c49903f8c3..c19e2b72af 100644
--- a/deps/v8/src/inspector/v8-debugger-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-debugger-agent-impl.cc
@@ -7,7 +7,10 @@
#include <algorithm>
#include "../../third_party/inspector_protocol/crdtp/json.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
#include "include/v8-inspector.h"
+#include "include/v8-microtask-queue.h"
#include "src/base/safe_conversions.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/injected-script.h"
diff --git a/deps/v8/src/inspector/v8-debugger-script.h b/deps/v8/src/inspector/v8-debugger-script.h
index a8fd6775b0..d4486eb85e 100644
--- a/deps/v8/src/inspector/v8-debugger-script.h
+++ b/deps/v8/src/inspector/v8-debugger-script.h
@@ -32,12 +32,16 @@
#include <memory>
+#include "include/v8-local-handle.h"
+#include "include/v8-maybe.h"
#include "src/base/macros.h"
+#include "src/debug/debug-interface.h"
#include "src/inspector/string-16.h"
#include "src/inspector/string-util.h"
-#include "include/v8.h"
-#include "src/debug/debug-interface.h"
+namespace v8 {
+class Isolate;
+}
namespace v8_inspector {
diff --git a/deps/v8/src/inspector/v8-debugger.cc b/deps/v8/src/inspector/v8-debugger.cc
index 0ac934a4d3..1216dc78de 100644
--- a/deps/v8/src/inspector/v8-debugger.cc
+++ b/deps/v8/src/inspector/v8-debugger.cc
@@ -4,6 +4,11 @@
#include "src/inspector/v8-debugger.h"
+#include "include/v8-container.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
+#include "include/v8-microtask-queue.h"
+#include "include/v8-util.h"
#include "src/inspector/inspected-context.h"
#include "src/inspector/protocol/Protocol.h"
#include "src/inspector/string-util.h"
@@ -14,8 +19,6 @@
#include "src/inspector/v8-stack-trace-impl.h"
#include "src/inspector/v8-value-utils.h"
-#include "include/v8-util.h"
-
namespace v8_inspector {
namespace {
@@ -535,10 +538,6 @@ size_t HeapLimitForDebugging(size_t initial_heap_limit) {
size_t V8Debugger::nearHeapLimitCallback(void* data, size_t current_heap_limit,
size_t initial_heap_limit) {
V8Debugger* thisPtr = static_cast<V8Debugger*>(data);
-// TODO(solanes, v8:10876): Remove when bug is solved.
-#if DEBUG
- printf("nearHeapLimitCallback\n");
-#endif
thisPtr->m_originalHeapLimit = current_heap_limit;
thisPtr->m_scheduledOOMBreak = true;
v8::Local<v8::Context> context =
diff --git a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
index ed6901292c..955d7bcf76 100644
--- a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.cc
@@ -4,6 +4,7 @@
#include "src/inspector/v8-heap-profiler-agent-impl.h"
+#include "include/v8-context.h"
#include "include/v8-inspector.h"
#include "include/v8-platform.h"
#include "include/v8-profiler.h"
diff --git a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h
index feda75ffb7..cd92bd32d0 100644
--- a/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h
+++ b/deps/v8/src/inspector/v8-heap-profiler-agent-impl.h
@@ -11,7 +11,9 @@
#include "src/inspector/protocol/Forward.h"
#include "src/inspector/protocol/HeapProfiler.h"
-#include "include/v8.h"
+namespace v8 {
+class Isolate;
+}
namespace v8_inspector {
diff --git a/deps/v8/src/inspector/v8-inspector-impl.cc b/deps/v8/src/inspector/v8-inspector-impl.cc
index f0cfa9b2c7..2da495c470 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.cc
+++ b/deps/v8/src/inspector/v8-inspector-impl.cc
@@ -32,6 +32,9 @@
#include <vector>
+#include "include/v8-context.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-microtask-queue.h"
#include "include/v8-platform.h"
#include "src/base/platform/mutex.h"
#include "src/debug/debug-interface.h"
@@ -333,39 +336,6 @@ void V8InspectorImpl::allAsyncTasksCanceled() {
m_debugger->allAsyncTasksCanceled();
}
-V8Inspector::Counters::Counters(v8::Isolate* isolate) : m_isolate(isolate) {
- CHECK(m_isolate);
- auto* inspector =
- static_cast<V8InspectorImpl*>(v8::debug::GetInspector(m_isolate));
- CHECK(inspector);
- CHECK(!inspector->m_counters);
- inspector->m_counters = this;
- m_isolate->SetCounterFunction(&Counters::getCounterPtr);
-}
-
-V8Inspector::Counters::~Counters() {
- auto* inspector =
- static_cast<V8InspectorImpl*>(v8::debug::GetInspector(m_isolate));
- CHECK(inspector);
- inspector->m_counters = nullptr;
- m_isolate->SetCounterFunction(nullptr);
-}
-
-int* V8Inspector::Counters::getCounterPtr(const char* name) {
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
- DCHECK(isolate);
- V8Inspector* inspector = v8::debug::GetInspector(isolate);
- DCHECK(inspector);
- auto* instance = static_cast<V8InspectorImpl*>(inspector)->m_counters;
- DCHECK(instance);
- return &(instance->m_countersMap[name]);
-}
-
-std::shared_ptr<V8Inspector::Counters> V8InspectorImpl::enableCounters() {
- if (m_counters) return m_counters->shared_from_this();
- return std::make_shared<Counters>(m_isolate);
-}
-
v8::MaybeLocal<v8::Context> V8InspectorImpl::regexContext() {
if (m_regexContext.IsEmpty()) {
m_regexContext.Reset(m_isolate, v8::Context::New(m_isolate));
diff --git a/deps/v8/src/inspector/v8-inspector-impl.h b/deps/v8/src/inspector/v8-inspector-impl.h
index e1607f88c0..5c797bbfc7 100644
--- a/deps/v8/src/inspector/v8-inspector-impl.h
+++ b/deps/v8/src/inspector/v8-inspector-impl.h
@@ -110,8 +110,6 @@ class V8InspectorImpl : public V8Inspector {
void externalAsyncTaskStarted(const V8StackTraceId& parent) override;
void externalAsyncTaskFinished(const V8StackTraceId& parent) override;
- std::shared_ptr<Counters> enableCounters() override;
-
bool associateExceptionData(v8::Local<v8::Context>,
v8::Local<v8::Value> exception,
v8::Local<v8::Name> key,
@@ -157,8 +155,6 @@ class V8InspectorImpl : public V8Inspector {
};
private:
- friend class Counters;
-
v8::Isolate* m_isolate;
V8InspectorClient* m_client;
std::unique_ptr<V8Debugger> m_debugger;
@@ -191,8 +187,6 @@ class V8InspectorImpl : public V8Inspector {
std::map<std::pair<int64_t, int64_t>, int> m_uniqueIdToContextId;
std::unique_ptr<V8Console> m_console;
-
- Counters* m_counters = nullptr;
};
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.cc b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
index b2c04842cc..6b44459082 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.cc
@@ -16,7 +16,6 @@
#include "src/inspector/v8-inspector-impl.h"
#include "src/inspector/v8-inspector-session-impl.h"
#include "src/inspector/v8-stack-trace-impl.h"
-#include "src/logging/tracing-flags.h"
namespace v8_inspector {
@@ -30,8 +29,6 @@ static const char preciseCoverageDetailed[] = "preciseCoverageDetailed";
static const char preciseCoverageAllowTriggeredUpdates[] =
"preciseCoverageAllowTriggeredUpdates";
static const char typeProfileStarted[] = "typeProfileStarted";
-static const char countersEnabled[] = "countersEnabled";
-static const char runtimeCallStatsEnabled[] = "runtimeCallStatsEnabled";
} // namespace ProfilerAgentState
namespace {
@@ -243,16 +240,6 @@ Response V8ProfilerAgentImpl::disable() {
m_state->setBoolean(ProfilerAgentState::profilerEnabled, false);
}
- if (m_counters) {
- disableCounters();
- m_state->setBoolean(ProfilerAgentState::countersEnabled, false);
- }
-
- if (m_runtime_call_stats_enabled) {
- disableRuntimeCallStats();
- m_state->setBoolean(ProfilerAgentState::runtimeCallStatsEnabled, false);
- }
-
return Response::Success();
}
@@ -287,15 +274,6 @@ void V8ProfilerAgentImpl::restore() {
Maybe<bool>(updatesAllowed), &timestamp);
}
}
-
- if (m_state->booleanProperty(ProfilerAgentState::countersEnabled, false)) {
- enableCounters();
- }
-
- if (m_state->booleanProperty(ProfilerAgentState::runtimeCallStatsEnabled,
- false)) {
- enableRuntimeCallStats();
- }
}
Response V8ProfilerAgentImpl::start() {
@@ -551,104 +529,6 @@ Response V8ProfilerAgentImpl::takeTypeProfile(
return Response::Success();
}
-Response V8ProfilerAgentImpl::enableCounters() {
- if (m_counters)
- return Response::ServerError("Counters collection already enabled.");
-
- if (V8Inspector* inspector = v8::debug::GetInspector(m_isolate))
- m_counters = inspector->enableCounters();
- else
- return Response::ServerError("No inspector found.");
-
- return Response::Success();
-}
-
-Response V8ProfilerAgentImpl::disableCounters() {
- if (m_counters) m_counters.reset();
- return Response::Success();
-}
-
-Response V8ProfilerAgentImpl::getCounters(
- std::unique_ptr<protocol::Array<protocol::Profiler::CounterInfo>>*
- out_result) {
- if (!m_counters)
- return Response::ServerError("Counters collection is not enabled.");
-
- *out_result =
- std::make_unique<protocol::Array<protocol::Profiler::CounterInfo>>();
-
- for (const auto& counter : m_counters->getCountersMap()) {
- (*out_result)
- ->emplace_back(
- protocol::Profiler::CounterInfo::create()
- .setName(String16(counter.first.data(), counter.first.length()))
- .setValue(counter.second)
- .build());
- }
-
- return Response::Success();
-}
-
-Response V8ProfilerAgentImpl::enableRuntimeCallStats() {
- if (v8::internal::TracingFlags::runtime_stats.load()) {
- return Response::ServerError(
- "Runtime Call Stats collection is already enabled.");
- }
-
- v8::internal::TracingFlags::runtime_stats.store(true);
- m_runtime_call_stats_enabled = true;
-
- return Response::Success();
-}
-
-Response V8ProfilerAgentImpl::disableRuntimeCallStats() {
- if (!v8::internal::TracingFlags::runtime_stats.load()) {
- return Response::ServerError(
- "Runtime Call Stats collection is not enabled.");
- }
-
- if (!m_runtime_call_stats_enabled) {
- return Response::ServerError(
- "Runtime Call Stats collection was not enabled by this session.");
- }
-
- v8::internal::TracingFlags::runtime_stats.store(false);
- m_runtime_call_stats_enabled = false;
-
- return Response::Success();
-}
-
-Response V8ProfilerAgentImpl::getRuntimeCallStats(
- std::unique_ptr<
- protocol::Array<protocol::Profiler::RuntimeCallCounterInfo>>*
- out_result) {
- if (!m_runtime_call_stats_enabled) {
- return Response::ServerError(
- "Runtime Call Stats collection is not enabled.");
- }
-
- if (!v8::internal::TracingFlags::runtime_stats.load()) {
- return Response::ServerError(
- "Runtime Call Stats collection was disabled outside of this session.");
- }
-
- *out_result = std::make_unique<
- protocol::Array<protocol::Profiler::RuntimeCallCounterInfo>>();
-
- v8::debug::EnumerateRuntimeCallCounters(
- m_isolate,
- [&](const char* name, int64_t count, v8::base::TimeDelta time) {
- (*out_result)
- ->emplace_back(protocol::Profiler::RuntimeCallCounterInfo::create()
- .setName(String16(name))
- .setValue(static_cast<double>(count))
- .setTime(time.InSecondsF())
- .build());
- });
-
- return Response::Success();
-}
-
String16 V8ProfilerAgentImpl::nextProfileId() {
return String16::fromInteger(
v8::base::Relaxed_AtomicIncrement(&s_lastProfileId, 1));
diff --git a/deps/v8/src/inspector/v8-profiler-agent-impl.h b/deps/v8/src/inspector/v8-profiler-agent-impl.h
index 7cafa0cb01..4fba6e6c70 100644
--- a/deps/v8/src/inspector/v8-profiler-agent-impl.h
+++ b/deps/v8/src/inspector/v8-profiler-agent-impl.h
@@ -59,19 +59,6 @@ class V8ProfilerAgentImpl : public protocol::Profiler::Backend {
std::unique_ptr<protocol::Array<protocol::Profiler::ScriptTypeProfile>>*
out_result) override;
- Response enableCounters() override;
- Response disableCounters() override;
- Response getCounters(
- std::unique_ptr<protocol::Array<protocol::Profiler::CounterInfo>>*
- out_result) override;
-
- Response enableRuntimeCallStats() override;
- Response disableRuntimeCallStats() override;
- Response getRuntimeCallStats(
- std::unique_ptr<
- protocol::Array<protocol::Profiler::RuntimeCallCounterInfo>>*
- out_result) override;
-
void consoleProfile(const String16& title);
void consoleProfileEnd(const String16& title);
@@ -95,8 +82,6 @@ class V8ProfilerAgentImpl : public protocol::Profiler::Backend {
std::vector<ProfileDescriptor> m_startedProfiles;
String16 m_frontendInitiatedProfileId;
int m_startedProfilesCount = 0;
- std::shared_ptr<V8Inspector::Counters> m_counters;
- bool m_runtime_call_stats_enabled = false;
};
} // namespace v8_inspector
diff --git a/deps/v8/src/inspector/v8-regex.cc b/deps/v8/src/inspector/v8-regex.cc
index 55b00d50ae..fd44a6a258 100644
--- a/deps/v8/src/inspector/v8-regex.cc
+++ b/deps/v8/src/inspector/v8-regex.cc
@@ -6,11 +6,15 @@
#include <limits.h>
+#include "include/v8-container.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
+#include "include/v8-inspector.h"
+#include "include/v8-microtask-queue.h"
+#include "include/v8-regexp.h"
#include "src/inspector/string-util.h"
#include "src/inspector/v8-inspector-impl.h"
-#include "include/v8-inspector.h"
-
namespace v8_inspector {
V8Regex::V8Regex(V8InspectorImpl* inspector, const String16& pattern,
diff --git a/deps/v8/src/inspector/v8-regex.h b/deps/v8/src/inspector/v8-regex.h
index 9ce31cf4ce..75d972f15a 100644
--- a/deps/v8/src/inspector/v8-regex.h
+++ b/deps/v8/src/inspector/v8-regex.h
@@ -5,10 +5,13 @@
#ifndef V8_INSPECTOR_V8_REGEX_H_
#define V8_INSPECTOR_V8_REGEX_H_
+#include "include/v8-persistent-handle.h"
#include "src/base/macros.h"
#include "src/inspector/string-16.h"
-#include "include/v8.h"
+namespace v8 {
+class RegExp;
+}
namespace v8_inspector {
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.cc b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
index b78b641edf..3a8277639c 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.cc
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.cc
@@ -33,6 +33,11 @@
#include <inttypes.h>
#include "../../third_party/inspector_protocol/crdtp/json.h"
+#include "include/v8-container.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
+#include "include/v8-inspector.h"
+#include "include/v8-microtask-queue.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/injected-script.h"
#include "src/inspector/inspected-context.h"
@@ -47,8 +52,6 @@
#include "src/inspector/v8-value-utils.h"
#include "src/tracing/trace-event.h"
-#include "include/v8-inspector.h"
-
namespace v8_inspector {
namespace V8RuntimeAgentImplState {
@@ -418,6 +421,7 @@ void V8RuntimeAgentImpl::callFunctionOn(
Response V8RuntimeAgentImpl::getProperties(
const String16& objectId, Maybe<bool> ownProperties,
Maybe<bool> accessorPropertiesOnly, Maybe<bool> generatePreview,
+ Maybe<bool> nonIndexedPropertiesOnly,
std::unique_ptr<protocol::Array<protocol::Runtime::PropertyDescriptor>>*
result,
Maybe<protocol::Array<protocol::Runtime::InternalPropertyDescriptor>>*
@@ -442,6 +446,7 @@ Response V8RuntimeAgentImpl::getProperties(
response = scope.injectedScript()->getProperties(
object, scope.objectGroupName(), ownProperties.fromMaybe(false),
accessorPropertiesOnly.fromMaybe(false),
+ nonIndexedPropertiesOnly.fromMaybe(false),
generatePreview.fromMaybe(false) ? WrapMode::kWithPreview
: WrapMode::kNoPreview,
result, exceptionDetails);
diff --git a/deps/v8/src/inspector/v8-runtime-agent-impl.h b/deps/v8/src/inspector/v8-runtime-agent-impl.h
index eadc596ca3..0ab39e8da2 100644
--- a/deps/v8/src/inspector/v8-runtime-agent-impl.h
+++ b/deps/v8/src/inspector/v8-runtime-agent-impl.h
@@ -35,11 +35,15 @@
#include <set>
#include <unordered_map>
-#include "include/v8.h"
+#include "include/v8-persistent-handle.h"
#include "src/base/macros.h"
#include "src/inspector/protocol/Forward.h"
#include "src/inspector/protocol/Runtime.h"
+namespace v8 {
+class Script;
+} // namespace v8
+
namespace v8_inspector {
class InjectedScript;
@@ -88,6 +93,7 @@ class V8RuntimeAgentImpl : public protocol::Runtime::Backend {
Response getProperties(
const String16& objectId, Maybe<bool> ownProperties,
Maybe<bool> accessorPropertiesOnly, Maybe<bool> generatePreview,
+ Maybe<bool> nonIndexedPropertiesOnly,
std::unique_ptr<protocol::Array<protocol::Runtime::PropertyDescriptor>>*
result,
Maybe<protocol::Array<protocol::Runtime::InternalPropertyDescriptor>>*
diff --git a/deps/v8/src/inspector/v8-stack-trace-impl.h b/deps/v8/src/inspector/v8-stack-trace-impl.h
index cd86659fdb..aaad7ab6b3 100644
--- a/deps/v8/src/inspector/v8-stack-trace-impl.h
+++ b/deps/v8/src/inspector/v8-stack-trace-impl.h
@@ -9,11 +9,16 @@
#include <vector>
#include "include/v8-inspector.h"
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
#include "src/base/macros.h"
#include "src/inspector/protocol/Runtime.h"
#include "src/inspector/string-16.h"
+namespace v8 {
+class StackFrame;
+class StackTrace;
+} // namespace v8
+
namespace v8_inspector {
class AsyncStackTrace;
diff --git a/deps/v8/src/inspector/v8-value-utils.cc b/deps/v8/src/inspector/v8-value-utils.cc
index dd73c2919d..4b9f0b7a1a 100644
--- a/deps/v8/src/inspector/v8-value-utils.cc
+++ b/deps/v8/src/inspector/v8-value-utils.cc
@@ -4,6 +4,10 @@
#include "src/inspector/v8-value-utils.h"
+#include "include/v8-container.h"
+#include "include/v8-context.h"
+#include "include/v8-exception.h"
+
namespace v8_inspector {
v8::Maybe<bool> createDataProperty(v8::Local<v8::Context> context,
diff --git a/deps/v8/src/inspector/v8-value-utils.h b/deps/v8/src/inspector/v8-value-utils.h
index 6817d9fbb6..7eae23d9b1 100644
--- a/deps/v8/src/inspector/v8-value-utils.h
+++ b/deps/v8/src/inspector/v8-value-utils.h
@@ -5,10 +5,9 @@
#ifndef V8_INSPECTOR_V8_VALUE_UTILS_H_
#define V8_INSPECTOR_V8_VALUE_UTILS_H_
+#include "include/v8-local-handle.h"
#include "src/inspector/protocol/Protocol.h"
-#include "include/v8.h"
-
namespace v8_inspector {
v8::Maybe<bool> createDataProperty(v8::Local<v8::Context>,
diff --git a/deps/v8/src/inspector/value-mirror.cc b/deps/v8/src/inspector/value-mirror.cc
index 78078f4c17..57eebb0c80 100644
--- a/deps/v8/src/inspector/value-mirror.cc
+++ b/deps/v8/src/inspector/value-mirror.cc
@@ -7,6 +7,15 @@
#include <algorithm>
#include <cmath>
+#include "include/v8-container.h"
+#include "include/v8-date.h"
+#include "include/v8-function.h"
+#include "include/v8-microtask-queue.h"
+#include "include/v8-primitive-object.h"
+#include "include/v8-proxy.h"
+#include "include/v8-regexp.h"
+#include "include/v8-typed-array.h"
+#include "include/v8-wasm.h"
#include "src/base/optional.h"
#include "src/debug/debug-interface.h"
#include "src/inspector/v8-debugger.h"
@@ -786,7 +795,7 @@ class PreviewPropertyAccumulator : public ValueMirror::PropertyAccumulator {
!mirror.value) {
return true;
}
- if (!mirror.isOwn) return true;
+ if (!mirror.isOwn && !mirror.isSynthetic) return true;
if (std::find(m_blocklist.begin(), m_blocklist.end(), mirror.name) !=
m_blocklist.end()) {
return true;
@@ -844,7 +853,7 @@ bool getPropertiesForPreview(v8::Local<v8::Context> context,
: -1;
PreviewPropertyAccumulator accumulator(blocklist, skipIndex, nameLimit,
indexLimit, overflow, properties);
- return ValueMirror::getProperties(context, object, false, false,
+ return ValueMirror::getProperties(context, object, false, false, false,
&accumulator);
}
@@ -1178,6 +1187,7 @@ ValueMirror::~ValueMirror() = default;
bool ValueMirror::getProperties(v8::Local<v8::Context> context,
v8::Local<v8::Object> object,
bool ownProperties, bool accessorPropertiesOnly,
+ bool nonIndexedPropertiesOnly,
PropertyAccumulator* accumulator) {
v8::Isolate* isolate = context->GetIsolate();
v8::TryCatch tryCatch(isolate);
@@ -1209,6 +1219,14 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
while (!iterator->Done()) {
bool isOwn = iterator->is_own();
if (!isOwn && ownProperties) break;
+ bool isIndex = iterator->is_array_index();
+ if (isIndex && nonIndexedPropertiesOnly) {
+ if (!iterator->Advance().FromMaybe(false)) {
+ CHECK(tryCatch.HasCaught());
+ return false;
+ }
+ continue;
+ }
v8::Local<v8::Name> v8Name = iterator->name();
v8::Maybe<bool> result = set->Has(context, v8Name);
if (result.IsNothing()) return false;
@@ -1287,10 +1305,14 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
if (v8::debug::CallFunctionOn(context, getterFunction, object, 0,
nullptr, true)
.ToLocal(&value)) {
- valueMirror = ValueMirror::create(context, value);
- isOwn = true;
- setterMirror = nullptr;
- getterMirror = nullptr;
+ if (value->IsPromise() &&
+ value.As<v8::Promise>()->State() == v8::Promise::kRejected) {
+ value.As<v8::Promise>()->MarkAsHandled();
+ } else {
+ valueMirror = ValueMirror::create(context, value);
+ setterMirror = nullptr;
+ getterMirror = nullptr;
+ }
}
}
}
@@ -1302,7 +1324,8 @@ bool ValueMirror::getProperties(v8::Local<v8::Context> context,
configurable,
enumerable,
isOwn,
- iterator->is_array_index(),
+ isIndex,
+ isAccessorProperty && valueMirror,
std::move(valueMirror),
std::move(getterMirror),
std::move(setterMirror),
diff --git a/deps/v8/src/inspector/value-mirror.h b/deps/v8/src/inspector/value-mirror.h
index 88b4ad2711..721695e74d 100644
--- a/deps/v8/src/inspector/value-mirror.h
+++ b/deps/v8/src/inspector/value-mirror.h
@@ -8,7 +8,7 @@
#include <memory>
#include "include/v8-inspector.h"
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
#include "src/base/macros.h"
#include "src/inspector/protocol/Protocol.h"
#include "src/inspector/protocol/Runtime.h"
@@ -38,6 +38,7 @@ struct PropertyMirror {
bool enumerable;
bool isOwn;
bool isIndex;
+ bool isSynthetic;
std::unique_ptr<ValueMirror> value;
std::unique_ptr<ValueMirror> getter;
std::unique_ptr<ValueMirror> setter;
@@ -74,6 +75,7 @@ class ValueMirror {
static bool getProperties(v8::Local<v8::Context> context,
v8::Local<v8::Object> object, bool ownProperties,
bool accessorPropertiesOnly,
+ bool nonIndexedPropertiesOnly,
PropertyAccumulator* accumulator);
static void getInternalProperties(
v8::Local<v8::Context> context, v8::Local<v8::Object> object,
diff --git a/deps/v8/src/interpreter/OWNERS b/deps/v8/src/interpreter/OWNERS
index 481caea50b..e61606034b 100644
--- a/deps/v8/src/interpreter/OWNERS
+++ b/deps/v8/src/interpreter/OWNERS
@@ -1,3 +1,2 @@
leszeks@chromium.org
-mythria@chromium.org
-rmcilroy@chromium.org
+syg@chromium.org
diff --git a/deps/v8/src/interpreter/bytecode-generator.cc b/deps/v8/src/interpreter/bytecode-generator.cc
index f78330bea1..9536df172d 100644
--- a/deps/v8/src/interpreter/bytecode-generator.cc
+++ b/deps/v8/src/interpreter/bytecode-generator.cc
@@ -8,6 +8,7 @@
#include <unordered_map>
#include <unordered_set>
+#include "include/v8-extension.h"
#include "src/api/api-inl.h"
#include "src/ast/ast-source-ranges.h"
#include "src/ast/ast.h"
@@ -2525,7 +2526,7 @@ void BytecodeGenerator::BuildClassLiteral(ClassLiteral* expr, Register name) {
const AstRawString* class_name =
expr->scope()->class_variable() != nullptr
? expr->scope()->class_variable()->raw_name()
- : ast_string_constants()->empty_string();
+ : ast_string_constants()->anonymous_string();
builder()
->LoadLiteral(class_name)
.StoreAccumulatorInRegister(brand)
@@ -3647,8 +3648,7 @@ void BytecodeGenerator::BuildVariableAssignment(
break;
}
case VariableLocation::UNALLOCATED: {
- FeedbackSlot slot = GetCachedStoreGlobalICSlot(language_mode(), variable);
- builder()->StoreGlobal(variable->raw_name(), feedback_index(slot));
+ BuildStoreGlobal(variable);
break;
}
case VariableLocation::CONTEXT: {
@@ -3737,9 +3737,7 @@ void BytecodeGenerator::BuildVariableAssignment(
if (mode == VariableMode::kConst) {
builder()->CallRuntime(Runtime::kThrowConstAssignError);
} else {
- FeedbackSlot slot =
- GetCachedStoreGlobalICSlot(language_mode(), variable);
- builder()->StoreGlobal(variable->raw_name(), feedback_index(slot));
+ BuildStoreGlobal(variable);
}
}
break;
@@ -3772,6 +3770,21 @@ void BytecodeGenerator::BuildStoreNamedProperty(const Expression* object_expr,
}
}
+void BytecodeGenerator::BuildStoreGlobal(Variable* variable) {
+ Register value;
+ if (!execution_result()->IsEffect()) {
+ value = register_allocator()->NewRegister();
+ builder()->StoreAccumulatorInRegister(value);
+ }
+
+ FeedbackSlot slot = GetCachedStoreGlobalICSlot(language_mode(), variable);
+ builder()->StoreGlobal(variable->raw_name(), feedback_index(slot));
+
+ if (!execution_result()->IsEffect()) {
+ builder()->LoadAccumulatorWithRegister(value);
+ }
+}
+
// static
BytecodeGenerator::AssignmentLhsData
BytecodeGenerator::AssignmentLhsData::NonProperty(Expression* expr) {
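
The reason for the spill/reload in BuildStoreGlobal: StaGlobal is now marked kReadWriteAccumulator (see the bytecodes.h hunk below), so the store may clobber the accumulator while the surrounding expression still needs the assigned value. A toy model of the pattern in plain C++ (hypothetical, not V8 code):

// Toy accumulator machine illustrating the spill/reload pattern: when an
// operation may clobber the accumulator but the expression's value is still
// needed, save it to a scratch register first and restore it afterwards.
struct Machine {
  int accumulator = 0;
  int scratch = 0;

  // Stands in for StaGlobal: stores the accumulator but may overwrite it
  // with an unrelated intermediate result, as kReadWriteAccumulator allows.
  void StoreGlobalClobbering(int* global) {
    *global = accumulator;
    accumulator = -1;  // Clobbered.
  }

  void StoreGlobalPreservingValue(int* global, bool value_needed) {
    if (value_needed) scratch = accumulator;  // Spill.
    StoreGlobalClobbering(global);
    if (value_needed) accumulator = scratch;  // Reload.
  }
};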
diff --git a/deps/v8/src/interpreter/bytecode-generator.h b/deps/v8/src/interpreter/bytecode-generator.h
index 01f4b2a5b6..d3cc86acf5 100644
--- a/deps/v8/src/interpreter/bytecode-generator.h
+++ b/deps/v8/src/interpreter/bytecode-generator.h
@@ -241,6 +241,7 @@ class BytecodeGenerator final : public AstVisitor<BytecodeGenerator> {
const AstRawString* name);
void BuildStoreNamedProperty(const Expression* object_expr, Register object,
const AstRawString* name);
+ void BuildStoreGlobal(Variable* variable);
void BuildVariableLoad(Variable* variable, HoleCheckMode hole_check_mode,
TypeofMode typeof_mode = TypeofMode::kNotInside);
diff --git a/deps/v8/src/interpreter/bytecodes.h b/deps/v8/src/interpreter/bytecodes.h
index d938aff5a5..61734b9044 100644
--- a/deps/v8/src/interpreter/bytecodes.h
+++ b/deps/v8/src/interpreter/bytecodes.h
@@ -106,7 +106,7 @@ namespace interpreter {
OperandType::kIdx) \
V(LdaGlobalInsideTypeof, ImplicitRegisterUse::kWriteAccumulator, \
OperandType::kIdx, OperandType::kIdx) \
- V(StaGlobal, ImplicitRegisterUse::kReadAccumulator, OperandType::kIdx, \
+ V(StaGlobal, ImplicitRegisterUse::kReadWriteAccumulator, OperandType::kIdx, \
OperandType::kIdx) \
\
/* Context operations */ \
@@ -393,7 +393,7 @@ namespace interpreter {
\
/* Complex flow control For..in */ \
V(ForInEnumerate, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg) \
- V(ForInPrepare, ImplicitRegisterUse::kReadAccumulator, \
+ V(ForInPrepare, ImplicitRegisterUse::kReadWriteAccumulator, \
OperandType::kRegOutTriple, OperandType::kIdx) \
V(ForInContinue, ImplicitRegisterUse::kWriteAccumulator, OperandType::kReg, \
OperandType::kReg) \
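Both StaGlobal and ForInPrepare move from kReadAccumulator to kReadWriteAccumulator because their handlers now also write the accumulator. A minimal sketch of the flag encoding this implies (the exact V8 enumerator values are an assumption here):

#include <cstdint>

enum class ImplicitRegisterUse : uint8_t {
  kNone = 0,
  kReadAccumulator = 1 << 0,
  kWriteAccumulator = 1 << 1,
  kReadWriteAccumulator = (1 << 0) | (1 << 1),  // union of read and write
};

constexpr bool ReadsAccumulator(ImplicitRegisterUse u) {
  return (static_cast<uint8_t>(u) &
          static_cast<uint8_t>(ImplicitRegisterUse::kReadAccumulator)) != 0;
}
constexpr bool WritesAccumulator(ImplicitRegisterUse u) {
  return (static_cast<uint8_t>(u) &
          static_cast<uint8_t>(ImplicitRegisterUse::kWriteAccumulator)) != 0;
}

static_assert(ReadsAccumulator(ImplicitRegisterUse::kReadWriteAccumulator) &&
                  WritesAccumulator(ImplicitRegisterUse::kReadWriteAccumulator),
              "read-write implies both bits");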
diff --git a/deps/v8/src/interpreter/interpreter-assembler.cc b/deps/v8/src/interpreter/interpreter-assembler.cc
index c6d6e44a2f..49e4fad1fb 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.cc
+++ b/deps/v8/src/interpreter/interpreter-assembler.cc
@@ -157,7 +157,7 @@ TNode<Object> InterpreterAssembler::GetAccumulator() {
DCHECK(Bytecodes::ReadsAccumulator(bytecode_));
implicit_register_use_ =
implicit_register_use_ | ImplicitRegisterUse::kReadAccumulator;
- return TaggedPoisonOnSpeculation(GetAccumulatorUnchecked());
+ return GetAccumulatorUnchecked();
}
void InterpreterAssembler::SetAccumulator(TNode<Object> value) {
@@ -204,8 +204,8 @@ TNode<Context> InterpreterAssembler::GetContextAtDepth(TNode<Context> context,
TNode<IntPtrT> InterpreterAssembler::RegisterLocation(
TNode<IntPtrT> reg_index) {
- return Signed(WordPoisonOnSpeculation(
- IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index))));
+ return Signed(
+ IntPtrAdd(GetInterpretedFramePointer(), RegisterFrameOffset(reg_index)));
}
TNode<IntPtrT> InterpreterAssembler::RegisterLocation(Register reg) {
@@ -218,8 +218,7 @@ TNode<IntPtrT> InterpreterAssembler::RegisterFrameOffset(TNode<IntPtrT> index) {
TNode<Object> InterpreterAssembler::LoadRegister(TNode<IntPtrT> reg_index) {
return LoadFullTagged(GetInterpretedFramePointer(),
- RegisterFrameOffset(reg_index),
- LoadSensitivity::kCritical);
+ RegisterFrameOffset(reg_index));
}
TNode<Object> InterpreterAssembler::LoadRegister(Register reg) {
@@ -242,16 +241,14 @@ TNode<IntPtrT> InterpreterAssembler::LoadAndUntagRegister(Register reg) {
TNode<Object> InterpreterAssembler::LoadRegisterAtOperandIndex(
int operand_index) {
- return LoadRegister(
- BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
+ return LoadRegister(BytecodeOperandReg(operand_index));
}
std::pair<TNode<Object>, TNode<Object>>
InterpreterAssembler::LoadRegisterPairAtOperandIndex(int operand_index) {
DCHECK_EQ(OperandType::kRegPair,
Bytecodes::GetOperandType(bytecode_, operand_index));
- TNode<IntPtrT> first_reg_index =
- BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
+ TNode<IntPtrT> first_reg_index = BytecodeOperandReg(operand_index);
TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
return std::make_pair(LoadRegister(first_reg_index),
LoadRegister(second_reg_index));
@@ -263,8 +260,7 @@ InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) {
Bytecodes::GetOperandType(bytecode_, operand_index)));
DCHECK_EQ(OperandType::kRegCount,
Bytecodes::GetOperandType(bytecode_, operand_index + 1));
- TNode<IntPtrT> base_reg = RegisterLocation(
- BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
+ TNode<IntPtrT> base_reg = RegisterLocation(BytecodeOperandReg(operand_index));
TNode<Uint32T> reg_count = BytecodeOperandCount(operand_index + 1);
return RegListNodePair(base_reg, reg_count);
}
@@ -272,7 +268,6 @@ InterpreterAssembler::GetRegisterListAtOperandIndex(int operand_index) {
TNode<Object> InterpreterAssembler::LoadRegisterFromRegisterList(
const RegListNodePair& reg_list, int index) {
TNode<IntPtrT> location = RegisterLocationInRegisterList(reg_list, index);
- // Location is already poisoned on speculation, so no need to poison here.
return LoadFullTagged(location);
}
@@ -329,8 +324,7 @@ void InterpreterAssembler::StoreRegisterForShortStar(TNode<Object> value,
void InterpreterAssembler::StoreRegisterAtOperandIndex(TNode<Object> value,
int operand_index) {
- StoreRegister(value,
- BytecodeOperandReg(operand_index, LoadSensitivity::kSafe));
+ StoreRegister(value, BytecodeOperandReg(operand_index));
}
void InterpreterAssembler::StoreRegisterPairAtOperandIndex(TNode<Object> value1,
@@ -338,8 +332,7 @@ void InterpreterAssembler::StoreRegisterPairAtOperandIndex(TNode<Object> value1,
int operand_index) {
DCHECK_EQ(OperandType::kRegOutPair,
Bytecodes::GetOperandType(bytecode_, operand_index));
- TNode<IntPtrT> first_reg_index =
- BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
+ TNode<IntPtrT> first_reg_index = BytecodeOperandReg(operand_index);
StoreRegister(value1, first_reg_index);
TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
StoreRegister(value2, second_reg_index);
@@ -350,8 +343,7 @@ void InterpreterAssembler::StoreRegisterTripleAtOperandIndex(
int operand_index) {
DCHECK_EQ(OperandType::kRegOutTriple,
Bytecodes::GetOperandType(bytecode_, operand_index));
- TNode<IntPtrT> first_reg_index =
- BytecodeOperandReg(operand_index, LoadSensitivity::kSafe);
+ TNode<IntPtrT> first_reg_index = BytecodeOperandReg(operand_index);
StoreRegister(value1, first_reg_index);
TNode<IntPtrT> second_reg_index = NextRegister(first_reg_index);
StoreRegister(value2, second_reg_index);
@@ -370,30 +362,27 @@ TNode<IntPtrT> InterpreterAssembler::OperandOffset(int operand_index) {
}
TNode<Uint8T> InterpreterAssembler::BytecodeOperandUnsignedByte(
- int operand_index, LoadSensitivity needs_poisoning) {
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
TNode<IntPtrT> operand_offset = OperandOffset(operand_index);
return Load<Uint8T>(BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), operand_offset),
- needs_poisoning);
+ IntPtrAdd(BytecodeOffset(), operand_offset));
}
TNode<Int8T> InterpreterAssembler::BytecodeOperandSignedByte(
- int operand_index, LoadSensitivity needs_poisoning) {
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kByte, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
TNode<IntPtrT> operand_offset = OperandOffset(operand_index);
return Load<Int8T>(BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), operand_offset),
- needs_poisoning);
+ IntPtrAdd(BytecodeOffset(), operand_offset));
}
TNode<Word32T> InterpreterAssembler::BytecodeOperandReadUnaligned(
- int relative_offset, MachineType result_type,
- LoadSensitivity needs_poisoning) {
+ int relative_offset, MachineType result_type) {
static const int kMaxCount = 4;
DCHECK(!TargetSupportsUnalignedAccess());
@@ -430,9 +419,8 @@ TNode<Word32T> InterpreterAssembler::BytecodeOperandReadUnaligned(
TNode<IntPtrT> offset =
IntPtrConstant(relative_offset + msb_offset + i * kStep);
TNode<IntPtrT> array_offset = IntPtrAdd(BytecodeOffset(), offset);
- bytes[i] =
- UncheckedCast<Word32T>(Load(machine_type, BytecodeArrayTaggedPointer(),
- array_offset, needs_poisoning));
+ bytes[i] = UncheckedCast<Word32T>(
+ Load(machine_type, BytecodeArrayTaggedPointer(), array_offset));
}
// Pack LSB to MSB.
@@ -446,7 +434,7 @@ TNode<Word32T> InterpreterAssembler::BytecodeOperandReadUnaligned(
}
TNode<Uint16T> InterpreterAssembler::BytecodeOperandUnsignedShort(
- int operand_index, LoadSensitivity needs_poisoning) {
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(
OperandSize::kShort,
@@ -456,16 +444,15 @@ TNode<Uint16T> InterpreterAssembler::BytecodeOperandUnsignedShort(
if (TargetSupportsUnalignedAccess()) {
return Load<Uint16T>(
BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
- needs_poisoning);
+ IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else {
- return UncheckedCast<Uint16T>(BytecodeOperandReadUnaligned(
- operand_offset, MachineType::Uint16(), needs_poisoning));
+ return UncheckedCast<Uint16T>(
+ BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint16()));
}
}
TNode<Int16T> InterpreterAssembler::BytecodeOperandSignedShort(
- int operand_index, LoadSensitivity needs_poisoning) {
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(
OperandSize::kShort,
@@ -475,16 +462,15 @@ TNode<Int16T> InterpreterAssembler::BytecodeOperandSignedShort(
if (TargetSupportsUnalignedAccess()) {
return Load<Int16T>(
BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
- needs_poisoning);
+ IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else {
- return UncheckedCast<Int16T>(BytecodeOperandReadUnaligned(
- operand_offset, MachineType::Int16(), needs_poisoning));
+ return UncheckedCast<Int16T>(
+ BytecodeOperandReadUnaligned(operand_offset, MachineType::Int16()));
}
}
TNode<Uint32T> InterpreterAssembler::BytecodeOperandUnsignedQuad(
- int operand_index, LoadSensitivity needs_poisoning) {
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
@@ -493,16 +479,15 @@ TNode<Uint32T> InterpreterAssembler::BytecodeOperandUnsignedQuad(
if (TargetSupportsUnalignedAccess()) {
return Load<Uint32T>(
BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
- needs_poisoning);
+ IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else {
- return UncheckedCast<Uint32T>(BytecodeOperandReadUnaligned(
- operand_offset, MachineType::Uint32(), needs_poisoning));
+ return UncheckedCast<Uint32T>(
+ BytecodeOperandReadUnaligned(operand_offset, MachineType::Uint32()));
}
}
TNode<Int32T> InterpreterAssembler::BytecodeOperandSignedQuad(
- int operand_index, LoadSensitivity needs_poisoning) {
+ int operand_index) {
DCHECK_LT(operand_index, Bytecodes::NumberOfOperands(bytecode_));
DCHECK_EQ(OperandSize::kQuad, Bytecodes::GetOperandSize(
bytecode_, operand_index, operand_scale()));
@@ -511,43 +496,40 @@ TNode<Int32T> InterpreterAssembler::BytecodeOperandSignedQuad(
if (TargetSupportsUnalignedAccess()) {
return Load<Int32T>(
BytecodeArrayTaggedPointer(),
- IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)),
- needs_poisoning);
+ IntPtrAdd(BytecodeOffset(), IntPtrConstant(operand_offset)));
} else {
- return UncheckedCast<Int32T>(BytecodeOperandReadUnaligned(
- operand_offset, MachineType::Int32(), needs_poisoning));
+ return UncheckedCast<Int32T>(
+ BytecodeOperandReadUnaligned(operand_offset, MachineType::Int32()));
}
}
TNode<Int32T> InterpreterAssembler::BytecodeSignedOperand(
- int operand_index, OperandSize operand_size,
- LoadSensitivity needs_poisoning) {
+ int operand_index, OperandSize operand_size) {
DCHECK(!Bytecodes::IsUnsignedOperandType(
Bytecodes::GetOperandType(bytecode_, operand_index)));
switch (operand_size) {
case OperandSize::kByte:
- return BytecodeOperandSignedByte(operand_index, needs_poisoning);
+ return BytecodeOperandSignedByte(operand_index);
case OperandSize::kShort:
- return BytecodeOperandSignedShort(operand_index, needs_poisoning);
+ return BytecodeOperandSignedShort(operand_index);
case OperandSize::kQuad:
- return BytecodeOperandSignedQuad(operand_index, needs_poisoning);
+ return BytecodeOperandSignedQuad(operand_index);
case OperandSize::kNone:
UNREACHABLE();
}
}
TNode<Uint32T> InterpreterAssembler::BytecodeUnsignedOperand(
- int operand_index, OperandSize operand_size,
- LoadSensitivity needs_poisoning) {
+ int operand_index, OperandSize operand_size) {
DCHECK(Bytecodes::IsUnsignedOperandType(
Bytecodes::GetOperandType(bytecode_, operand_index)));
switch (operand_size) {
case OperandSize::kByte:
- return BytecodeOperandUnsignedByte(operand_index, needs_poisoning);
+ return BytecodeOperandUnsignedByte(operand_index);
case OperandSize::kShort:
- return BytecodeOperandUnsignedShort(operand_index, needs_poisoning);
+ return BytecodeOperandUnsignedShort(operand_index);
case OperandSize::kQuad:
- return BytecodeOperandUnsignedQuad(operand_index, needs_poisoning);
+ return BytecodeOperandUnsignedQuad(operand_index);
case OperandSize::kNone:
UNREACHABLE();
}
@@ -629,23 +611,22 @@ TNode<TaggedIndex> InterpreterAssembler::BytecodeOperandIdxTaggedIndex(
}
TNode<UintPtrT> InterpreterAssembler::BytecodeOperandConstantPoolIdx(
- int operand_index, LoadSensitivity needs_poisoning) {
+ int operand_index) {
DCHECK_EQ(OperandType::kIdx,
Bytecodes::GetOperandType(bytecode_, operand_index));
OperandSize operand_size =
Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
return ChangeUint32ToWord(
- BytecodeUnsignedOperand(operand_index, operand_size, needs_poisoning));
+ BytecodeUnsignedOperand(operand_index, operand_size));
}
-TNode<IntPtrT> InterpreterAssembler::BytecodeOperandReg(
- int operand_index, LoadSensitivity needs_poisoning) {
+TNode<IntPtrT> InterpreterAssembler::BytecodeOperandReg(int operand_index) {
DCHECK(Bytecodes::IsRegisterOperandType(
Bytecodes::GetOperandType(bytecode_, operand_index)));
OperandSize operand_size =
Bytecodes::GetOperandSize(bytecode_, operand_index, operand_scale());
return ChangeInt32ToIntPtr(
- BytecodeSignedOperand(operand_index, operand_size, needs_poisoning));
+ BytecodeSignedOperand(operand_index, operand_size));
}
TNode<Uint32T> InterpreterAssembler::BytecodeOperandRuntimeId(
@@ -682,8 +663,7 @@ TNode<Object> InterpreterAssembler::LoadConstantPoolEntry(TNode<WordT> index) {
TNode<FixedArray> constant_pool = CAST(LoadObjectField(
BytecodeArrayTaggedPointer(), BytecodeArray::kConstantPoolOffset));
return UnsafeLoadFixedArrayElement(constant_pool,
- UncheckedCast<IntPtrT>(index), 0,
- LoadSensitivity::kCritical);
+ UncheckedCast<IntPtrT>(index), 0);
}
TNode<IntPtrT> InterpreterAssembler::LoadAndUntagConstantPoolEntry(
@@ -693,8 +673,7 @@ TNode<IntPtrT> InterpreterAssembler::LoadAndUntagConstantPoolEntry(
TNode<Object> InterpreterAssembler::LoadConstantPoolEntryAtOperandIndex(
int operand_index) {
- TNode<UintPtrT> index =
- BytecodeOperandConstantPoolIdx(operand_index, LoadSensitivity::kSafe);
+ TNode<UintPtrT> index = BytecodeOperandConstantPoolIdx(operand_index);
return LoadConstantPoolEntry(index);
}
@@ -733,14 +712,16 @@ void InterpreterAssembler::CallJSAndDispatch(
bytecode_ == Bytecode::kInvokeIntrinsic);
DCHECK_EQ(Bytecodes::GetReceiverMode(bytecode_), receiver_mode);
- TNode<Word32T> args_count;
- if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
- // The receiver is implied, so it is not in the argument list.
- args_count = args.reg_count();
- } else {
- // Subtract the receiver from the argument count.
+ TNode<Word32T> args_count = args.reg_count();
+ const bool receiver_included =
+ receiver_mode != ConvertReceiverMode::kNullOrUndefined;
+ if (kJSArgcIncludesReceiver && !receiver_included) {
+ // Add receiver if we want to include it in argc and it isn't already.
+ args_count = Int32Add(args_count, Int32Constant(kJSArgcReceiverSlots));
+ } else if (!kJSArgcIncludesReceiver && receiver_included) {
+ // Subtract receiver if we don't want to include it, but it is included.
TNode<Int32T> receiver_count = Int32Constant(1);
- args_count = Int32Sub(args.reg_count(), receiver_count);
+ args_count = Int32Sub(args_count, receiver_count);
}
Callable callable = CodeFactory::InterpreterPushArgsThenCall(
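The receiver-count arithmetic above is driven by kJSArgcIncludesReceiver, a build-time switch for whether argc counts the receiver slot. A self-contained sketch of the conversion (constant values assumed for illustration):

#include <cassert>

constexpr bool kJSArgcIncludesReceiver = true;  // build-time configuration
constexpr int kJSArgcReceiverSlots = kJSArgcIncludesReceiver ? 1 : 0;

// JSParameterCount analogue: turn a count of explicit arguments into the
// argc the calling convention expects.
constexpr int JSParameterCount(int explicit_args) {
  return explicit_args + kJSArgcReceiverSlots;
}

int main() {
  // f(a, b): two explicit arguments, three pushed values with the receiver.
  assert(JSParameterCount(2) == 3);
  return 0;
}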
@@ -768,6 +749,7 @@ void InterpreterAssembler::CallJSAndDispatch(TNode<Object> function,
Callable callable = CodeFactory::Call(isolate());
TNode<Code> code_target = HeapConstant(callable.code());
+ arg_count = JSParameterCount(arg_count);
if (receiver_mode == ConvertReceiverMode::kNullOrUndefined) {
// The first argument parameter (the receiver) is implied to be undefined.
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target,
@@ -812,8 +794,11 @@ void InterpreterAssembler::CallJSWithSpreadAndDispatch(
InterpreterPushArgsMode::kWithFinalSpread);
TNode<Code> code_target = HeapConstant(callable.code());
- TNode<Int32T> receiver_count = Int32Constant(1);
- TNode<Word32T> args_count = Int32Sub(args.reg_count(), receiver_count);
+ TNode<Word32T> args_count = args.reg_count();
+ if (!kJSArgcIncludesReceiver) {
+ TNode<Int32T> receiver_count = Int32Constant(1);
+ args_count = Int32Sub(args_count, receiver_count);
+ }
TailCallStubThenBytecodeDispatch(callable.descriptor(), code_target, context,
args_count, args.base_reg_location(),
function);
@@ -832,6 +817,7 @@ TNode<Object> InterpreterAssembler::Construct(
Label return_result(this), construct_generic(this),
construct_array(this, &var_site);
+ TNode<Word32T> args_count = JSParameterCount(args.reg_count());
CollectConstructFeedback(context, target, new_target, maybe_feedback_vector,
slot_id, UpdateFeedbackMode::kOptionalFeedback,
&construct_generic, &construct_array, &var_site);
@@ -843,7 +829,7 @@ TNode<Object> InterpreterAssembler::Construct(
Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
isolate(), InterpreterPushArgsMode::kOther);
var_result =
- CallStub(callable, context, args.reg_count(), args.base_reg_location(),
+ CallStub(callable, context, args_count, args.base_reg_location(),
target, new_target, UndefinedConstant());
Goto(&return_result);
}
@@ -856,7 +842,7 @@ TNode<Object> InterpreterAssembler::Construct(
Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
isolate(), InterpreterPushArgsMode::kArrayFunction);
var_result =
- CallStub(callable, context, args.reg_count(), args.base_reg_location(),
+ CallStub(callable, context, args_count, args.base_reg_location(),
target, new_target, var_site.value());
Goto(&return_result);
}
@@ -982,7 +968,8 @@ TNode<Object> InterpreterAssembler::ConstructWithSpread(
Comment("call using ConstructWithSpread builtin");
Callable callable = CodeFactory::InterpreterPushArgsThenConstruct(
isolate(), InterpreterPushArgsMode::kWithFinalSpread);
- return CallStub(callable, context, args.reg_count(), args.base_reg_location(),
+ TNode<Word32T> args_count = JSParameterCount(args.reg_count());
+ return CallStub(callable, context, args_count, args.base_reg_location(),
target, new_target, UndefinedConstant());
}
@@ -1224,13 +1211,9 @@ void InterpreterAssembler::DispatchToBytecode(
void InterpreterAssembler::DispatchToBytecodeHandlerEntry(
TNode<RawPtrT> handler_entry, TNode<IntPtrT> bytecode_offset) {
- // Propagate speculation poisoning.
- TNode<RawPtrT> poisoned_handler_entry =
- UncheckedCast<RawPtrT>(WordPoisonOnSpeculation(handler_entry));
- TailCallBytecodeDispatch(InterpreterDispatchDescriptor{},
- poisoned_handler_entry, GetAccumulatorUnchecked(),
- bytecode_offset, BytecodeArrayTaggedPointer(),
- DispatchTablePointer());
+ TailCallBytecodeDispatch(
+ InterpreterDispatchDescriptor{}, handler_entry, GetAccumulatorUnchecked(),
+ bytecode_offset, BytecodeArrayTaggedPointer(), DispatchTablePointer());
}
void InterpreterAssembler::DispatchWide(OperandScale operand_scale) {
@@ -1325,7 +1308,7 @@ void InterpreterAssembler::OnStackReplacement(TNode<Context> context,
TNode<Uint16T> data_type = LoadInstanceType(CAST(sfi_data));
Label baseline(this);
- GotoIf(InstanceTypeEqual(data_type, BASELINE_DATA_TYPE), &baseline);
+ GotoIf(InstanceTypeEqual(data_type, CODET_TYPE), &baseline);
{
Callable callable = CodeFactory::InterpreterOnStackReplacement(isolate());
CallStub(callable, context);
@@ -1382,7 +1365,7 @@ bool InterpreterAssembler::TargetSupportsUnalignedAccess() {
return false;
#elif V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390 || \
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_PPC || \
- V8_TARGET_ARCH_PPC64
+ V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_LOONG64
return true;
#else
#error "Unknown Architecture"
diff --git a/deps/v8/src/interpreter/interpreter-assembler.h b/deps/v8/src/interpreter/interpreter-assembler.h
index bf4641200b..d89c05e2d3 100644
--- a/deps/v8/src/interpreter/interpreter-assembler.h
+++ b/deps/v8/src/interpreter/interpreter-assembler.h
@@ -308,51 +308,32 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// The |result_type| determines the size and signedness of the
// value read. This method should only be used on architectures that
// do not support unaligned memory accesses.
- TNode<Word32T> BytecodeOperandReadUnaligned(
- int relative_offset, MachineType result_type,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
+ TNode<Word32T> BytecodeOperandReadUnaligned(int relative_offset,
+ MachineType result_type);
// Returns zero- or sign-extended to word32 value of the operand.
- TNode<Uint8T> BytecodeOperandUnsignedByte(
- int operand_index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- TNode<Int8T> BytecodeOperandSignedByte(
- int operand_index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- TNode<Uint16T> BytecodeOperandUnsignedShort(
- int operand_index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- TNode<Int16T> BytecodeOperandSignedShort(
- int operand_index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- TNode<Uint32T> BytecodeOperandUnsignedQuad(
- int operand_index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- TNode<Int32T> BytecodeOperandSignedQuad(
- int operand_index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
+ TNode<Uint8T> BytecodeOperandUnsignedByte(int operand_index);
+ TNode<Int8T> BytecodeOperandSignedByte(int operand_index);
+ TNode<Uint16T> BytecodeOperandUnsignedShort(int operand_index);
+ TNode<Int16T> BytecodeOperandSignedShort(int operand_index);
+ TNode<Uint32T> BytecodeOperandUnsignedQuad(int operand_index);
+ TNode<Int32T> BytecodeOperandSignedQuad(int operand_index);
// Returns zero- or sign-extended to word32 value of the operand of
// given size.
- TNode<Int32T> BytecodeSignedOperand(
- int operand_index, OperandSize operand_size,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
- TNode<Uint32T> BytecodeUnsignedOperand(
- int operand_index, OperandSize operand_size,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
+ TNode<Int32T> BytecodeSignedOperand(int operand_index,
+ OperandSize operand_size);
+ TNode<Uint32T> BytecodeUnsignedOperand(int operand_index,
+ OperandSize operand_size);
// Returns the word-size sign-extended register index for bytecode operand
- // |operand_index| in the current bytecode. Value is not poisoned on
- // speculation since the value loaded from the register is poisoned instead.
- TNode<IntPtrT> BytecodeOperandReg(
- int operand_index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
+ // |operand_index| in the current bytecode.
+ TNode<IntPtrT> BytecodeOperandReg(int operand_index);
// Returns the word zero-extended index immediate for bytecode operand
- // |operand_index| in the current bytecode for use when loading a .
- TNode<UintPtrT> BytecodeOperandConstantPoolIdx(
- int operand_index,
- LoadSensitivity needs_poisoning = LoadSensitivity::kCritical);
+ // |operand_index| in the current bytecode for use when loading a constant
+ // pool element.
+ TNode<UintPtrT> BytecodeOperandConstantPoolIdx(int operand_index);
// Jump relative to the current bytecode by the |jump_offset|. If |backward|,
// then jump backward (subtract the offset), otherwise jump forward (add the
diff --git a/deps/v8/src/interpreter/interpreter-generator.cc b/deps/v8/src/interpreter/interpreter-generator.cc
index e010ab2f64..fb23f90841 100644
--- a/deps/v8/src/interpreter/interpreter-generator.cc
+++ b/deps/v8/src/interpreter/interpreter-generator.cc
@@ -236,8 +236,14 @@ IGNITION_HANDLER(StaGlobal, InterpreterAssembler) {
TNode<TaggedIndex> slot = BytecodeOperandIdxTaggedIndex(1);
TNode<HeapObject> maybe_vector = LoadFeedbackVector();
- CallBuiltin(Builtin::kStoreGlobalIC, context, name, value, slot,
- maybe_vector);
+ TNode<Object> result = CallBuiltin(Builtin::kStoreGlobalIC, context, name,
+ value, slot, maybe_vector);
+ // To avoid special logic in the deoptimizer to re-materialize the value in
+ // the accumulator, we overwrite the accumulator after the IC call. It
+ // doesn't really matter what we write to the accumulator here, since we
+ // restore to the correct value on the outside. Storing the result means we
+ // don't need to keep unnecessary state alive across the callstub.
+ SetAccumulator(result);
Dispatch();
}
@@ -598,14 +604,14 @@ class InterpreterStoreNamedPropertyAssembler : public InterpreterAssembler {
TNode<HeapObject> maybe_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- TVARIABLE(Object, var_result);
- var_result = CallStub(ic, context, object, name, value, slot, maybe_vector);
+ TNode<Object> result =
+ CallStub(ic, context, object, name, value, slot, maybe_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub.
- SetAccumulator(var_result.value());
+ SetAccumulator(result);
Dispatch();
}
};
@@ -642,15 +648,14 @@ IGNITION_HANDLER(StaKeyedProperty, InterpreterAssembler) {
TNode<HeapObject> maybe_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- TVARIABLE(Object, var_result);
- var_result = CallBuiltin(Builtin::kKeyedStoreIC, context, object, name, value,
- slot, maybe_vector);
+ TNode<Object> result = CallBuiltin(Builtin::kKeyedStoreIC, context, object,
+ name, value, slot, maybe_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub.
- SetAccumulator(var_result.value());
+ SetAccumulator(result);
Dispatch();
}
@@ -666,15 +671,15 @@ IGNITION_HANDLER(StaInArrayLiteral, InterpreterAssembler) {
TNode<HeapObject> feedback_vector = LoadFeedbackVector();
TNode<Context> context = GetContext();
- TVARIABLE(Object, var_result);
- var_result = CallBuiltin(Builtin::kStoreInArrayLiteralIC, context, array,
- index, value, slot, feedback_vector);
+ TNode<Object> result =
+ CallBuiltin(Builtin::kStoreInArrayLiteralIC, context, array, index, value,
+ slot, feedback_vector);
// To avoid special logic in the deoptimizer to re-materialize the value in
// the accumulator, we overwrite the accumulator after the IC call. It
// doesn't really matter what we write to the accumulator here, since we
// restore to the correct value on the outside. Storing the result means we
// don't need to keep unnecessary state alive across the callstub.
- SetAccumulator(var_result.value());
+ SetAccumulator(result);
Dispatch();
}
@@ -2834,6 +2839,11 @@ IGNITION_HANDLER(ForInPrepare, InterpreterAssembler) {
ForInPrepare(enumerator, vector_index, maybe_feedback_vector, &cache_array,
&cache_length, UpdateFeedbackMode::kOptionalFeedback);
+ // The accumulator is clobbered soon after ForInPrepare, so avoid keeping it
+ // alive too long and instead set it to cache_array to match the first return
+ // value of Builtin::kForInPrepare.
+ SetAccumulator(cache_array);
+
StoreRegisterTripleAtOperandIndex(cache_type, cache_array, cache_length, 0);
Dispatch();
}
@@ -2970,8 +2980,8 @@ IGNITION_HANDLER(SuspendGenerator, InterpreterAssembler) {
TNode<SharedFunctionInfo> shared =
CAST(LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset));
- TNode<Int32T> formal_parameter_count = LoadObjectField<Uint16T>(
- shared, SharedFunctionInfo::kFormalParameterCountOffset);
+ TNode<Int32T> formal_parameter_count =
+ LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(shared);
ExportParametersAndRegisterFile(array, registers, formal_parameter_count);
StoreObjectField(generator, JSGeneratorObject::kContextOffset, context);
@@ -3046,8 +3056,8 @@ IGNITION_HANDLER(ResumeGenerator, InterpreterAssembler) {
TNode<SharedFunctionInfo> shared =
CAST(LoadObjectField(closure, JSFunction::kSharedFunctionInfoOffset));
- TNode<Int32T> formal_parameter_count = LoadObjectField<Uint16T>(
- shared, SharedFunctionInfo::kFormalParameterCountOffset);
+ TNode<Int32T> formal_parameter_count =
+ LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(shared);
ImportRegisterFile(
CAST(LoadObjectField(generator,
@@ -3074,9 +3084,6 @@ Handle<Code> GenerateBytecodeHandler(Isolate* isolate, const char* debug_name,
compiler::CodeAssemblerState state(
isolate, &zone, InterpreterDispatchDescriptor{},
CodeKind::BYTECODE_HANDLER, debug_name,
- FLAG_untrusted_code_mitigations
- ? PoisoningMitigationLevel::kPoisonCriticalOnly
- : PoisoningMitigationLevel::kDontPoison,
builtin);
switch (bytecode) {
diff --git a/deps/v8/src/interpreter/interpreter.cc b/deps/v8/src/interpreter/interpreter.cc
index a874954157..88d7706c72 100644
--- a/deps/v8/src/interpreter/interpreter.cc
+++ b/deps/v8/src/interpreter/interpreter.cc
@@ -12,6 +12,7 @@
#include "src/ast/scopes.h"
#include "src/codegen/compiler.h"
#include "src/codegen/unoptimized-compilation-info.h"
+#include "src/common/globals.h"
#include "src/execution/local-isolate.h"
#include "src/heap/parked-scope.h"
#include "src/init/bootstrapper.h"
@@ -389,11 +390,9 @@ uintptr_t Interpreter::GetDispatchCounter(Bytecode from, Bytecode to) const {
to_index];
}
-Local<v8::Object> Interpreter::GetDispatchCountersObject() {
- v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(isolate_);
- Local<v8::Context> context = isolate->GetCurrentContext();
-
- Local<v8::Object> counters_map = v8::Object::New(isolate);
+Handle<JSObject> Interpreter::GetDispatchCountersObject() {
+ Handle<JSObject> counters_map =
+ isolate_->factory()->NewJSObjectWithNullProto();
// Output is a JSON-encoded object of objects.
//
@@ -408,30 +407,23 @@ Local<v8::Object> Interpreter::GetDispatchCountersObject() {
for (int from_index = 0; from_index < kNumberOfBytecodes; ++from_index) {
Bytecode from_bytecode = Bytecodes::FromByte(from_index);
- Local<v8::Object> counters_row = v8::Object::New(isolate);
+ Handle<JSObject> counters_row =
+ isolate_->factory()->NewJSObjectWithNullProto();
for (int to_index = 0; to_index < kNumberOfBytecodes; ++to_index) {
Bytecode to_bytecode = Bytecodes::FromByte(to_index);
uintptr_t counter = GetDispatchCounter(from_bytecode, to_bytecode);
if (counter > 0) {
- std::string to_name = Bytecodes::ToString(to_bytecode);
- Local<v8::String> to_name_object =
- v8::String::NewFromUtf8(isolate, to_name.c_str()).ToLocalChecked();
- Local<v8::Number> counter_object = v8::Number::New(isolate, counter);
- CHECK(counters_row
- ->DefineOwnProperty(context, to_name_object, counter_object)
- .IsJust());
+ Handle<Object> value = isolate_->factory()->NewNumberFromSize(counter);
+ JSObject::AddProperty(isolate_, counters_row,
+ Bytecodes::ToString(to_bytecode), value, NONE);
}
}
- std::string from_name = Bytecodes::ToString(from_bytecode);
- Local<v8::String> from_name_object =
- v8::String::NewFromUtf8(isolate, from_name.c_str()).ToLocalChecked();
-
- CHECK(
- counters_map->DefineOwnProperty(context, from_name_object, counters_row)
- .IsJust());
+ JSObject::AddProperty(isolate_, counters_map,
+ Bytecodes::ToString(from_bytecode), counters_row,
+ NONE);
}
return counters_map;
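GetDispatchCountersObject now builds internal JSObjects directly instead of going through the public v8::Object API. The shape it produces, restated over standard containers as a sketch:

#include <cstdint>
#include <map>
#include <string>

// Outer key: source bytecode name; inner key: destination bytecode name;
// zero counters are omitted, matching the loop above.
using DispatchCounters =
    std::map<std::string, std::map<std::string, uint64_t>>;

DispatchCounters BuildCounters(int num_bytecodes,
                               uint64_t (*counter)(int, int),
                               std::string (*name)(int)) {
  DispatchCounters result;
  for (int from = 0; from < num_bytecodes; ++from) {
    for (int to = 0; to < num_bytecodes; ++to) {
      if (uint64_t c = counter(from, to)) result[name(from)][name(to)] = c;
    }
  }
  return result;
}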
diff --git a/deps/v8/src/interpreter/interpreter.h b/deps/v8/src/interpreter/interpreter.h
index 95a3c4ef79..9daa886e65 100644
--- a/deps/v8/src/interpreter/interpreter.h
+++ b/deps/v8/src/interpreter/interpreter.h
@@ -72,7 +72,7 @@ class Interpreter {
// Disassembler support.
V8_EXPORT_PRIVATE const char* LookupNameOfBytecodeHandler(const Code code);
- V8_EXPORT_PRIVATE Local<v8::Object> GetDispatchCountersObject();
+ V8_EXPORT_PRIVATE Handle<JSObject> GetDispatchCountersObject();
void ForEachBytecode(const std::function<void(Bytecode, OperandScale)>& f);
diff --git a/deps/v8/src/json/json-parser.h b/deps/v8/src/json/json-parser.h
index 03e7537512..4819f9d64e 100644
--- a/deps/v8/src/json/json-parser.h
+++ b/deps/v8/src/json/json-parser.h
@@ -5,6 +5,7 @@
#ifndef V8_JSON_JSON_PARSER_H_
#define V8_JSON_JSON_PARSER_H_
+#include "include/v8-callbacks.h"
#include "src/base/small-vector.h"
#include "src/base/strings.h"
#include "src/execution/isolate.h"
diff --git a/deps/v8/src/libplatform/default-platform.cc b/deps/v8/src/libplatform/default-platform.cc
index 66057e1a39..1cbc01193d 100644
--- a/deps/v8/src/libplatform/default-platform.cc
+++ b/deps/v8/src/libplatform/default-platform.cc
@@ -8,6 +8,7 @@
#include <queue>
#include "include/libplatform/libplatform.h"
+#include "src/base/bounded-page-allocator.h"
#include "src/base/debug/stack_trace.h"
#include "src/base/logging.h"
#include "src/base/page-allocator.h"
diff --git a/deps/v8/src/libsampler/sampler.cc b/deps/v8/src/libsampler/sampler.cc
index 49c8406533..fb94972b85 100644
--- a/deps/v8/src/libsampler/sampler.cc
+++ b/deps/v8/src/libsampler/sampler.cc
@@ -4,6 +4,9 @@
#include "src/libsampler/sampler.h"
+#include "include/v8-isolate.h"
+#include "include/v8-unwinder.h"
+
#ifdef USE_SIGNALS
#include <errno.h>
@@ -412,6 +415,10 @@ void SignalHandler::FillRegisterState(void* context, RegisterState* state) {
state->pc = reinterpret_cast<void*>(mcontext.pc);
state->sp = reinterpret_cast<void*>(mcontext.gregs[29]);
state->fp = reinterpret_cast<void*>(mcontext.gregs[30]);
+#elif V8_HOST_ARCH_LOONG64
+ state->pc = reinterpret_cast<void*>(mcontext.__pc);
+ state->sp = reinterpret_cast<void*>(mcontext.__gregs[3]);
+ state->fp = reinterpret_cast<void*>(mcontext.__gregs[22]);
#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
#if V8_LIBC_GLIBC
state->pc = reinterpret_cast<void*>(ucontext->uc_mcontext.regs->nip);
diff --git a/deps/v8/src/libsampler/sampler.h b/deps/v8/src/libsampler/sampler.h
index 35bcf23546..98c0606151 100644
--- a/deps/v8/src/libsampler/sampler.h
+++ b/deps/v8/src/libsampler/sampler.h
@@ -8,8 +8,8 @@
#include <atomic>
#include <memory>
#include <unordered_map>
+#include <vector>
-#include "include/v8.h"
#include "src/base/lazy-instance.h"
#include "src/base/macros.h"
@@ -18,6 +18,10 @@
#endif
namespace v8 {
+
+class Isolate;
+struct RegisterState;
+
namespace sampler {
// ----------------------------------------------------------------------------
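Dropping include/v8.h in favor of forward declarations is the include-what-you-use pattern: a header that only holds a v8::Isolate* never needs the full class definition. An illustrative sketch (SamplerSketch is a hypothetical class, not the real sampler):

namespace v8 {
class Isolate;          // forward declaration only, no #include
struct RegisterState;
}  // namespace v8

class SamplerSketch {
 public:
  explicit SamplerSketch(v8::Isolate* isolate) : isolate_(isolate) {}
  v8::Isolate* isolate() const { return isolate_; }

 private:
  v8::Isolate* isolate_;  // pointer member: complete type not required
};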
diff --git a/deps/v8/src/logging/counters.h b/deps/v8/src/logging/counters.h
index 3a2527f49c..08e35352cf 100644
--- a/deps/v8/src/logging/counters.h
+++ b/deps/v8/src/logging/counters.h
@@ -7,7 +7,7 @@
#include <memory>
-#include "include/v8.h"
+#include "include/v8-callbacks.h"
#include "src/base/atomic-utils.h"
#include "src/base/optional.h"
#include "src/base/platform/elapsed-timer.h"
diff --git a/deps/v8/src/logging/log.cc b/deps/v8/src/logging/log.cc
index 4f6aa856d7..022d0e9c57 100644
--- a/deps/v8/src/logging/log.cc
+++ b/deps/v8/src/logging/log.cc
@@ -9,6 +9,7 @@
#include <memory>
#include <sstream>
+#include "include/v8-locker.h"
#include "src/api/api-inl.h"
#include "src/base/platform/mutex.h"
#include "src/base/platform/platform.h"
@@ -614,6 +615,8 @@ void LowLevelLogger::LogCodeInfo() {
const char arch[] = "ppc64";
#elif V8_TARGET_ARCH_MIPS
const char arch[] = "mips";
+#elif V8_TARGET_ARCH_LOONG64
+ const char arch[] = "loong64";
#elif V8_TARGET_ARCH_ARM64
const char arch[] = "arm64";
#elif V8_TARGET_ARCH_S390
@@ -730,7 +733,7 @@ void JitLogger::LogRecordedBuffer(const wasm::WasmCode* code, const char* name,
int length) {
JitCodeEvent event = {};
event.type = JitCodeEvent::CODE_ADDED;
- event.code_type = JitCodeEvent::JIT_CODE;
+ event.code_type = JitCodeEvent::WASM_CODE;
event.code_start = code->instructions().begin();
event.code_len = code->instructions().length();
event.name.str = name;
@@ -1558,12 +1561,14 @@ void Logger::CodeLinePosInfoRecordEvent(Address code_start,
CodeLinePosEvent(*jit_logger_, code_start, iter, code_type);
}
-void Logger::CodeLinePosInfoRecordEvent(
+#if V8_ENABLE_WEBASSEMBLY
+void Logger::WasmCodeLinePosInfoRecordEvent(
Address code_start, base::Vector<const byte> source_position_table) {
if (!jit_logger_) return;
SourcePositionTableIterator iter(source_position_table);
- CodeLinePosEvent(*jit_logger_, code_start, iter, JitCodeEvent::JIT_CODE);
+ CodeLinePosEvent(*jit_logger_, code_start, iter, JitCodeEvent::WASM_CODE);
}
+#endif // V8_ENABLE_WEBASSEMBLY
void Logger::CodeNameEvent(Address addr, int pos, const char* code_name) {
if (code_name == nullptr) return; // Not a code object.
@@ -2217,12 +2222,11 @@ void ExistingCodeLogger::LogCompiledFunctions() {
Handle<AbstractCode>(
AbstractCode::cast(shared->InterpreterTrampoline()), isolate_));
}
- if (shared->HasBaselineData()) {
+ if (shared->HasBaselineCode()) {
LogExistingFunction(
- shared,
- Handle<AbstractCode>(
- AbstractCode::cast(shared->baseline_data().baseline_code()),
- isolate_));
+ shared, Handle<AbstractCode>(
+ AbstractCode::cast(shared->baseline_code(kAcquireLoad)),
+ isolate_));
}
if (pair.second.is_identical_to(BUILTIN_CODE(isolate_, CompileLazy)))
continue;
diff --git a/deps/v8/src/logging/log.h b/deps/v8/src/logging/log.h
index 612c2a2df7..b9e7a75c20 100644
--- a/deps/v8/src/logging/log.h
+++ b/deps/v8/src/logging/log.h
@@ -10,6 +10,7 @@
#include <set>
#include <string>
+#include "include/v8-callbacks.h"
#include "include/v8-profiler.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/execution/isolate.h"
@@ -241,8 +242,10 @@ class Logger : public CodeEventListener {
void CodeLinePosInfoRecordEvent(Address code_start,
ByteArray source_position_table,
JitCodeEvent::CodeType code_type);
- void CodeLinePosInfoRecordEvent(
+#if V8_ENABLE_WEBASSEMBLY
+ void WasmCodeLinePosInfoRecordEvent(
Address code_start, base::Vector<const byte> source_position_table);
+#endif // V8_ENABLE_WEBASSEMBLY
void CodeNameEvent(Address addr, int pos, const char* code_name);
diff --git a/deps/v8/src/logging/runtime-call-stats.cc b/deps/v8/src/logging/runtime-call-stats.cc
index 86e3215f74..66e26096d0 100644
--- a/deps/v8/src/logging/runtime-call-stats.cc
+++ b/deps/v8/src/logging/runtime-call-stats.cc
@@ -260,17 +260,6 @@ void RuntimeCallStats::Print(std::ostream& os) {
entries.Print(os);
}
-void RuntimeCallStats::EnumerateCounters(
- debug::RuntimeCallCounterCallback callback) {
- if (current_timer_.Value() != nullptr) {
- current_timer_.Value()->Snapshot();
- }
- for (int i = 0; i < kNumberOfCounters; i++) {
- RuntimeCallCounter* counter = GetCounter(i);
- callback(counter->name(), counter->count(), counter->time());
- }
-}
-
void RuntimeCallStats::Reset() {
if (V8_LIKELY(!TracingFlags::is_runtime_stats_enabled())) return;
diff --git a/deps/v8/src/logging/runtime-call-stats.h b/deps/v8/src/logging/runtime-call-stats.h
index 5b3284a0c9..4e54e0ab71 100644
--- a/deps/v8/src/logging/runtime-call-stats.h
+++ b/deps/v8/src/logging/runtime-call-stats.h
@@ -5,8 +5,6 @@
#ifndef V8_LOGGING_RUNTIME_CALL_STATS_H_
#define V8_LOGGING_RUNTIME_CALL_STATS_H_
-#include "include/v8.h"
-
#ifdef V8_RUNTIME_CALL_STATS
#include "src/base/atomic-utils.h"
@@ -339,6 +337,7 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, FrameElision) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, GenericLowering) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, Inlining) \
+ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, JSWasmInlining) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, JumpThreading) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierPopulateReferenceMaps) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, MidTierRegisterAllocator) \
@@ -597,9 +596,6 @@ class RuntimeCallStats final {
V8_EXPORT_PRIVATE void Print();
V8_NOINLINE void Dump(v8::tracing::TracedValue* value);
- V8_EXPORT_PRIVATE void EnumerateCounters(
- debug::RuntimeCallCounterCallback callback);
-
ThreadId thread_id() const { return thread_id_; }
RuntimeCallTimer* current_timer() { return current_timer_.Value(); }
RuntimeCallCounter* current_counter() { return current_counter_.Value(); }
diff --git a/deps/v8/src/objects/allocation-site-inl.h b/deps/v8/src/objects/allocation-site-inl.h
index 9d17048958..1fc6709a5e 100644
--- a/deps/v8/src/objects/allocation-site-inl.h
+++ b/deps/v8/src/objects/allocation-site-inl.h
@@ -5,9 +5,9 @@
#ifndef V8_OBJECTS_ALLOCATION_SITE_INL_H_
#define V8_OBJECTS_ALLOCATION_SITE_INL_H_
-#include "src/objects/allocation-site.h"
-
+#include "src/common/globals.h"
#include "src/heap/heap-write-barrier-inl.h"
+#include "src/objects/allocation-site.h"
#include "src/objects/js-objects-inl.h"
// Has to be the last include (doesn't have include guards):
@@ -30,8 +30,7 @@ ACCESSORS(AllocationSite, transition_info_or_boilerplate, Object,
RELEASE_ACQUIRE_ACCESSORS(AllocationSite, transition_info_or_boilerplate,
Object, kTransitionInfoOrBoilerplateOffset)
ACCESSORS(AllocationSite, nested_site, Object, kNestedSiteOffset)
-IMPLICIT_TAG_RELAXED_INT32_ACCESSORS(AllocationSite, pretenure_data,
- kPretenureDataOffset)
+RELAXED_INT32_ACCESSORS(AllocationSite, pretenure_data, kPretenureDataOffset)
INT32_ACCESSORS(AllocationSite, pretenure_create_count,
kPretenureCreateCountOffset)
ACCESSORS(AllocationSite, dependent_code, DependentCode, kDependentCodeOffset)
@@ -73,7 +72,7 @@ void AllocationSite::Initialize() {
set_transition_info_or_boilerplate(Smi::zero());
SetElementsKind(GetInitialFastElementsKind());
set_nested_site(Smi::zero());
- set_pretenure_data(0);
+ set_pretenure_data(0, kRelaxedStore);
set_pretenure_create_count(0);
set_dependent_code(
DependentCode::cast(GetReadOnlyRoots().empty_weak_fixed_array()),
@@ -139,36 +138,39 @@ inline bool AllocationSite::CanTrack(InstanceType type) {
}
AllocationSite::PretenureDecision AllocationSite::pretenure_decision() const {
- return PretenureDecisionBits::decode(pretenure_data());
+ return PretenureDecisionBits::decode(pretenure_data(kRelaxedLoad));
}
void AllocationSite::set_pretenure_decision(PretenureDecision decision) {
- int32_t value = pretenure_data();
- set_pretenure_data(PretenureDecisionBits::update(value, decision));
+ int32_t value = pretenure_data(kRelaxedLoad);
+ set_pretenure_data(PretenureDecisionBits::update(value, decision),
+ kRelaxedStore);
}
bool AllocationSite::deopt_dependent_code() const {
- return DeoptDependentCodeBit::decode(pretenure_data());
+ return DeoptDependentCodeBit::decode(pretenure_data(kRelaxedLoad));
}
void AllocationSite::set_deopt_dependent_code(bool deopt) {
- int32_t value = pretenure_data();
- set_pretenure_data(DeoptDependentCodeBit::update(value, deopt));
+ int32_t value = pretenure_data(kRelaxedLoad);
+ set_pretenure_data(DeoptDependentCodeBit::update(value, deopt),
+ kRelaxedStore);
}
int AllocationSite::memento_found_count() const {
- return MementoFoundCountBits::decode(pretenure_data());
+ return MementoFoundCountBits::decode(pretenure_data(kRelaxedLoad));
}
inline void AllocationSite::set_memento_found_count(int count) {
- int32_t value = pretenure_data();
+ int32_t value = pretenure_data(kRelaxedLoad);
// Verify that we can count more mementos than we can possibly find in one
// new space collection.
DCHECK((GetHeap()->MaxSemiSpaceSize() /
(Heap::kMinObjectSizeInTaggedWords * kTaggedSize +
AllocationMemento::kSize)) < MementoFoundCountBits::kMax);
DCHECK_LT(count, MementoFoundCountBits::kMax);
- set_pretenure_data(MementoFoundCountBits::update(value, count));
+ set_pretenure_data(MementoFoundCountBits::update(value, count),
+ kRelaxedStore);
}
int AllocationSite::memento_create_count() const {
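The pretenure_data accessors gain explicit kRelaxedLoad/kRelaxedStore tags because the field can be accessed concurrently. In standard C++ terms the tags correspond to roughly this (a sketch, not the macro expansion):

#include <atomic>
#include <cstdint>

struct PretenureBits {
  std::atomic<int32_t> value{0};

  int32_t load_relaxed() const {
    return value.load(std::memory_order_relaxed);
  }
  void store_relaxed(int32_t v) {
    value.store(v, std::memory_order_relaxed);
  }
};

// Read-modify-write as in set_memento_found_count(): the load/store pair is
// not atomic as a whole, so racing writers still need external
// synchronization; relaxed atomics only make each individual access
// race-free, without imposing ordering.
inline void SetField(PretenureBits& bits, int32_t mask, int32_t v) {
  int32_t old = bits.load_relaxed();
  bits.store_relaxed((old & ~mask) | (v & mask));
}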
diff --git a/deps/v8/src/objects/allocation-site.h b/deps/v8/src/objects/allocation-site.h
index a069279c6e..4d673b4caf 100644
--- a/deps/v8/src/objects/allocation-site.h
+++ b/deps/v8/src/objects/allocation-site.h
@@ -51,7 +51,7 @@ class AllocationSite : public Struct {
DECL_ACCESSORS(nested_site, Object)
// Bitfield containing pretenuring information.
- DECL_INT32_ACCESSORS(pretenure_data)
+ DECL_RELAXED_INT32_ACCESSORS(pretenure_data)
DECL_INT32_ACCESSORS(pretenure_create_count)
DECL_ACCESSORS(dependent_code, DependentCode)
diff --git a/deps/v8/src/objects/arguments.h b/deps/v8/src/objects/arguments.h
index 372fc745e4..55f51a7669 100644
--- a/deps/v8/src/objects/arguments.h
+++ b/deps/v8/src/objects/arguments.h
@@ -29,12 +29,10 @@ class JSArgumentsObject
// JSSloppyArgumentsObject is just a JSArgumentsObject with specific initial
// map. This initial map adds in-object properties for "length" and "callee".
-class JSSloppyArgumentsObject : public JSArgumentsObject {
+class JSSloppyArgumentsObject
+ : public TorqueGeneratedJSSloppyArgumentsObject<JSSloppyArgumentsObject,
+ JSArgumentsObject> {
public:
- DEFINE_FIELD_OFFSET_CONSTANTS(
- JSArgumentsObject::kHeaderSize,
- TORQUE_GENERATED_JS_SLOPPY_ARGUMENTS_OBJECT_FIELDS)
-
// Indices of in-object properties.
static const int kLengthIndex = 0;
static const int kCalleeIndex = kLengthIndex + 1;
@@ -45,13 +43,10 @@ class JSSloppyArgumentsObject : public JSArgumentsObject {
// JSStrictArgumentsObject is just a JSArgumentsObject with specific initial
// map. This initial map adds an in-object property for "length".
-class JSStrictArgumentsObject : public JSArgumentsObject {
+class JSStrictArgumentsObject
+ : public TorqueGeneratedJSStrictArgumentsObject<JSStrictArgumentsObject,
+ JSArgumentsObject> {
public:
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(
- JSArgumentsObject::kHeaderSize,
- TORQUE_GENERATED_JS_STRICT_ARGUMENTS_OBJECT_FIELDS)
-
// Indices of in-object properties.
static const int kLengthIndex = 0;
STATIC_ASSERT(kLengthIndex == JSSloppyArgumentsObject::kLengthIndex);
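Both arguments classes switch from hand-written DEFINE_FIELD_OFFSET_CONSTANTS to Torque-generated CRTP base classes. The shape of that pattern, sketched with a hypothetical generated base (the real TorqueGenerated* bases are emitted by the Torque compiler):

template <class Derived, class Base>
class TorqueGeneratedDemo : public Base {
 public:
  // The generated base injects layout constants for the subclass.
  static constexpr int kLengthOffset = Base::kHeaderSize;
  static constexpr int kHeaderSize = kLengthOffset + 8;  // one tagged field
};

class DemoBase {
 public:
  static constexpr int kHeaderSize = 16;
};
class Demo : public TorqueGeneratedDemo<Demo, DemoBase> {};

static_assert(Demo::kLengthOffset == DemoBase::kHeaderSize,
              "fields start right after the base header");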
diff --git a/deps/v8/src/objects/arguments.tq b/deps/v8/src/objects/arguments.tq
index cc60e62f70..c522b1db76 100644
--- a/deps/v8/src/objects/arguments.tq
+++ b/deps/v8/src/objects/arguments.tq
@@ -14,14 +14,12 @@ macro IsJSArgumentsObjectWithLength(implicit context: Context)(o: Object):
}
// Just a starting shape for JSObject; properties can move after initialization.
-@doNotGenerateCppClass
extern shape JSSloppyArgumentsObject extends JSArgumentsObject {
length: JSAny;
callee: JSAny;
}
// Just a starting shape for JSObject; properties can move after initialization.
-@doNotGenerateCppClass
extern shape JSStrictArgumentsObject extends JSArgumentsObject {
length: JSAny;
}
@@ -50,7 +48,7 @@ extern shape JSStrictArgumentsObject extends JSArgumentsObject {
// arguments array is not a fixed array or if key >= elements.arguments.length.
//
// Otherwise, t = elements.mapped_entries[key]. If t is the hole, then the
-// entry has been deleted fron the arguments object, and value is looked up in
+// entry has been deleted from the arguments object, and value is looked up in
// the unmapped arguments array, as described above. Otherwise, t is a Smi
// index into the context array specified at elements.context, and the return
// value is elements.context[t].
diff --git a/deps/v8/src/objects/backing-store.cc b/deps/v8/src/objects/backing-store.cc
index 7a59c2e715..836ad3e71d 100644
--- a/deps/v8/src/objects/backing-store.cc
+++ b/deps/v8/src/objects/backing-store.cc
@@ -9,6 +9,7 @@
#include "src/base/platform/wrappers.h"
#include "src/execution/isolate.h"
#include "src/handles/global-handles.h"
+#include "src/init/vm-cage.h"
#include "src/logging/counters.h"
#if V8_ENABLE_WEBASSEMBLY
@@ -38,8 +39,8 @@ constexpr uint64_t kFullGuardSize = uint64_t{10} * GB;
#endif // V8_ENABLE_WEBASSEMBLY
-#if V8_TARGET_ARCH_MIPS64
-// MIPS64 has a user space of 2^40 bytes on most processors,
+#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LOONG64
+// MIPS64 and LOONG64 have a user space of 2^40 bytes on most processors,
// so address space limits need to be smaller.
constexpr size_t kAddressSpaceLimit = 0x8000000000L; // 512 GiB
#elif V8_TARGET_ARCH_RISCV64
@@ -152,6 +153,15 @@ BackingStore::~BackingStore() {
return;
}
+ PageAllocator* page_allocator = GetPlatformPageAllocator();
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ if (GetProcessWideVirtualMemoryCage()->Contains(buffer_start_)) {
+ page_allocator = GetPlatformDataCagePageAllocator();
+ } else {
+ DCHECK(kAllowBackingStoresOutsideDataCage);
+ }
+#endif
+
#if V8_ENABLE_WEBASSEMBLY
if (is_wasm_memory_) {
// TODO(v8:11111): RAB / GSAB - Wasm integration.
@@ -176,8 +186,8 @@ BackingStore::~BackingStore() {
bool pages_were_freed =
region.size() == 0 /* no need to free any pages */ ||
- FreePages(GetPlatformPageAllocator(),
- reinterpret_cast<void*>(region.begin()), region.size());
+ FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
+ region.size());
CHECK(pages_were_freed);
BackingStore::ReleaseReservation(reservation_size);
Clear();
@@ -195,8 +205,8 @@ BackingStore::~BackingStore() {
bool pages_were_freed =
region.size() == 0 /* no need to free any pages */ ||
- FreePages(GetPlatformPageAllocator(),
- reinterpret_cast<void*>(region.begin()), region.size());
+ FreePages(page_allocator, reinterpret_cast<void*>(region.begin()),
+ region.size());
CHECK(pages_were_freed);
BackingStore::ReleaseReservation(reservation_size);
Clear();
@@ -263,6 +273,8 @@ std::unique_ptr<BackingStore> BackingStore::Allocate(
counters->array_buffer_new_size_failures()->AddSample(mb_length);
return {};
}
+
+ DCHECK(IsValidBackingStorePointer(buffer_start));
}
auto result = new BackingStore(buffer_start, // start
@@ -400,10 +412,24 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
// 2. Allocate pages (inaccessible by default).
//--------------------------------------------------------------------------
void* allocation_base = nullptr;
+ PageAllocator* page_allocator = GetPlatformPageAllocator();
auto allocate_pages = [&] {
- allocation_base =
- AllocatePages(GetPlatformPageAllocator(), nullptr, reservation_size,
- page_size, PageAllocator::kNoAccess);
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ page_allocator = GetPlatformDataCagePageAllocator();
+ allocation_base = AllocatePages(page_allocator, nullptr, reservation_size,
+ page_size, PageAllocator::kNoAccess);
+ if (allocation_base) return true;
+ // We currently still allow falling back to the platform page allocator if
+ // the data cage page allocator fails. This will eventually be removed.
+ // TODO(chromium:1218005) once we forbid the fallback, we should have a
+ // single API, e.g. GetPlatformDataPageAllocator(), that returns the correct
+ // page allocator to use here depending on whether the virtual memory cage
+ // is enabled or not.
+ if (!kAllowBackingStoresOutsideDataCage) return false;
+ page_allocator = GetPlatformPageAllocator();
+#endif
+ allocation_base = AllocatePages(page_allocator, nullptr, reservation_size,
+ page_size, PageAllocator::kNoAccess);
return allocation_base != nullptr;
};
if (!gc_retry(allocate_pages)) {
@@ -414,6 +440,8 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
return {};
}
+ DCHECK(IsValidBackingStorePointer(allocation_base));
+
// Get a pointer to the start of the buffer, skipping negative guard region
// if necessary.
#if V8_ENABLE_WEBASSEMBLY
@@ -429,8 +457,8 @@ std::unique_ptr<BackingStore> BackingStore::TryAllocateAndPartiallyCommitMemory(
size_t committed_byte_length = initial_pages * page_size;
auto commit_memory = [&] {
return committed_byte_length == 0 ||
- SetPermissions(GetPlatformPageAllocator(), buffer_start,
- committed_byte_length, PageAllocator::kReadWrite);
+ SetPermissions(page_allocator, buffer_start, committed_byte_length,
+ PageAllocator::kReadWrite);
};
if (!gc_retry(commit_memory)) {
TRACE_BS("BSw:try failed to set permissions (%p, %zu)\n", buffer_start,
@@ -708,6 +736,7 @@ BackingStore::ResizeOrGrowResult BackingStore::GrowInPlace(
std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
Isolate* isolate, void* allocation_base, size_t allocation_length,
SharedFlag shared, bool free_on_destruct) {
+ DCHECK(IsValidBackingStorePointer(allocation_base));
auto result = new BackingStore(allocation_base, // start
allocation_length, // length
allocation_length, // max length
@@ -729,6 +758,7 @@ std::unique_ptr<BackingStore> BackingStore::WrapAllocation(
void* allocation_base, size_t allocation_length,
v8::BackingStore::DeleterCallback deleter, void* deleter_data,
SharedFlag shared) {
+ DCHECK(IsValidBackingStorePointer(allocation_base));
bool is_empty_deleter = (deleter == v8::BackingStore::EmptyDeleter);
auto result = new BackingStore(allocation_base, // start
allocation_length, // length
diff --git a/deps/v8/src/objects/backing-store.h b/deps/v8/src/objects/backing-store.h
index 013a97a526..6c709c2b96 100644
--- a/deps/v8/src/objects/backing-store.h
+++ b/deps/v8/src/objects/backing-store.h
@@ -7,8 +7,8 @@
#include <memory>
+#include "include/v8-array-buffer.h"
#include "include/v8-internal.h"
-#include "include/v8.h"
#include "src/base/optional.h"
#include "src/handles/handles.h"
diff --git a/deps/v8/src/objects/bigint.cc b/deps/v8/src/objects/bigint.cc
index 5d21adfb89..3f1f12bcc2 100644
--- a/deps/v8/src/objects/bigint.cc
+++ b/deps/v8/src/objects/bigint.cc
@@ -1107,8 +1107,19 @@ MaybeHandle<BigInt> BigInt::FromObject(Isolate* isolate, Handle<Object> obj) {
if (isolate->has_pending_exception()) {
return MaybeHandle<BigInt>();
} else {
+ Handle<String> str = Handle<String>::cast(obj);
+ constexpr int kMaxRenderedLength = 1000;
+ if (str->length() > kMaxRenderedLength) {
+ Factory* factory = isolate->factory();
+ Handle<String> prefix =
+ factory->NewProperSubString(str, 0, kMaxRenderedLength);
+ Handle<SeqTwoByteString> ellipsis =
+ factory->NewRawTwoByteString(1).ToHandleChecked();
+ ellipsis->SeqTwoByteStringSet(0, 0x2026);
+ str = factory->NewConsString(prefix, ellipsis).ToHandleChecked();
+ }
THROW_NEW_ERROR(isolate,
- NewSyntaxError(MessageTemplate::kBigIntFromObject, obj),
+ NewSyntaxError(MessageTemplate::kBigIntFromObject, str),
BigInt);
}
}
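The new code caps how much of the offending string is rendered into the SyntaxError message, appending an ellipsis. The rule, restated over std::string purely as an illustration (V8 builds a cons string and writes the two-byte U+2026 directly):

#include <string>

std::string TruncateForMessage(const std::string& s,
                               size_t max_rendered = 1000) {
  if (s.size() <= max_rendered) return s;
  return s.substr(0, max_rendered) + "\xE2\x80\xA6";  // UTF-8 "…"
}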
diff --git a/deps/v8/src/objects/cell-inl.h b/deps/v8/src/objects/cell-inl.h
index 5c809b8172..d47b49504e 100644
--- a/deps/v8/src/objects/cell-inl.h
+++ b/deps/v8/src/objects/cell-inl.h
@@ -20,6 +20,10 @@ namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(Cell)
+DEF_RELAXED_GETTER(Cell, value, Object) {
+ return TaggedField<Object, kValueOffset>::Relaxed_Load(cage_base, *this);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/objects/cell.h b/deps/v8/src/objects/cell.h
index 4076dea0e6..56c1016bd5 100644
--- a/deps/v8/src/objects/cell.h
+++ b/deps/v8/src/objects/cell.h
@@ -19,6 +19,9 @@ class Cell : public TorqueGeneratedCell<Cell, HeapObject> {
public:
inline Address ValueAddress() { return address() + kValueOffset; }
+ using TorqueGeneratedCell::value;
+ DECL_RELAXED_GETTER(value, Object)
+
using BodyDescriptor = FixedBodyDescriptor<kValueOffset, kSize, kSize>;
TQ_OBJECT_CONSTRUCTORS(Cell)
diff --git a/deps/v8/src/objects/code-inl.h b/deps/v8/src/objects/code-inl.h
index cae02edc23..48e5810f14 100644
--- a/deps/v8/src/objects/code-inl.h
+++ b/deps/v8/src/objects/code-inl.h
@@ -9,6 +9,7 @@
#include "src/baseline/bytecode-offset-iterator.h"
#include "src/codegen/code-desc.h"
#include "src/common/assert-scope.h"
+#include "src/common/globals.h"
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
@@ -205,15 +206,26 @@ CODE_ACCESSORS_CHECKED(relocation_info_or_undefined, HeapObject,
kRelocationInfoOffset,
value.IsUndefined() || value.IsByteArray())
-CODE_ACCESSORS(deoptimization_data, FixedArray, kDeoptimizationDataOffset)
-#define IS_BASELINE() (kind() == CodeKind::BASELINE)
+ACCESSORS_CHECKED2(Code, deoptimization_data, FixedArray,
+ kDeoptimizationDataOrInterpreterDataOffset,
+ kind() != CodeKind::BASELINE,
+ kind() != CodeKind::BASELINE &&
+ !ObjectInYoungGeneration(value))
+ACCESSORS_CHECKED2(Code, bytecode_or_interpreter_data, HeapObject,
+ kDeoptimizationDataOrInterpreterDataOffset,
+ kind() == CodeKind::BASELINE,
+ kind() == CodeKind::BASELINE &&
+ !ObjectInYoungGeneration(value))
+
ACCESSORS_CHECKED2(Code, source_position_table, ByteArray, kPositionTableOffset,
- !IS_BASELINE(),
- !IS_BASELINE() && !ObjectInYoungGeneration(value))
+ kind() != CodeKind::BASELINE,
+ kind() != CodeKind::BASELINE &&
+ !ObjectInYoungGeneration(value))
ACCESSORS_CHECKED2(Code, bytecode_offset_table, ByteArray, kPositionTableOffset,
- IS_BASELINE(),
- IS_BASELINE() && !ObjectInYoungGeneration(value))
-#undef IS_BASELINE
+ kind() == CodeKind::BASELINE,
+ kind() == CodeKind::BASELINE &&
+ !ObjectInYoungGeneration(value))
+
// Concurrent marker needs to access kind specific flags in code data container.
RELEASE_ACQUIRE_CODE_ACCESSORS(code_data_container, CodeDataContainer,
kCodeDataContainerOffset)
@@ -268,7 +280,8 @@ inline CodeDataContainer CodeDataContainerFromCodeT(CodeT code) {
void Code::WipeOutHeader() {
WRITE_FIELD(*this, kRelocationInfoOffset, Smi::FromInt(0));
- WRITE_FIELD(*this, kDeoptimizationDataOffset, Smi::FromInt(0));
+ WRITE_FIELD(*this, kDeoptimizationDataOrInterpreterDataOffset,
+ Smi::FromInt(0));
WRITE_FIELD(*this, kPositionTableOffset, Smi::FromInt(0));
WRITE_FIELD(*this, kCodeDataContainerOffset, Smi::FromInt(0));
}
@@ -553,44 +566,47 @@ inline bool Code::is_turbofanned() const {
inline bool Code::can_have_weak_objects() const {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
- int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
+ int32_t flags =
+ code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
return CanHaveWeakObjectsField::decode(flags);
}
inline void Code::set_can_have_weak_objects(bool value) {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t previous = container.kind_specific_flags();
+ int32_t previous = container.kind_specific_flags(kRelaxedLoad);
int32_t updated = CanHaveWeakObjectsField::update(previous, value);
- container.set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated, kRelaxedStore);
}
inline bool Code::is_promise_rejection() const {
DCHECK(kind() == CodeKind::BUILTIN);
- int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
+ int32_t flags =
+ code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
return IsPromiseRejectionField::decode(flags);
}
inline void Code::set_is_promise_rejection(bool value) {
DCHECK(kind() == CodeKind::BUILTIN);
CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t previous = container.kind_specific_flags();
+ int32_t previous = container.kind_specific_flags(kRelaxedLoad);
int32_t updated = IsPromiseRejectionField::update(previous, value);
- container.set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated, kRelaxedStore);
}
inline bool Code::is_exception_caught() const {
DCHECK(kind() == CodeKind::BUILTIN);
- int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
+ int32_t flags =
+ code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
return IsExceptionCaughtField::decode(flags);
}
inline void Code::set_is_exception_caught(bool value) {
DCHECK(kind() == CodeKind::BUILTIN);
CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t previous = container.kind_specific_flags();
+ int32_t previous = container.kind_specific_flags(kRelaxedLoad);
int32_t updated = IsExceptionCaughtField::update(previous, value);
- container.set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated, kRelaxedStore);
}
inline bool Code::is_off_heap_trampoline() const {
@@ -642,7 +658,8 @@ int Code::stack_slots() const {
bool Code::marked_for_deoptimization() const {
DCHECK(CodeKindCanDeoptimize(kind()));
- int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
+ int32_t flags =
+ code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
return MarkedForDeoptimizationField::decode(flags);
}
@@ -650,14 +667,15 @@ void Code::set_marked_for_deoptimization(bool flag) {
DCHECK(CodeKindCanDeoptimize(kind()));
DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t previous = container.kind_specific_flags();
+ int32_t previous = container.kind_specific_flags(kRelaxedLoad);
int32_t updated = MarkedForDeoptimizationField::update(previous, flag);
- container.set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated, kRelaxedStore);
}
int Code::deoptimization_count() const {
DCHECK(CodeKindCanDeoptimize(kind()));
- int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
+ int32_t flags =
+ code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
int count = DeoptCountField::decode(flags);
DCHECK_GE(count, 0);
return count;
@@ -666,17 +684,18 @@ int Code::deoptimization_count() const {
void Code::increment_deoptimization_count() {
DCHECK(CodeKindCanDeoptimize(kind()));
CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t flags = container.kind_specific_flags();
+ int32_t flags = container.kind_specific_flags(kRelaxedLoad);
int32_t count = DeoptCountField::decode(flags);
DCHECK_GE(count, 0);
CHECK_LE(count + 1, DeoptCountField::kMax);
int32_t updated = DeoptCountField::update(flags, count + 1);
- container.set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated, kRelaxedStore);
}
bool Code::embedded_objects_cleared() const {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
- int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
+ int32_t flags =
+ code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
return EmbeddedObjectsClearedField::decode(flags);
}
@@ -684,14 +703,15 @@ void Code::set_embedded_objects_cleared(bool flag) {
DCHECK(CodeKindIsOptimizedJSFunction(kind()));
DCHECK_IMPLIES(flag, marked_for_deoptimization());
CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t previous = container.kind_specific_flags();
+ int32_t previous = container.kind_specific_flags(kRelaxedLoad);
int32_t updated = EmbeddedObjectsClearedField::update(previous, flag);
- container.set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated, kRelaxedStore);
}
bool Code::deopt_already_counted() const {
DCHECK(CodeKindCanDeoptimize(kind()));
- int32_t flags = code_data_container(kAcquireLoad).kind_specific_flags();
+ int32_t flags =
+ code_data_container(kAcquireLoad).kind_specific_flags(kRelaxedLoad);
return DeoptAlreadyCountedField::decode(flags);
}
@@ -699,9 +719,9 @@ void Code::set_deopt_already_counted(bool flag) {
DCHECK(CodeKindCanDeoptimize(kind()));
DCHECK_IMPLIES(flag, AllowDeoptimization::IsAllowed(GetIsolate()));
CodeDataContainer container = code_data_container(kAcquireLoad);
- int32_t previous = container.kind_specific_flags();
+ int32_t previous = container.kind_specific_flags(kRelaxedLoad);
int32_t updated = DeoptAlreadyCountedField::update(previous, flag);
- container.set_kind_specific_flags(updated);
+ container.set_kind_specific_flags(updated, kRelaxedStore);
}
bool Code::is_optimized_code() const {
@@ -800,8 +820,8 @@ bool Code::IsExecutable() {
// concurrent marker.
STATIC_ASSERT(FIELD_SIZE(CodeDataContainer::kKindSpecificFlagsOffset) ==
kInt32Size);
-IMPLICIT_TAG_RELAXED_INT32_ACCESSORS(CodeDataContainer, kind_specific_flags,
- kKindSpecificFlagsOffset)
+RELAXED_INT32_ACCESSORS(CodeDataContainer, kind_specific_flags,
+ kKindSpecificFlagsOffset)
ACCESSORS_CHECKED(CodeDataContainer, raw_code, Object, kCodeOffset,
V8_EXTERNAL_CODE_SPACE_BOOL)
RELAXED_ACCESSORS_CHECKED(CodeDataContainer, raw_code, Object, kCodeOffset,
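
Every flag accessor above follows the same shape: relaxed-load the 32-bit kind_specific_flags word, decode or update one bitfield, relaxed-store it back. The load and store are individually atomic, but the read-modify-write as a whole is not, so callers still need external synchronization. A compact sketch of the pattern (the BitField helper and flag layout are simplified assumptions, not V8's):

    #include <atomic>
    #include <cstdint>

    // Minimal stand-in for V8's BitField utility.
    template <int kShift, int kSize>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
      static bool decode(uint32_t word) { return (word & kMask) != 0; }
      static uint32_t update(uint32_t word, bool v) {
        return (word & ~kMask) | (v ? kMask : 0u);
      }
    };

    using MarkedForDeopt = BitField<0, 1>;

    std::atomic<uint32_t> kind_specific_flags{0};

    void set_marked_for_deoptimization(bool flag) {
      uint32_t prev = kind_specific_flags.load(std::memory_order_relaxed);
      uint32_t next = MarkedForDeopt::update(prev, flag);
      kind_specific_flags.store(next, std::memory_order_relaxed);  // not a CAS
    }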
diff --git a/deps/v8/src/objects/code.cc b/deps/v8/src/objects/code.cc
index e2a4528d0d..b3f9953be1 100644
--- a/deps/v8/src/objects/code.cc
+++ b/deps/v8/src/objects/code.cc
@@ -333,7 +333,7 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
#elif defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \
defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_IA32) || \
- defined(V8_TARGET_ARCH_RISCV64)
+ defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64)
for (RelocIterator it(*this, kModeMask); !it.done(); it.next()) {
// On these platforms we emit relative builtin-to-builtin
// jumps for isolate independent builtins in the snapshot. They are later
@@ -349,10 +349,10 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
}
return false;
}
+ return true;
#else
#error Unsupported architecture.
#endif
- return true;
}
bool Code::Inlines(SharedFunctionInfo sfi) {
@@ -775,7 +775,7 @@ void DependentCode::SetDependentCode(Handle<HeapObject> object,
void DependentCode::InstallDependency(Isolate* isolate, Handle<Code> code,
Handle<HeapObject> object,
DependencyGroup group) {
- if (V8_UNLIKELY(FLAG_trace_code_dependencies)) {
+ if (V8_UNLIKELY(FLAG_trace_compilation_dependencies)) {
StdoutStream{} << "Installing dependency of [" << code->GetHeapObject()
<< "] on [" << object << "] in group ["
<< DependencyGroupName(group) << "]\n";
diff --git a/deps/v8/src/objects/code.h b/deps/v8/src/objects/code.h
index 2d6fc3e983..2b2c874d86 100644
--- a/deps/v8/src/objects/code.h
+++ b/deps/v8/src/objects/code.h
@@ -43,7 +43,7 @@ class CodeDataContainer : public HeapObject {
public:
NEVER_READ_ONLY_SPACE
DECL_ACCESSORS(next_code_link, Object)
- DECL_INT_ACCESSORS(kind_specific_flags)
+ DECL_RELAXED_INT32_ACCESSORS(kind_specific_flags)
// Clear uninitialized padding space. This ensures that the snapshot content
// is deterministic.
@@ -279,8 +279,12 @@ class Code : public HeapObject {
// This function should be called only from GC.
void ClearEmbeddedObjects(Heap* heap);
- // [deoptimization_data]: Array containing data for deopt.
+ // [deoptimization_data]: Array containing data for deopt for non-baseline
+ // code.
DECL_ACCESSORS(deoptimization_data, FixedArray)
+ // [bytecode_or_interpreter_data]: BytecodeArray or InterpreterData for
+ // baseline code.
+ DECL_ACCESSORS(bytecode_or_interpreter_data, HeapObject)
// [source_position_table]: ByteArray for the source positions table for
// non-baseline code.
@@ -511,7 +515,7 @@ class Code : public HeapObject {
// Layout description.
#define CODE_FIELDS(V) \
V(kRelocationInfoOffset, kTaggedSize) \
- V(kDeoptimizationDataOffset, kTaggedSize) \
+ V(kDeoptimizationDataOrInterpreterDataOffset, kTaggedSize) \
V(kPositionTableOffset, kTaggedSize) \
V(kCodeDataContainerOffset, kTaggedSize) \
 /* Data or code not directly visited by GC starts here. */ \
@@ -544,8 +548,10 @@ class Code : public HeapObject {
static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 12 : 24;
#elif V8_TARGET_ARCH_MIPS64
static constexpr int kHeaderPaddingSize = 24;
+#elif V8_TARGET_ARCH_LOONG64
+ static constexpr int kHeaderPaddingSize = 24;
#elif V8_TARGET_ARCH_X64
- static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 12 : 24;
+ static constexpr int kHeaderPaddingSize = COMPRESS_POINTERS_BOOL ? 12 : 56;
#elif V8_TARGET_ARCH_ARM
static constexpr int kHeaderPaddingSize = 12;
#elif V8_TARGET_ARCH_IA32
@@ -647,6 +653,10 @@ class Code::OptimizedCodeIterator {
inline CodeT ToCodeT(Code code);
inline Code FromCodeT(CodeT code);
inline Code FromCodeT(CodeT code, RelaxedLoadTag);
+inline Code FromCodeT(CodeT code, AcquireLoadTag);
+inline Code FromCodeT(CodeT code, PtrComprCageBase);
+inline Code FromCodeT(CodeT code, PtrComprCageBase, RelaxedLoadTag);
+inline Code FromCodeT(CodeT code, PtrComprCageBase, AcquireLoadTag);
inline CodeDataContainer CodeDataContainerFromCodeT(CodeT code);
class AbstractCode : public HeapObject {
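
code.h now reuses a single tagged slot, kDeoptimizationDataOrInterpreterDataOffset, for two mutually exclusive payloads: FixedArray deopt data for non-baseline code, and BytecodeArray/InterpreterData for baseline code, with the ACCESSORS_CHECKED2 macros enforcing the kind() guard. A freestanding sketch of the same one-slot, kind-discriminated idea (all types and names here are illustrative only):

    #include <cassert>

    enum class Kind { kBaseline, kOther };

    struct DeoptData {};   // stand-in for FixedArray
    struct InterpData {};  // stand-in for BytecodeArray/InterpreterData

    struct CodeLike {
      Kind kind;
      void* slot = nullptr;  // single storage location, two interpretations

      DeoptData* deoptimization_data() const {
        assert(kind != Kind::kBaseline);  // mirrors the CHECKED2 guard
        return static_cast<DeoptData*>(slot);
      }
      InterpData* bytecode_or_interpreter_data() const {
        assert(kind == Kind::kBaseline);
        return static_cast<InterpData*>(slot);
      }
    };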
diff --git a/deps/v8/src/objects/contexts.h b/deps/v8/src/objects/contexts.h
index 7fae0c9e0d..81ed696cb0 100644
--- a/deps/v8/src/objects/contexts.h
+++ b/deps/v8/src/objects/contexts.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_CONTEXTS_H_
#define V8_OBJECTS_CONTEXTS_H_
+#include "include/v8-promise.h"
#include "src/objects/fixed-array.h"
#include "src/objects/function-kind.h"
#include "src/objects/ordered-hash-table.h"
@@ -43,13 +44,8 @@ enum ContextLookupFlags {
V(GENERATOR_NEXT_INTERNAL, JSFunction, generator_next_internal) \
V(ASYNC_MODULE_EVALUATE_INTERNAL, JSFunction, \
async_module_evaluate_internal) \
- V(OBJECT_CREATE, JSFunction, object_create) \
V(REFLECT_APPLY_INDEX, JSFunction, reflect_apply) \
V(REFLECT_CONSTRUCT_INDEX, JSFunction, reflect_construct) \
- V(MATH_FLOOR_INDEX, JSFunction, math_floor) \
- V(MATH_POW_INDEX, JSFunction, math_pow) \
- V(PROMISE_INTERNAL_CONSTRUCTOR_INDEX, JSFunction, \
- promise_internal_constructor) \
V(PROMISE_THEN_INDEX, JSFunction, promise_then) \
V(FUNCTION_PROTOTYPE_APPLY_INDEX, JSFunction, function_prototype_apply)
diff --git a/deps/v8/src/objects/feedback-cell-inl.h b/deps/v8/src/objects/feedback-cell-inl.h
index 84257e544c..46d92b8447 100644
--- a/deps/v8/src/objects/feedback-cell-inl.h
+++ b/deps/v8/src/objects/feedback-cell-inl.h
@@ -40,7 +40,7 @@ void FeedbackCell::reset_feedback_vector(
CHECK(value().IsFeedbackVector());
ClosureFeedbackCellArray closure_feedback_cell_array =
FeedbackVector::cast(value()).closure_feedback_cell_array();
- set_value(closure_feedback_cell_array);
+ set_value(closure_feedback_cell_array, kReleaseStore);
if (gc_notify_updated_slot) {
(*gc_notify_updated_slot)(*this, RawField(FeedbackCell::kValueOffset),
closure_feedback_cell_array);
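
reset_feedback_vector now stores the new value with kReleaseStore, which pairs with acquire loads on the reader side: a thread that observes the new value also observes the writes that preceded it. In std::atomic terms (a generic sketch, not V8's tagged-field plumbing):

    #include <atomic>

    std::atomic<int*> slot{nullptr};

    void writer(int* payload) {
      *payload = 42;                                   // happens-before the store
      slot.store(payload, std::memory_order_release);  // kReleaseStore analogue
    }

    int reader() {
      int* p = slot.load(std::memory_order_acquire);   // kAcquireLoad analogue
      return p ? *p : -1;  // if p is seen, *p == 42 is guaranteed visible
    }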
diff --git a/deps/v8/src/objects/fixed-array-inl.h b/deps/v8/src/objects/fixed-array-inl.h
index dbf0222b84..98315ad73d 100644
--- a/deps/v8/src/objects/fixed-array-inl.h
+++ b/deps/v8/src/objects/fixed-array-inl.h
@@ -84,7 +84,6 @@ bool FixedArray::is_the_hole(Isolate* isolate, int index) {
return get(isolate, index).IsTheHole(isolate);
}
-#if !defined(_WIN32) || (defined(_WIN64) && _MSC_VER < 1930 && __cplusplus < 201703L)
void FixedArray::set(int index, Smi value) {
DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
@@ -92,7 +91,6 @@ void FixedArray::set(int index, Smi value) {
int offset = OffsetOfElementAt(index);
RELAXED_WRITE_FIELD(*this, offset, value);
}
-#endif
void FixedArray::set(int index, Object value) {
DCHECK_NE(GetReadOnlyRoots().fixed_cow_array_map(), map());
diff --git a/deps/v8/src/objects/fixed-array.h b/deps/v8/src/objects/fixed-array.h
index 6b0ac5d9ac..1dfd7dac13 100644
--- a/deps/v8/src/objects/fixed-array.h
+++ b/deps/v8/src/objects/fixed-array.h
@@ -134,18 +134,7 @@ class FixedArray
inline bool is_the_hole(Isolate* isolate, int index);
// Setter that doesn't need write barrier.
-#if !defined(_WIN32) || (defined(_WIN64) && _MSC_VER < 1930 && __cplusplus < 201703L)
inline void set(int index, Smi value);
-#else
- inline void set(int index, Smi value) {
- DCHECK_NE(map(), GetReadOnlyRoots().fixed_cow_array_map());
- DCHECK_LT(static_cast<unsigned>(index), static_cast<unsigned>(length()));
- DCHECK(Object(value).IsSmi());
- int offset = OffsetOfElementAt(index);
- RELAXED_WRITE_FIELD(*this, offset, value);
- }
-#endif
-
// Setter with explicit barrier mode.
inline void set(int index, Object value, WriteBarrierMode mode);
diff --git a/deps/v8/src/objects/instance-type.h b/deps/v8/src/objects/instance-type.h
index f7cdd28c05..de90f6baa1 100644
--- a/deps/v8/src/objects/instance-type.h
+++ b/deps/v8/src/objects/instance-type.h
@@ -139,6 +139,12 @@ enum InstanceType : uint16_t {
FIRST_TYPE = FIRST_HEAP_OBJECT_TYPE,
LAST_TYPE = LAST_HEAP_OBJECT_TYPE,
BIGINT_TYPE = BIG_INT_BASE_TYPE,
+
+#ifdef V8_EXTERNAL_CODE_SPACE
+ CODET_TYPE = CODE_DATA_CONTAINER_TYPE,
+#else
+ CODET_TYPE = CODE_TYPE,
+#endif
};
// This constant is defined outside of the InstanceType enum because the
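
CODET_TYPE lets instance-type checks compile against either representation: with V8_EXTERNAL_CODE_SPACE the "CodeT" view of a code object is the CodeDataContainer, otherwise it is the Code object itself. The pattern is an ordinary compile-time selection (a sketch with made-up constant values):

    constexpr int kCodeType = 1;  // illustrative values only
    constexpr int kCodeDataContainerType = 2;

    #ifdef V8_EXTERNAL_CODE_SPACE
    constexpr int kCodeTType = kCodeDataContainerType;
    #else
    constexpr int kCodeTType = kCodeType;
    #endif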
diff --git a/deps/v8/src/objects/intl-objects.cc b/deps/v8/src/objects/intl-objects.cc
index ac43c319f5..99a7d62098 100644
--- a/deps/v8/src/objects/intl-objects.cc
+++ b/deps/v8/src/objects/intl-objects.cc
@@ -1666,6 +1666,145 @@ MaybeHandle<JSArray> Intl::GetCanonicalLocales(Isolate* isolate,
return CreateArrayFromList(isolate, maybe_ll.FromJust(), attr);
}
+namespace {
+
+MaybeHandle<JSArray> AvailableCollations(Isolate* isolate) {
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::StringEnumeration> enumeration(
+ icu::Collator::getKeywordValues("collation", status));
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSArray);
+ }
+ return Intl::ToJSArray(isolate, "co", enumeration.get(),
+ Intl::RemoveCollation, true);
+}
+
+MaybeHandle<JSArray> VectorToJSArray(Isolate* isolate,
+ const std::vector<std::string>& array) {
+ Factory* factory = isolate->factory();
+ Handle<FixedArray> fixed_array =
+ factory->NewFixedArray(static_cast<int32_t>(array.size()));
+ int32_t index = 0;
+ for (std::string item : array) {
+ Handle<String> str = factory->NewStringFromAsciiChecked(item.c_str());
+ fixed_array->set(index++, *str);
+ }
+ return factory->NewJSArrayWithElements(fixed_array);
+}
+
+MaybeHandle<JSArray> AvailableCurrencies(Isolate* isolate) {
+ UErrorCode status = U_ZERO_ERROR;
+ UEnumeration* ids =
+ ucurr_openISOCurrencies(UCURR_COMMON | UCURR_NON_DEPRECATED, &status);
+ const char* next = nullptr;
+ std::vector<std::string> array;
+ while (U_SUCCESS(status) &&
+ (next = uenum_next(ids, nullptr, &status)) != nullptr) {
+ array.push_back(next);
+ }
+ std::sort(array.begin(), array.end());
+ uenum_close(ids);
+ return VectorToJSArray(isolate, array);
+}
+
+MaybeHandle<JSArray> AvailableNumberingSystems(Isolate* isolate) {
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::StringEnumeration> enumeration(
+ icu::NumberingSystem::getAvailableNames(status));
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSArray);
+ }
+ // Need to filter out algorithmic numbering systems (isAlgorithmic).
+ return Intl::ToJSArray(
+ isolate, "nu", enumeration.get(),
+ [](const char* value) {
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::NumberingSystem> numbering_system(
+ icu::NumberingSystem::createInstanceByName(value, status));
+ // Skip algorithmic ones since Chrome filters out the resource.
+ return U_FAILURE(status) || numbering_system->isAlgorithmic();
+ },
+ true);
+}
+
+MaybeHandle<JSArray> AvailableTimeZones(Isolate* isolate) {
+ UErrorCode status = U_ZERO_ERROR;
+ std::unique_ptr<icu::StringEnumeration> enumeration(
+ icu::TimeZone::createTimeZoneIDEnumeration(
+ UCAL_ZONE_TYPE_CANONICAL_LOCATION, nullptr, nullptr, status));
+ if (U_FAILURE(status)) {
+ THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
+ JSArray);
+ }
+ return Intl::ToJSArray(isolate, nullptr, enumeration.get(), nullptr, true);
+}
+
+MaybeHandle<JSArray> AvailableUnits(Isolate* isolate) {
+ Factory* factory = isolate->factory();
+ std::set<std::string> sanctioned(Intl::SanctionedSimpleUnits());
+ Handle<FixedArray> fixed_array =
+ factory->NewFixedArray(static_cast<int32_t>(sanctioned.size()));
+ int32_t index = 0;
+ for (std::string item : sanctioned) {
+ Handle<String> str = factory->NewStringFromAsciiChecked(item.c_str());
+ fixed_array->set(index++, *str);
+ }
+ return factory->NewJSArrayWithElements(fixed_array);
+}
+
+} // namespace
+
+// ecma-402 #sec-intl.supportedvaluesof
+MaybeHandle<JSArray> Intl::SupportedValuesOf(Isolate* isolate,
+ Handle<Object> key_obj) {
+ Factory* factory = isolate->factory();
+ // 1. Let key be ? ToString(key).
+ Handle<String> key_str;
+ ASSIGN_RETURN_ON_EXCEPTION(isolate, key_str,
+ Object::ToString(isolate, key_obj), JSArray);
+ // 2. If key is "calendar", then
+ if (factory->calendar_string()->Equals(*key_str)) {
+ // a. Let list be ! AvailableCalendars( ).
+ return Intl::AvailableCalendars(isolate);
+ }
+ // 3. Else if key is "collation", then
+ if (factory->collation_string()->Equals(*key_str)) {
+ // a. Let list be ! AvailableCollations( ).
+ return AvailableCollations(isolate);
+ }
+ // 4. Else if key is "currency", then
+ if (factory->currency_string()->Equals(*key_str)) {
+ // a. Let list be ! AvailableCurrencies( ).
+ return AvailableCurrencies(isolate);
+ }
+ // 5. Else if key is "numberingSystem", then
+ if (factory->numberingSystem_string()->Equals(*key_str)) {
+ // a. Let list be ! AvailableNumberingSystems( ).
+ return AvailableNumberingSystems(isolate);
+ }
+ // 6. Else if key is "timeZone", then
+ if (factory->timeZone_string()->Equals(*key_str)) {
+ // a. Let list be ! AvailableTimeZones( ).
+ return AvailableTimeZones(isolate);
+ }
+ // 7. Else if key is "unit", then
+ if (factory->unit_string()->Equals(*key_str)) {
+ // a. Let list be ! AvailableUnits( ).
+ return AvailableUnits(isolate);
+ }
+ // 8. Else,
+ // a. Throw a RangeError exception.
+ // 9. Return ! CreateArrayFromList( list ).
+
+ THROW_NEW_ERROR(
+ isolate,
+ NewRangeError(MessageTemplate::kInvalid,
+ factory->NewStringFromStaticChars("key"), key_str),
+ JSArray);
+}
+
// ECMA 402 Intl.*.supportedLocalesOf
MaybeHandle<JSObject> Intl::SupportedLocalesOf(
Isolate* isolate, const char* method,
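
Intl::SupportedValuesOf above is a plain string-keyed dispatch over the six keys ecma-402 allows, throwing a RangeError for anything else. Stripped of the Isolate/Handle plumbing, the control flow reduces to the following sketch (the *Stub helpers and their return values are assumptions standing in for the ICU-backed functions):

    #include <stdexcept>
    #include <string>
    #include <vector>

    // Stand-ins for the ICU-backed helpers defined in the hunk above.
    std::vector<std::string> AvailableCalendarsStub() { return {"gregory"}; }
    std::vector<std::string> AvailableCollationsStub() { return {"emoji"}; }

    std::vector<std::string> SupportedValuesOfSketch(const std::string& key) {
      if (key == "calendar") return AvailableCalendarsStub();
      if (key == "collation") return AvailableCollationsStub();
      // "currency", "numberingSystem", "timeZone" and "unit" follow suit.
      throw std::range_error("invalid key: " + key);  // spec step 8a
    }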
@@ -2247,5 +2386,51 @@ MaybeHandle<JSReceiver> Intl::CoerceOptionsToObject(Isolate* isolate,
return Handle<JSReceiver>::cast(options);
}
+MaybeHandle<JSArray> Intl::ToJSArray(
+ Isolate* isolate, const char* unicode_key,
+ icu::StringEnumeration* enumeration,
+ const std::function<bool(const char*)>& removes, bool sort) {
+ UErrorCode status = U_ZERO_ERROR;
+ std::vector<std::string> array;
+ for (const char* item = enumeration->next(nullptr, status);
+ U_SUCCESS(status) && item != nullptr;
+ item = enumeration->next(nullptr, status)) {
+ if (unicode_key != nullptr) {
+ item = uloc_toUnicodeLocaleType(unicode_key, item);
+ }
+ if (removes == nullptr || !(removes)(item)) {
+ array.push_back(item);
+ }
+ }
+
+ if (sort) {
+ std::sort(array.begin(), array.end());
+ }
+ return VectorToJSArray(isolate, array);
+}
+
+bool Intl::RemoveCollation(const char* collation) {
+ return strcmp("standard", collation) == 0 || strcmp("search", collation) == 0;
+}
+
+// See the list in ecma402 #sec-issanctionedsimpleunitidentifier
+std::set<std::string> Intl::SanctionedSimpleUnits() {
+ return std::set<std::string>({"acre", "bit", "byte",
+ "celsius", "centimeter", "day",
+ "degree", "fahrenheit", "fluid-ounce",
+ "foot", "gallon", "gigabit",
+ "gigabyte", "gram", "hectare",
+ "hour", "inch", "kilobit",
+ "kilobyte", "kilogram", "kilometer",
+ "liter", "megabit", "megabyte",
+ "meter", "mile", "mile-scandinavian",
+ "millimeter", "milliliter", "millisecond",
+ "minute", "month", "ounce",
+ "percent", "petabyte", "pound",
+ "second", "stone", "terabit",
+ "terabyte", "week", "yard",
+ "year"});
+}
+
} // namespace internal
} // namespace v8
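
Intl::ToJSArray folds what used to be bespoke loops into one helper: walk an ICU StringEnumeration, optionally map each item to its Unicode locale type, drop items the removes predicate rejects, and optionally sort. The same shape over a plain container (a self-contained sketch with no ICU types):

    #include <algorithm>
    #include <functional>
    #include <string>
    #include <vector>

    std::vector<std::string> ToArraySketch(
        const std::vector<std::string>& items,
        const std::function<bool(const std::string&)>& removes, bool sort) {
      std::vector<std::string> out;
      for (const auto& item : items) {
        if (removes == nullptr || !removes(item)) out.push_back(item);
      }
      if (sort) std::sort(out.begin(), out.end());
      return out;
    }

    // Usage mirroring Intl::RemoveCollation:
    //   auto r = ToArraySketch(values,
    //       [](const std::string& c) { return c == "standard" || c == "search"; },
    //       /*sort=*/true);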
diff --git a/deps/v8/src/objects/intl-objects.h b/deps/v8/src/objects/intl-objects.h
index ec0eb93873..122ca4b746 100644
--- a/deps/v8/src/objects/intl-objects.h
+++ b/deps/v8/src/objects/intl-objects.h
@@ -27,6 +27,7 @@ namespace U_ICU_NAMESPACE {
class BreakIterator;
class Collator;
class FormattedValue;
+class StringEnumeration;
class UnicodeString;
} // namespace U_ICU_NAMESPACE
@@ -142,6 +143,10 @@ class Intl {
V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> GetCanonicalLocales(
Isolate* isolate, Handle<Object> locales);
+ // ecma-402 #sec-intl.supportedvaluesof
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> SupportedValuesOf(
+ Isolate* isolate, Handle<Object> key);
+
// For locale sensitive functions
V8_WARN_UNUSED_RESULT static MaybeHandle<String> StringLocaleConvertCase(
Isolate* isolate, Handle<String> s, bool is_upper,
@@ -338,6 +343,18 @@ class Intl {
// ecma402/#sec-coerceoptionstoobject
V8_WARN_UNUSED_RESULT static MaybeHandle<JSReceiver> CoerceOptionsToObject(
Isolate* isolate, Handle<Object> options, const char* service);
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> ToJSArray(
+ Isolate* isolate, const char* unicode_key,
+ icu::StringEnumeration* enumeration,
+ const std::function<bool(const char*)>& removes, bool sort);
+
+ static bool RemoveCollation(const char* collation);
+
+ static std::set<std::string> SanctionedSimpleUnits();
+
+ V8_WARN_UNUSED_RESULT static MaybeHandle<JSArray> AvailableCalendars(
+ Isolate* isolate);
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-array-buffer-inl.h b/deps/v8/src/objects/js-array-buffer-inl.h
index 7ea8aeb3e5..e1de03dcf9 100644
--- a/deps/v8/src/objects/js-array-buffer-inl.h
+++ b/deps/v8/src/objects/js-array-buffer-inl.h
@@ -30,10 +30,6 @@ ACCESSORS(JSTypedArray, base_pointer, Object, kBasePointerOffset)
RELEASE_ACQUIRE_ACCESSORS(JSTypedArray, base_pointer, Object,
kBasePointerOffset)
-void JSArrayBuffer::AllocateExternalPointerEntries(Isolate* isolate) {
- InitExternalPointerField(kBackingStoreOffset, isolate);
-}
-
size_t JSArrayBuffer::byte_length() const {
return ReadField<size_t>(kByteLengthOffset);
}
@@ -43,26 +39,20 @@ void JSArrayBuffer::set_byte_length(size_t value) {
}
DEF_GETTER(JSArrayBuffer, backing_store, void*) {
- Isolate* isolate = GetIsolateForHeapSandbox(*this);
- Address value = ReadExternalPointerField(kBackingStoreOffset, isolate,
- kArrayBufferBackingStoreTag);
- return reinterpret_cast<void*>(value);
+ return reinterpret_cast<void*>(ReadField<Address>(kBackingStoreOffset));
}
void JSArrayBuffer::set_backing_store(Isolate* isolate, void* value) {
- WriteExternalPointerField(kBackingStoreOffset, isolate,
- reinterpret_cast<Address>(value),
- kArrayBufferBackingStoreTag);
+ DCHECK(IsValidBackingStorePointer(value));
+ WriteField<Address>(kBackingStoreOffset, reinterpret_cast<Address>(value));
}
uint32_t JSArrayBuffer::GetBackingStoreRefForDeserialization() const {
- return static_cast<uint32_t>(
- ReadField<ExternalPointer_t>(kBackingStoreOffset));
+ return static_cast<uint32_t>(ReadField<Address>(kBackingStoreOffset));
}
void JSArrayBuffer::SetBackingStoreRefForSerialization(uint32_t ref) {
- WriteField<ExternalPointer_t>(kBackingStoreOffset,
- static_cast<ExternalPointer_t>(ref));
+ WriteField<Address>(kBackingStoreOffset, static_cast<Address>(ref));
}
ArrayBufferExtension* JSArrayBuffer::extension() const {
@@ -238,10 +228,6 @@ size_t JSTypedArray::GetLength() const {
return GetLengthOrOutOfBounds(out_of_bounds);
}
-void JSTypedArray::AllocateExternalPointerEntries(Isolate* isolate) {
- InitExternalPointerField(kExternalPointerOffset, isolate);
-}
-
size_t JSTypedArray::length() const {
DCHECK(!is_length_tracking());
DCHECK(!is_backed_by_rab());
@@ -257,18 +243,16 @@ void JSTypedArray::set_length(size_t value) {
}
DEF_GETTER(JSTypedArray, external_pointer, Address) {
- Isolate* isolate = GetIsolateForHeapSandbox(*this);
- return ReadExternalPointerField(kExternalPointerOffset, isolate,
- kTypedArrayExternalPointerTag);
+ return ReadField<Address>(kExternalPointerOffset);
}
-DEF_GETTER(JSTypedArray, external_pointer_raw, ExternalPointer_t) {
- return ReadField<ExternalPointer_t>(kExternalPointerOffset);
+DEF_GETTER(JSTypedArray, external_pointer_raw, Address) {
+ return ReadField<Address>(kExternalPointerOffset);
}
void JSTypedArray::set_external_pointer(Isolate* isolate, Address value) {
- WriteExternalPointerField(kExternalPointerOffset, isolate, value,
- kTypedArrayExternalPointerTag);
+ DCHECK(IsValidBackingStorePointer(reinterpret_cast<void*>(value)));
+ WriteField<Address>(kExternalPointerOffset, value);
}
Address JSTypedArray::ExternalPointerCompensationForOnHeapArray(
@@ -282,14 +266,12 @@ Address JSTypedArray::ExternalPointerCompensationForOnHeapArray(
uint32_t JSTypedArray::GetExternalBackingStoreRefForDeserialization() const {
DCHECK(!is_on_heap());
- return static_cast<uint32_t>(
- ReadField<ExternalPointer_t>(kExternalPointerOffset));
+ return static_cast<uint32_t>(ReadField<Address>(kExternalPointerOffset));
}
void JSTypedArray::SetExternalBackingStoreRefForSerialization(uint32_t ref) {
DCHECK(!is_on_heap());
- WriteField<ExternalPointer_t>(kExternalPointerOffset,
- static_cast<ExternalPointer_t>(ref));
+ WriteField<Address>(kExternalPointerOffset, static_cast<Address>(ref));
}
void JSTypedArray::RemoveExternalPointerCompensationForSerialization(
@@ -390,19 +372,12 @@ MaybeHandle<JSTypedArray> JSTypedArray::Validate(Isolate* isolate,
}
DEF_GETTER(JSDataView, data_pointer, void*) {
- Isolate* isolate = GetIsolateForHeapSandbox(*this);
- return reinterpret_cast<void*>(ReadExternalPointerField(
- kDataPointerOffset, isolate, kDataViewDataPointerTag));
-}
-
-void JSDataView::AllocateExternalPointerEntries(Isolate* isolate) {
- InitExternalPointerField(kDataPointerOffset, isolate);
+ return reinterpret_cast<void*>(ReadField<Address>(kDataPointerOffset));
}
void JSDataView::set_data_pointer(Isolate* isolate, void* value) {
- WriteExternalPointerField(kDataPointerOffset, isolate,
- reinterpret_cast<Address>(value),
- kDataViewDataPointerTag);
+ DCHECK(IsValidBackingStorePointer(value));
+ WriteField<Address>(kDataPointerOffset, reinterpret_cast<Address>(value));
}
} // namespace internal
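
With the heap-sandbox external pointer table dropped from these fields, backing-store and data pointers are once again plain machine words read and written at a fixed offset in the object. In freestanding form the accessors reduce to untagged field loads and stores (a sketch; the byte-addressed object and offsets are illustrative):

    #include <cstdint>
    #include <cstring>

    using Address = uintptr_t;

    // Read/write a raw Address field at `offset` bytes into an object payload,
    // as ReadField<Address>/WriteField<Address> do above.
    Address ReadAddressField(const char* obj, int offset) {
      Address v;
      std::memcpy(&v, obj + offset, sizeof(v));  // memcpy avoids aliasing UB
      return v;
    }

    void WriteAddressField(char* obj, int offset, Address v) {
      std::memcpy(obj + offset, &v, sizeof(v));
    }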
diff --git a/deps/v8/src/objects/js-array-buffer.cc b/deps/v8/src/objects/js-array-buffer.cc
index 917a055b46..bbe635ee2a 100644
--- a/deps/v8/src/objects/js-array-buffer.cc
+++ b/deps/v8/src/objects/js-array-buffer.cc
@@ -55,7 +55,6 @@ void JSArrayBuffer::Setup(SharedFlag shared, ResizableFlag resizable,
SetEmbedderField(i, Smi::zero());
}
set_extension(nullptr);
- AllocateExternalPointerEntries(GetIsolate());
if (!backing_store) {
set_backing_store(GetIsolate(), nullptr);
set_byte_length(0);
diff --git a/deps/v8/src/objects/js-array-buffer.h b/deps/v8/src/objects/js-array-buffer.h
index 1522f4b951..e5a68f3923 100644
--- a/deps/v8/src/objects/js-array-buffer.h
+++ b/deps/v8/src/objects/js-array-buffer.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_JS_ARRAY_BUFFER_H_
#define V8_OBJECTS_JS_ARRAY_BUFFER_H_
+#include "include/v8-typed-array.h"
#include "src/objects/backing-store.h"
#include "src/objects/js-objects.h"
#include "torque-generated/bit-fields.h"
@@ -32,12 +33,6 @@ class JSArrayBuffer
static constexpr size_t kMaxByteLength = kMaxSafeInteger;
#endif
- // When soft sandbox is enabled, creates entries in external pointer table for
- // all JSArrayBuffer's fields that require soft sandbox protection (backing
- // store pointer, backing store length, etc.).
- // When sandbox is not enabled, it's a no-op.
- inline void AllocateExternalPointerEntries(Isolate* isolate);
-
// [byte_length]: length in bytes
DECL_PRIMITIVE_ACCESSORS(byte_length, size_t)
@@ -283,12 +278,6 @@ class JSTypedArray
V8_EXPORT_PRIVATE Handle<JSArrayBuffer> GetBuffer();
- // When soft sandbox is enabled, creates entries in external pointer table for
- // all JSTypedArray's fields that require soft sandbox protection (external
- // pointer, offset, length, etc.).
- // When sandbox is not enabled, it's a no-op.
- inline void AllocateExternalPointerEntries(Isolate* isolate);
-
// The `DataPtr` is `base_ptr + external_pointer`, and `base_ptr` is nullptr
// for off-heap typed arrays.
static constexpr bool kOffHeapDataPtrEqualsExternalPointer = true;
@@ -392,12 +381,6 @@ class JSDataView
DECL_GETTER(data_pointer, void*)
inline void set_data_pointer(Isolate* isolate, void* value);
- // When soft sandbox is enabled, creates entries in external pointer table for
- // all JSDataView's fields that require soft sandbox protection (data pointer,
- // offset, length, etc.).
- // When sandbox is not enabled, it's a no-op.
- inline void AllocateExternalPointerEntries(Isolate* isolate);
-
// Dispatched behavior.
DECL_PRINTER(JSDataView)
DECL_VERIFIER(JSDataView)
diff --git a/deps/v8/src/objects/js-array-inl.h b/deps/v8/src/objects/js-array-inl.h
index ed7ab4e003..3b9f796263 100644
--- a/deps/v8/src/objects/js-array-inl.h
+++ b/deps/v8/src/objects/js-array-inl.h
@@ -15,11 +15,10 @@
namespace v8 {
namespace internal {
-OBJECT_CONSTRUCTORS_IMPL(JSArray, JSObject)
-OBJECT_CONSTRUCTORS_IMPL(JSArrayIterator, JSObject)
+#include "torque-generated/src/objects/js-array-tq-inl.inc"
-CAST_ACCESSOR(JSArray)
-CAST_ACCESSOR(JSArrayIterator)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSArray)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSArrayIterator)
DEF_GETTER(JSArray, length, Object) {
return TaggedField<Object, kLengthOffset>::load(cage_base, *this);
@@ -70,9 +69,6 @@ bool JSArray::HasArrayPrototype(Isolate* isolate) {
return map().prototype() == *isolate->initial_array_prototype();
}
-ACCESSORS(JSArrayIterator, iterated_object, Object, kIteratedObjectOffset)
-ACCESSORS(JSArrayIterator, next_index, Object, kNextIndexOffset)
-
SMI_ACCESSORS(JSArrayIterator, raw_kind, kKindOffset)
IterationKind JSArrayIterator::kind() const {
diff --git a/deps/v8/src/objects/js-array.h b/deps/v8/src/objects/js-array.h
index 776cb4446b..4d725a0905 100644
--- a/deps/v8/src/objects/js-array.h
+++ b/deps/v8/src/objects/js-array.h
@@ -16,12 +16,14 @@
namespace v8 {
namespace internal {
+#include "torque-generated/src/objects/js-array-tq.inc"
+
// The JSArray describes JavaScript Arrays
// Such an array can be in one of two modes:
// - fast, backing storage is a FixedArray and length <= elements.length();
// Please note: push and pop can be used to grow and shrink the array.
// - slow, backing storage is a HashTable with numbers as keys.
-class JSArray : public JSObject {
+class JSArray : public TorqueGeneratedJSArray<JSArray, JSObject> {
public:
// [length]: The length property.
DECL_ACCESSORS(length, Object)
@@ -109,8 +111,6 @@ class JSArray : public JSObject {
// to Proxies and objects with a hidden prototype.
inline bool HasArrayPrototype(Isolate* isolate);
- DECL_CAST(JSArray)
-
// Dispatched behavior.
DECL_PRINTER(JSArray)
DECL_VERIFIER(JSArray)
@@ -118,9 +118,6 @@ class JSArray : public JSObject {
// Number of element slots to pre-allocate for an empty array.
static const int kPreallocatedArrayElements = 4;
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JS_ARRAY_FIELDS)
-
static const int kLengthDescriptorIndex = 0;
// Max. number of elements being copied in Array builtins.
@@ -144,7 +141,7 @@ class JSArray : public JSObject {
AllocationMemento::kSize) >>
kDoubleSizeLog2;
- OBJECT_CONSTRUCTORS(JSArray, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSArray)
};
Handle<Object> CacheInitialJSArrayMaps(Isolate* isolate,
@@ -153,52 +150,20 @@ Handle<Object> CacheInitialJSArrayMaps(Isolate* isolate,
// The JSArrayIterator describes JavaScript Array Iterators Objects, as
// defined in ES section #sec-array-iterator-objects.
-class JSArrayIterator : public JSObject {
+class JSArrayIterator
+ : public TorqueGeneratedJSArrayIterator<JSArrayIterator, JSObject> {
public:
DECL_PRINTER(JSArrayIterator)
DECL_VERIFIER(JSArrayIterator)
- DECL_CAST(JSArrayIterator)
-
- // [iterated_object]: the [[IteratedObject]] inobject property.
- DECL_ACCESSORS(iterated_object, Object)
-
- // [next_index]: The [[ArrayIteratorNextIndex]] inobject property.
- // The next_index is always a positive integer, and it points to
- // the next index that is to be returned by this iterator. It's
- // possible range is fixed depending on the [[iterated_object]]:
- //
- // 1. For JSArray's the next_index is always in Unsigned32
- // range, and when the iterator reaches the end it's set
- // to kMaxUInt32 to indicate that this iterator should
- // never produce values anymore even if the "length"
- // property of the JSArray changes at some later point.
- // 2. For JSTypedArray's the next_index is always in
- // UnsignedSmall range, and when the iterator terminates
- // it's set to Smi::kMaxValue.
- // 3. For all other JSReceiver's it's always between 0 and
- // kMaxSafeInteger, and the latter value is used to mark
- // termination.
- //
- // It's important that for 1. and 2. the value fits into the
- // Unsigned32 range (UnsignedSmall is a subset of Unsigned32),
- // since we use this knowledge in the fast-path for the array
- // iterator next calls in TurboFan (in the JSCallReducer) to
- // keep the index in Word32 representation. This invariant is
- // checked in JSArrayIterator::JSArrayIteratorVerify().
- DECL_ACCESSORS(next_index, Object)
-
// [kind]: the [[ArrayIterationKind]] inobject property.
inline IterationKind kind() const;
inline void set_kind(IterationKind kind);
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JS_ARRAY_ITERATOR_FIELDS)
-
private:
DECL_INT_ACCESSORS(raw_kind)
- OBJECT_CONSTRUCTORS(JSArrayIterator, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSArrayIterator)
};
} // namespace internal
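
JSArray and JSArrayIterator now derive from Torque-generated CRTP bases (TorqueGeneratedJSArray<JSArray, JSObject>), which supply the field offsets, accessors, and cast helpers that the deleted DEFINE_FIELD_OFFSET_CONSTANTS/DECL_CAST/DECL_ACCESSORS lines used to spell out by hand. The CRTP shape itself is ordinary C++ (a generic sketch, not the generated code):

    // Generated-style CRTP base: parameterized on the most-derived type D
    // and its parent P, like TorqueGeneratedJSArray<JSArray, JSObject>.
    template <class D, class P>
    class TorqueGeneratedBase : public P {
     public:
      int length() const { return length_; }  // "generated" accessor
      void set_length(int v) { length_ = v; }

     private:
      int length_ = 0;
    };

    class JSObjectLike {};
    class JSArrayLike
        : public TorqueGeneratedBase<JSArrayLike, JSObjectLike> {
      // Only hand-written extras live here; offsets, accessors, and casts
      // come from the generated base.
    };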
diff --git a/deps/v8/src/objects/js-array.tq b/deps/v8/src/objects/js-array.tq
index 3ccf37b150..e9f7d86c44 100644
--- a/deps/v8/src/objects/js-array.tq
+++ b/deps/v8/src/objects/js-array.tq
@@ -4,10 +4,34 @@
extern enum IterationKind extends uint31 { kKeys, kValues, kEntries }
-@doNotGenerateCppClass
extern class JSArrayIterator extends JSObject {
iterated_object: JSReceiver;
+
+ // [next_index]: The [[ArrayIteratorNextIndex]] inobject property.
+ // The next_index is always a positive integer, and it points to
+ // the next index that is to be returned by this iterator. Its
+ // possible range is fixed depending on the [[iterated_object]]:
+ //
+ // 1. For JSArrays the next_index is always in Unsigned32
+ // range, and when the iterator reaches the end it's set
+ // to kMaxUInt32 to indicate that this iterator should
+ // never produce values anymore even if the "length"
+ // property of the JSArray changes at some later point.
+ // 2. For JSTypedArrays the next_index is always in
+ // UnsignedSmall range, and when the iterator terminates
+ // it's set to Smi::kMaxValue.
+ // 3. For all other JSReceivers it's always between 0 and
+ // kMaxSafeInteger, and the latter value is used to mark
+ // termination.
+ //
+ // It's important that for 1. and 2. the value fits into the
+ // Unsigned32 range (UnsignedSmall is a subset of Unsigned32),
+ // since we use this knowledge in the fast-path for the array
+ // iterator next calls in TurboFan (in the JSCallReducer) to
+ // keep the index in Word32 representation. This invariant is
+ // checked in JSArrayIterator::JSArrayIteratorVerify().
next_index: Number;
+
kind: SmiTagged<IterationKind>;
}
@@ -25,7 +49,6 @@ macro CreateArrayIterator(implicit context: NativeContext)(
};
}
-@doNotGenerateCppClass
extern class JSArray extends JSObject {
macro IsEmpty(): bool {
return this.length == 0;
diff --git a/deps/v8/src/objects/js-date-time-format.cc b/deps/v8/src/objects/js-date-time-format.cc
index 7e2ece76a9..868b0a3be2 100644
--- a/deps/v8/src/objects/js-date-time-format.cc
+++ b/deps/v8/src/objects/js-date-time-format.cc
@@ -2209,8 +2209,8 @@ template <typename T>
MaybeHandle<T> FormatRangeCommon(
Isolate* isolate, Handle<JSDateTimeFormat> date_time_format, double x,
double y,
- MaybeHandle<T> (*formatToResult)(Isolate*, const icu::FormattedValue&,
- bool*),
+ const std::function<MaybeHandle<T>(Isolate*, const icu::FormattedValue&,
+ bool*)>& formatToResult,
bool* outputRange) {
// Track newer features formatRange and formatRangeToParts
isolate->CountUsage(v8::Isolate::UseCounterFeature::kDateTimeFormatRange);
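
Switching formatToResult from a raw function pointer to const std::function& lets callers pass capturing lambdas and other callables, at the cost of a possible indirection. The difference in a nutshell (a self-contained sketch):

    #include <functional>

    int apply_fnptr(int (*f)(int), int x) { return f(x); }            // before
    int apply_stdfn(const std::function<int(int)>& f, int x) { return f(x); }

    int main() {
      int base = 10;
      // apply_fnptr([base](int v) { return v + base; }, 1);  // won't compile:
      //                        a capturing lambda has no function-pointer form
      return apply_stdfn([base](int v) { return v + base; }, 1);  // OK: 11
    }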
diff --git a/deps/v8/src/objects/js-function-inl.h b/deps/v8/src/objects/js-function-inl.h
index 275ffba14d..15634b8f02 100644
--- a/deps/v8/src/objects/js-function-inl.h
+++ b/deps/v8/src/objects/js-function-inl.h
@@ -27,9 +27,7 @@ namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(JSFunctionOrBoundFunction)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSBoundFunction)
-OBJECT_CONSTRUCTORS_IMPL(JSFunction, JSFunctionOrBoundFunction)
-
-CAST_ACCESSOR(JSFunction)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSFunction)
ACCESSORS(JSFunction, raw_feedback_cell, FeedbackCell, kFeedbackCellOffset)
RELEASE_ACQUIRE_ACCESSORS(JSFunction, raw_feedback_cell, FeedbackCell,
@@ -55,7 +53,7 @@ void JSFunction::ClearOptimizationMarker() {
}
bool JSFunction::ChecksOptimizationMarker() {
- return code(kAcquireLoad).checks_optimization_marker();
+ return code().checks_optimization_marker();
}
bool JSFunction::IsMarkedForOptimization() {
@@ -218,12 +216,6 @@ NativeContext JSFunction::native_context() {
return context().native_context();
}
-void JSFunction::set_context(HeapObject value, WriteBarrierMode mode) {
- DCHECK(value.IsUndefined() || value.IsContext());
- WRITE_FIELD(*this, kContextOffset, value);
- CONDITIONAL_WRITE_BARRIER(*this, kContextOffset, value, mode);
-}
-
RELEASE_ACQUIRE_ACCESSORS_CHECKED(JSFunction, prototype_or_initial_map,
HeapObject, kPrototypeOrInitialMapOffset,
map().has_prototype_slot())
@@ -332,7 +324,7 @@ bool JSFunction::NeedsResetDueToFlushedBytecode() {
}
bool JSFunction::NeedsResetDueToFlushedBaselineCode() {
- return code().kind() == CodeKind::BASELINE && !shared().HasBaselineData();
+ return code().kind() == CodeKind::BASELINE && !shared().HasBaselineCode();
}
void JSFunction::ResetIfCodeFlushed(
diff --git a/deps/v8/src/objects/js-function.cc b/deps/v8/src/objects/js-function.cc
index b2d086814f..3bcaf07387 100644
--- a/deps/v8/src/objects/js-function.cc
+++ b/deps/v8/src/objects/js-function.cc
@@ -19,19 +19,10 @@ namespace v8 {
namespace internal {
CodeKinds JSFunction::GetAttachedCodeKinds() const {
- // Note: There's a special case when bytecode has been aged away. After
- // flushing the bytecode, the JSFunction will still have the interpreter
- // entry trampoline attached, but the bytecode is no longer available.
- Code code = this->code(kAcquireLoad);
- if (code.is_interpreter_trampoline_builtin()) {
- return CodeKindFlag::INTERPRETED_FUNCTION;
- }
-
- const CodeKind kind = code.kind();
+ const CodeKind kind = code().kind();
if (!CodeKindIsJSFunction(kind)) return {};
-
- if (CodeKindIsOptimizedJSFunction(kind) && code.marked_for_deoptimization()) {
- // Nothing is attached.
+ if (CodeKindIsOptimizedJSFunction(kind) &&
+ code().marked_for_deoptimization()) {
return {};
}
return CodeKindToCodeKindFlag(kind);
@@ -49,7 +40,7 @@ CodeKinds JSFunction::GetAvailableCodeKinds() const {
if ((result & CodeKindFlag::BASELINE) == 0) {
// The SharedFunctionInfo could have attached baseline code.
- if (shared().HasBaselineData()) {
+ if (shared().HasBaselineCode()) {
result |= CodeKindFlag::BASELINE;
}
}
@@ -90,7 +81,8 @@ namespace {
// Returns false if no highest tier exists (i.e. the function is not compiled),
// otherwise returns true and sets highest_tier.
-bool HighestTierOf(CodeKinds kinds, CodeKind* highest_tier) {
+V8_WARN_UNUSED_RESULT bool HighestTierOf(CodeKinds kinds,
+ CodeKind* highest_tier) {
DCHECK_EQ((kinds & ~kJSFunctionCodeKindsMask), 0);
if ((kinds & CodeKindFlag::TURBOFAN) != 0) {
*highest_tier = CodeKind::TURBOFAN;
@@ -111,33 +103,43 @@ bool HighestTierOf(CodeKinds kinds, CodeKind* highest_tier) {
} // namespace
-bool JSFunction::ActiveTierIsIgnition() const {
- if (!shared().HasBytecodeArray()) return false;
- bool result = (GetActiveTier() == CodeKind::INTERPRETED_FUNCTION);
+base::Optional<CodeKind> JSFunction::GetActiveTier() const {
+#if V8_ENABLE_WEBASSEMBLY
+ // Asm/Wasm functions are currently not supported. For simplicity, this
+ // includes invalid asm.js functions whose code hasn't yet been updated to
+ // CompileLazy but is still the InstantiateAsmJs builtin.
+ if (shared().HasAsmWasmData() ||
+ code().builtin_id() == Builtin::kInstantiateAsmJs) {
+ return {};
+ }
+#endif // V8_ENABLE_WEBASSEMBLY
+
+ CodeKind highest_tier;
+ if (!HighestTierOf(GetAvailableCodeKinds(), &highest_tier)) return {};
+
#ifdef DEBUG
- Code code = this->code(kAcquireLoad);
- DCHECK_IMPLIES(result, code.is_interpreter_trampoline_builtin() ||
- (CodeKindIsOptimizedJSFunction(code.kind()) &&
- code.marked_for_deoptimization()) ||
- (code.builtin_id() == Builtin::kCompileLazy &&
- shared().IsInterpreted()));
+ CHECK(highest_tier == CodeKind::TURBOFAN ||
+ highest_tier == CodeKind::BASELINE ||
+ highest_tier == CodeKind::TURBOPROP ||
+ highest_tier == CodeKind::INTERPRETED_FUNCTION);
+
+ if (highest_tier == CodeKind::INTERPRETED_FUNCTION) {
+ CHECK(code().is_interpreter_trampoline_builtin() ||
+ (CodeKindIsOptimizedJSFunction(code().kind()) &&
+ code().marked_for_deoptimization()) ||
+ (code().builtin_id() == Builtin::kCompileLazy &&
+ shared().IsInterpreted()));
+ }
#endif // DEBUG
- return result;
-}
-CodeKind JSFunction::GetActiveTier() const {
- CodeKind highest_tier;
- DCHECK(shared().is_compiled());
- HighestTierOf(GetAvailableCodeKinds(), &highest_tier);
- DCHECK(highest_tier == CodeKind::TURBOFAN ||
- highest_tier == CodeKind::BASELINE ||
- highest_tier == CodeKind::TURBOPROP ||
- highest_tier == CodeKind::INTERPRETED_FUNCTION);
return highest_tier;
}
+bool JSFunction::ActiveTierIsIgnition() const {
+ return GetActiveTier() == CodeKind::INTERPRETED_FUNCTION;
+}
+
bool JSFunction::ActiveTierIsTurbofan() const {
- if (!shared().HasBytecodeArray()) return false;
return GetActiveTier() == CodeKind::TURBOFAN;
}
@@ -145,27 +147,20 @@ bool JSFunction::ActiveTierIsBaseline() const {
return GetActiveTier() == CodeKind::BASELINE;
}
-bool JSFunction::ActiveTierIsIgnitionOrBaseline() const {
- return ActiveTierIsIgnition() || ActiveTierIsBaseline();
-}
-
bool JSFunction::ActiveTierIsToptierTurboprop() const {
- if (!FLAG_turboprop_as_toptier) return false;
- if (!shared().HasBytecodeArray()) return false;
- return GetActiveTier() == CodeKind::TURBOPROP && FLAG_turboprop_as_toptier;
+ return FLAG_turboprop_as_toptier && GetActiveTier() == CodeKind::TURBOPROP;
}
bool JSFunction::ActiveTierIsMidtierTurboprop() const {
- if (!FLAG_turboprop) return false;
- if (!shared().HasBytecodeArray()) return false;
- return GetActiveTier() == CodeKind::TURBOPROP && !FLAG_turboprop_as_toptier;
+ return FLAG_turboprop && !FLAG_turboprop_as_toptier &&
+ GetActiveTier() == CodeKind::TURBOPROP;
}
CodeKind JSFunction::NextTier() const {
if (V8_UNLIKELY(FLAG_turboprop) && ActiveTierIsMidtierTurboprop()) {
return CodeKind::TURBOFAN;
} else if (V8_UNLIKELY(FLAG_turboprop)) {
- DCHECK(ActiveTierIsIgnitionOrBaseline());
+ DCHECK(ActiveTierIsIgnition() || ActiveTierIsBaseline());
return CodeKind::TURBOPROP;
}
return CodeKind::TURBOFAN;
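
GetActiveTier now returns base::Optional<CodeKind>, and the ActiveTierIs* predicates compare against it directly: GetActiveTier() == CodeKind::TURBOFAN is simply false when the optional is empty, which folds the old "is it even compiled?" pre-checks into the return type. The comparison semantics in std::optional terms (a sketch; TierKind and its values are illustrative):

    #include <optional>

    enum class TierKind { kInterpreted, kBaseline, kTurbofan };

    std::optional<TierKind> GetActiveTierSketch(bool compiled) {
      if (!compiled) return std::nullopt;  // not compiled: no active tier
      return TierKind::kTurbofan;
    }

    bool ActiveTierIsTurbofanSketch(bool compiled) {
      // optional<T> == T is false for an empty optional, so no separate
      // HasBytecodeArray()-style guard is needed.
      return GetActiveTierSketch(compiled) == TierKind::kTurbofan;
    }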
diff --git a/deps/v8/src/objects/js-function.h b/deps/v8/src/objects/js-function.h
index 6d7b21abe9..b7df4daf8b 100644
--- a/deps/v8/src/objects/js-function.h
+++ b/deps/v8/src/objects/js-function.h
@@ -53,7 +53,8 @@ class JSBoundFunction
};
// JSFunction describes JavaScript functions.
-class JSFunction : public JSFunctionOrBoundFunction {
+class JSFunction
+ : public TorqueGeneratedJSFunction<JSFunction, JSFunctionOrBoundFunction> {
public:
// [prototype_or_initial_map]:
DECL_RELEASE_ACQUIRE_ACCESSORS(prototype_or_initial_map, HeapObject)
@@ -70,8 +71,6 @@ class JSFunction : public JSFunctionOrBoundFunction {
inline Context context();
DECL_RELAXED_GETTER(context, Context)
inline bool has_context() const;
- inline void set_context(HeapObject context,
- WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
inline JSGlobalProxy global_proxy();
inline NativeContext native_context();
inline int length();
@@ -106,7 +105,8 @@ class JSFunction : public JSFunctionOrBoundFunction {
// indirect means such as the feedback vector's optimized code cache.
// - Active: the single code kind that would be executed if this function
// were called in its current state. Note that there may not be an active
- // code kind if the function is not compiled.
+ // code kind if the function is not compiled. Also, asm/wasm functions are
+ // currently not supported.
//
// Note: code objects that are marked_for_deoptimization are not part of the
// attached/available/active sets. This is because the JSFunction might have
@@ -120,11 +120,10 @@ class JSFunction : public JSFunctionOrBoundFunction {
bool HasAttachedCodeKind(CodeKind kind) const;
bool HasAvailableCodeKind(CodeKind kind) const;
- CodeKind GetActiveTier() const;
+ base::Optional<CodeKind> GetActiveTier() const;
V8_EXPORT_PRIVATE bool ActiveTierIsIgnition() const;
bool ActiveTierIsTurbofan() const;
bool ActiveTierIsBaseline() const;
- bool ActiveTierIsIgnitionOrBaseline() const;
bool ActiveTierIsMidtierTurboprop() const;
bool ActiveTierIsToptierTurboprop() const;
@@ -275,8 +274,6 @@ class JSFunction : public JSFunctionOrBoundFunction {
// Prints the name of the function using PrintF.
void PrintName(FILE* out = stdout);
- DECL_CAST(JSFunction)
-
// Calculate the instance size and in-object properties count.
// {CalculateExpectedNofProperties} can trigger compilation.
static V8_WARN_UNUSED_RESULT int CalculateExpectedNofProperties(
@@ -310,18 +307,6 @@ class JSFunction : public JSFunctionOrBoundFunction {
// ES6 section 19.2.3.5 Function.prototype.toString ( ).
static Handle<String> ToString(Handle<JSFunction> function);
- struct FieldOffsets {
- DEFINE_FIELD_OFFSET_CONSTANTS(JSFunctionOrBoundFunction::kHeaderSize,
- TORQUE_GENERATED_JS_FUNCTION_FIELDS)
- };
- static constexpr int kSharedFunctionInfoOffset =
- FieldOffsets::kSharedFunctionInfoOffset;
- static constexpr int kContextOffset = FieldOffsets::kContextOffset;
- static constexpr int kFeedbackCellOffset = FieldOffsets::kFeedbackCellOffset;
- static constexpr int kCodeOffset = FieldOffsets::kCodeOffset;
- static constexpr int kPrototypeOrInitialMapOffset =
- FieldOffsets::kPrototypeOrInitialMapOffset;
-
class BodyDescriptor;
private:
@@ -329,9 +314,15 @@ class JSFunction : public JSFunctionOrBoundFunction {
DECL_RELEASE_ACQUIRE_ACCESSORS(raw_code, CodeT)
// JSFunction doesn't have a fixed header size:
- // Hide JSFunctionOrBoundFunction::kHeaderSize to avoid confusion.
+ // Hide TorqueGeneratedClass::kHeaderSize to avoid confusion.
static const int kHeaderSize;
+ // Hide generated accessors; custom accessors are called "shared".
+ DECL_ACCESSORS(shared_function_info, SharedFunctionInfo)
+
+ // Hide generated accessors; custom accessors are called "raw_feedback_cell".
+ DECL_ACCESSORS(feedback_cell, FeedbackCell)
+
// Returns the set of code kinds of compilation artifacts (bytecode,
// generated code) attached to this JSFunction.
// Note that attached code objects that are marked_for_deoptimization are not
@@ -348,9 +339,9 @@ class JSFunction : public JSFunctionOrBoundFunction {
public:
static constexpr int kSizeWithoutPrototype = kPrototypeOrInitialMapOffset;
- static constexpr int kSizeWithPrototype = FieldOffsets::kHeaderSize;
+ static constexpr int kSizeWithPrototype = TorqueGeneratedClass::kHeaderSize;
- OBJECT_CONSTRUCTORS(JSFunction, JSFunctionOrBoundFunction);
+ TQ_OBJECT_CONSTRUCTORS(JSFunction)
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-function.tq b/deps/v8/src/objects/js-function.tq
index de934b82f4..8932ea4395 100644
--- a/deps/v8/src/objects/js-function.tq
+++ b/deps/v8/src/objects/js-function.tq
@@ -18,7 +18,6 @@ extern class JSBoundFunction extends JSFunctionOrBoundFunction {
}
@highestInstanceTypeWithinParentClassRange
-@doNotGenerateCppClass
extern class JSFunction extends JSFunctionOrBoundFunction {
shared_function_info: SharedFunctionInfo;
context: Context;
diff --git a/deps/v8/src/objects/js-list-format.cc b/deps/v8/src/objects/js-list-format.cc
index 9ff9c82d12..ae9e7302bf 100644
--- a/deps/v8/src/objects/js-list-format.cc
+++ b/deps/v8/src/objects/js-list-format.cc
@@ -220,7 +220,8 @@ Maybe<std::vector<icu::UnicodeString>> ToUnicodeStringArray(
template <typename T>
MaybeHandle<T> FormatListCommon(
Isolate* isolate, Handle<JSListFormat> format, Handle<JSArray> list,
- MaybeHandle<T> (*formatToResult)(Isolate*, const icu::FormattedValue&)) {
+ const std::function<MaybeHandle<T>(Isolate*, const icu::FormattedValue&)>&
+ formatToResult) {
DCHECK(!list->IsUndefined());
Maybe<std::vector<icu::UnicodeString>> maybe_array =
ToUnicodeStringArray(isolate, list);
diff --git a/deps/v8/src/objects/js-locale.cc b/deps/v8/src/objects/js-locale.cc
index 64644abad2..51cf1453f4 100644
--- a/deps/v8/src/objects/js-locale.cc
+++ b/deps/v8/src/objects/js-locale.cc
@@ -177,19 +177,26 @@ int32_t weekdayFromEDaysOfWeek(icu::Calendar::EDaysOfWeek eDaysOfWeek) {
} // namespace
-bool JSLocale::Is38AlphaNumList(const std::string& value) {
- std::size_t found_dash = value.find("-");
- std::size_t found_underscore = value.find("_");
- if (found_dash == std::string::npos &&
- found_underscore == std::string::npos) {
- return IsAlphanum(value, 3, 8);
- }
- if (found_underscore == std::string::npos || found_dash < found_underscore) {
- return IsAlphanum(value.substr(0, found_dash), 3, 8) &&
- JSLocale::Is38AlphaNumList(value.substr(found_dash + 1));
+// Implemented as iteration instead of recursion to avoid stack overflow for
+// very long input strings.
+bool JSLocale::Is38AlphaNumList(const std::string& in) {
+ std::string value = in;
+ while (true) {
+ std::size_t found_dash = value.find("-");
+ std::size_t found_underscore = value.find("_");
+ if (found_dash == std::string::npos &&
+ found_underscore == std::string::npos) {
+ return IsAlphanum(value, 3, 8);
+ }
+ if (found_underscore == std::string::npos ||
+ found_dash < found_underscore) {
+ if (!IsAlphanum(value.substr(0, found_dash), 3, 8)) return false;
+ value = value.substr(found_dash + 1);
+ } else {
+ if (!IsAlphanum(value.substr(0, found_underscore), 3, 8)) return false;
+ value = value.substr(found_underscore + 1);
+ }
}
- return IsAlphanum(value.substr(0, found_underscore), 3, 8) &&
- JSLocale::Is38AlphaNumList(value.substr(found_underscore + 1));
}
bool JSLocale::Is3Alpha(const std::string& value) {
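
The Is38AlphaNumList rewrite above is the classic tail-recursion-to-loop transformation: since each recursive call was the function's last action, its argument can simply replace the loop variable, bounding stack use at O(1) regardless of input length. The transformation in miniature (a sketch on a simpler predicate, not V8's alphanumeric check):

    #include <string>

    // Recursive: one stack frame per '-'-separated segment (can overflow).
    bool AllNonEmptyRec(const std::string& v) {
      std::size_t dash = v.find('-');
      if (dash == std::string::npos) return !v.empty();
      if (dash == 0) return false;
      return AllNonEmptyRec(v.substr(dash + 1));  // tail call
    }

    // Iterative: same logic, constant stack.
    bool AllNonEmptyIter(std::string v) {
      while (true) {
        std::size_t dash = v.find('-');
        if (dash == std::string::npos) return !v.empty();
        if (dash == 0) return false;
        v = v.substr(dash + 1);  // "recurse" by reassigning
      }
    }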
@@ -476,57 +483,13 @@ MaybeHandle<JSLocale> JSLocale::Minimize(Isolate* isolate,
return Construct(isolate, result);
}
-MaybeHandle<JSArray> ToJSArray(Isolate* isolate, const char* unicode_key,
- icu::StringEnumeration* enumeration,
- const std::set<std::string>& removes) {
- UErrorCode status = U_ZERO_ERROR;
- Factory* factory = isolate->factory();
-
- int32_t count = 0;
- if (!removes.empty()) {
- // If we may remove items, then we need to go one pass first to count how
- // many items we will insert before we allocate the fixed array.
- for (const char* item = enumeration->next(nullptr, status);
- U_SUCCESS(status) && item != nullptr;
- item = enumeration->next(nullptr, status)) {
- if (unicode_key != nullptr) {
- item = uloc_toUnicodeLocaleType(unicode_key, item);
- }
- if (removes.find(item) == removes.end()) {
- count++;
- }
- }
- enumeration->reset(status);
- } else {
- count = enumeration->count(status);
- }
- Handle<FixedArray> fixed_array = factory->NewFixedArray(count);
-
- int32_t index = 0;
- for (const char* item = enumeration->next(nullptr, status);
- U_SUCCESS(status) && item != nullptr;
- item = enumeration->next(nullptr, status)) {
- if (unicode_key != nullptr) {
- item = uloc_toUnicodeLocaleType(unicode_key, item);
- }
- if (removes.find(item) != removes.end()) {
- continue;
- }
- Handle<String> str = factory->NewStringFromAsciiChecked(item);
- fixed_array->set(index++, *str);
- }
- CHECK(index == count);
- if (U_FAILURE(status)) {
- THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
- JSArray);
- }
- return factory->NewJSArrayWithElements(fixed_array);
-}
-
template <typename T>
-MaybeHandle<JSArray> GetKeywordValuesFromLocale(
- Isolate* isolate, const char* key, const char* unicode_key,
- const icu::Locale& locale, const std::set<std::string>& removes) {
+MaybeHandle<JSArray> GetKeywordValuesFromLocale(Isolate* isolate,
+ const char* key,
+ const char* unicode_key,
+ const icu::Locale& locale,
+ bool (*removes)(const char*),
+ bool commonly_used, bool sort) {
Factory* factory = isolate->factory();
UErrorCode status = U_ZERO_ERROR;
std::string ext =
@@ -539,27 +502,43 @@ MaybeHandle<JSArray> GetKeywordValuesFromLocale(
}
status = U_ZERO_ERROR;
std::unique_ptr<icu::StringEnumeration> enumeration(
- T::getKeywordValuesForLocale(key, locale, true, status));
+ T::getKeywordValuesForLocale(key, locale, commonly_used, status));
if (U_FAILURE(status)) {
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
JSArray);
}
- return ToJSArray(isolate, unicode_key, enumeration.get(), removes);
+ return Intl::ToJSArray(isolate, unicode_key, enumeration.get(), removes,
+ sort);
}
+namespace {
+
+MaybeHandle<JSArray> CalendarsForLocale(Isolate* isolate,
+ const icu::Locale& icu_locale,
+ bool commonly_used, bool sort) {
+ return GetKeywordValuesFromLocale<icu::Calendar>(
+ isolate, "calendar", "ca", icu_locale, nullptr, commonly_used, sort);
+}
+
+} // namespace
+
MaybeHandle<JSArray> JSLocale::Calendars(Isolate* isolate,
Handle<JSLocale> locale) {
icu::Locale icu_locale(*(locale->icu_locale().raw()));
- return GetKeywordValuesFromLocale<icu::Calendar>(
- isolate, "calendar", "ca", icu_locale, std::set<std::string>());
+ return CalendarsForLocale(isolate, icu_locale, true, false);
+}
+
+MaybeHandle<JSArray> Intl::AvailableCalendars(Isolate* isolate) {
+ icu::Locale icu_locale("und");
+ return CalendarsForLocale(isolate, icu_locale, false, true);
}
MaybeHandle<JSArray> JSLocale::Collations(Isolate* isolate,
Handle<JSLocale> locale) {
icu::Locale icu_locale(*(locale->icu_locale().raw()));
- const std::set<std::string> removes({"standard", "search"});
- return GetKeywordValuesFromLocale<icu::Collator>(isolate, "collations", "co",
- icu_locale, removes);
+ return GetKeywordValuesFromLocale<icu::Collator>(
+ isolate, "collations", "co", icu_locale, Intl::RemoveCollation, true,
+ false);
}
MaybeHandle<JSArray> JSLocale::HourCycles(Isolate* isolate,
@@ -688,8 +667,7 @@ MaybeHandle<Object> JSLocale::TimeZones(Isolate* isolate,
THROW_NEW_ERROR(isolate, NewRangeError(MessageTemplate::kIcuError),
JSArray);
}
- return ToJSArray(isolate, nullptr, enumeration.get(),
- std::set<std::string>());
+ return Intl::ToJSArray(isolate, nullptr, enumeration.get(), nullptr, true);
}
MaybeHandle<JSObject> JSLocale::TextInfo(Isolate* isolate,
diff --git a/deps/v8/src/objects/js-number-format.cc b/deps/v8/src/objects/js-number-format.cc
index cc5b77a005..cf093f7fa5 100644
--- a/deps/v8/src/objects/js-number-format.cc
+++ b/deps/v8/src/objects/js-number-format.cc
@@ -173,27 +173,11 @@ std::map<const std::string, icu::MeasureUnit> CreateUnitMap() {
int32_t total = icu::MeasureUnit::getAvailable(nullptr, 0, status);
CHECK(U_FAILURE(status));
status = U_ZERO_ERROR;
- // See the list in ecma402 #sec-issanctionedsimpleunitidentifier
- std::set<std::string> sanctioned(
- {"acre", "bit", "byte",
- "celsius", "centimeter", "day",
- "degree", "fahrenheit", "fluid-ounce",
- "foot", "gallon", "gigabit",
- "gigabyte", "gram", "hectare",
- "hour", "inch", "kilobit",
- "kilobyte", "kilogram", "kilometer",
- "liter", "megabit", "megabyte",
- "meter", "mile", "mile-scandinavian",
- "millimeter", "milliliter", "millisecond",
- "minute", "month", "ounce",
- "percent", "petabyte", "pound",
- "second", "stone", "terabit",
- "terabyte", "week", "yard",
- "year"});
std::vector<icu::MeasureUnit> units(total);
total = icu::MeasureUnit::getAvailable(units.data(), total, status);
CHECK(U_SUCCESS(status));
std::map<const std::string, icu::MeasureUnit> map;
+ std::set<std::string> sanctioned(Intl::SanctionedSimpleUnits());
for (auto it = units.begin(); it != units.end(); ++it) {
// Need to skip none/percent
if (sanctioned.count(it->getSubtype()) > 0 &&
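
Here the inline ecma402 sanctioned-unit list moves behind Intl::SanctionedSimpleUnits(), so number formatting and the new enumeration paths share one copy. A standalone sketch of the accessor shape this hunk assumes (body illustrative; the full contents are the strings deleted above):

#include <set>
#include <string>

const std::set<std::string>& SanctionedSimpleUnitsSketch() {
  // One static instance instead of rebuilding the set at each call site.
  static const std::set<std::string> kUnits{"acre", "bit", "byte",
                                            /* ... full list above ... */
                                            "year"};
  return kUnits;
}
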
diff --git a/deps/v8/src/objects/js-objects-inl.h b/deps/v8/src/objects/js-objects-inl.h
index 6be8267a55..c35999592a 100644
--- a/deps/v8/src/objects/js-objects-inl.h
+++ b/deps/v8/src/objects/js-objects-inl.h
@@ -31,25 +31,22 @@ namespace internal {
#include "torque-generated/src/objects/js-objects-tq-inl.inc"
-OBJECT_CONSTRUCTORS_IMPL(JSReceiver, HeapObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSReceiver)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSCustomElementsObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSSpecialObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSAsyncFromSyncIterator)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSDate)
-OBJECT_CONSTRUCTORS_IMPL(JSGlobalObject, JSSpecialObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSGlobalObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSGlobalProxy)
JSIteratorResult::JSIteratorResult(Address ptr) : JSObject(ptr) {}
-OBJECT_CONSTRUCTORS_IMPL(JSMessageObject, JSObject)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSMessageObject)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSPrimitiveWrapper)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSStringIterator)
NEVER_READ_ONLY_SPACE_IMPL(JSReceiver)
-CAST_ACCESSOR(JSGlobalObject)
CAST_ACCESSOR(JSIteratorResult)
-CAST_ACCESSOR(JSMessageObject)
-CAST_ACCESSOR(JSReceiver)
DEF_GETTER(JSObject, elements, FixedArrayBase) {
return TaggedField<FixedArrayBase, kElementsOffset>::load(cage_base, *this);
@@ -472,9 +469,6 @@ void JSObject::InitializeBody(Map map, int start_offset,
}
}
-ACCESSORS(JSGlobalObject, native_context, NativeContext, kNativeContextOffset)
-ACCESSORS(JSGlobalObject, global_proxy, JSGlobalProxy, kGlobalProxyOffset)
-
DEF_GETTER(JSGlobalObject, native_context_unchecked, Object) {
return TaggedField<Object, kNativeContextOffset>::load(cage_base, *this);
}
@@ -501,9 +495,6 @@ void JSMessageObject::set_type(MessageTemplate value) {
set_raw_type(static_cast<int>(value));
}
-ACCESSORS(JSMessageObject, argument, Object, kArgumentsOffset)
-ACCESSORS(JSMessageObject, script, Script, kScriptOffset)
-ACCESSORS(JSMessageObject, stack_frames, Object, kStackFramesOffset)
ACCESSORS(JSMessageObject, shared_info, HeapObject, kSharedInfoOffset)
ACCESSORS(JSMessageObject, bytecode_offset, Smi, kBytecodeOffsetOffset)
SMI_ACCESSORS(JSMessageObject, start_position, kStartPositionOffset)
diff --git a/deps/v8/src/objects/js-objects.h b/deps/v8/src/objects/js-objects.h
index 7452237006..d20cdaceb4 100644
--- a/deps/v8/src/objects/js-objects.h
+++ b/deps/v8/src/objects/js-objects.h
@@ -34,7 +34,7 @@ class IsCompiledScope;
// JSReceiver includes types on which properties can be defined, i.e.,
// JSObject and JSProxy.
-class JSReceiver : public HeapObject {
+class JSReceiver : public TorqueGeneratedJSReceiver<JSReceiver, HeapObject> {
public:
NEVER_READ_ONLY_SPACE
// Returns true if there is no slow (ie, dictionary) backing store.
@@ -85,9 +85,6 @@ class JSReceiver : public HeapObject {
static void DeleteNormalizedProperty(Handle<JSReceiver> object,
InternalIndex entry);
- DECL_CAST(JSReceiver)
- DECL_VERIFIER(JSReceiver)
-
// ES6 section 7.1.1 ToPrimitive
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> ToPrimitive(
Handle<JSReceiver> receiver,
@@ -288,14 +285,17 @@ class JSReceiver : public HeapObject {
static const int kHashMask = PropertyArray::HashField::kMask;
- DEFINE_FIELD_OFFSET_CONSTANTS(HeapObject::kHeaderSize,
- TORQUE_GENERATED_JS_RECEIVER_FIELDS)
bool HasProxyInPrototype(Isolate* isolate);
// TC39 "Dynamic Code Brand Checks"
bool IsCodeLike(Isolate* isolate) const;
- OBJECT_CONSTRUCTORS(JSReceiver, HeapObject);
+ private:
+ // Hide generated accessors; custom accessors are called
+ // "raw_properties_or_hash".
+ DECL_ACCESSORS(properties_or_hash, Object)
+
+ TQ_OBJECT_CONSTRUCTORS(JSReceiver)
};
// The JSObject describes real heap allocated JavaScript objects with
@@ -996,21 +996,14 @@ class JSGlobalProxy
};
// JavaScript global object.
-class JSGlobalObject : public JSSpecialObject {
+class JSGlobalObject
+ : public TorqueGeneratedJSGlobalObject<JSGlobalObject, JSSpecialObject> {
public:
- // [native context]: the natives corresponding to this global object.
- DECL_ACCESSORS(native_context, NativeContext)
-
- // [global proxy]: the global proxy object of the context
- DECL_ACCESSORS(global_proxy, JSGlobalProxy)
-
DECL_RELEASE_ACQUIRE_ACCESSORS(global_dictionary, GlobalDictionary)
static void InvalidatePropertyCell(Handle<JSGlobalObject> object,
Handle<Name> name);
- DECL_CAST(JSGlobalObject)
-
inline bool IsDetached();
// May be called by the concurrent GC when the global object is not
@@ -1021,11 +1014,7 @@ class JSGlobalObject : public JSSpecialObject {
DECL_PRINTER(JSGlobalObject)
DECL_VERIFIER(JSGlobalObject)
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSSpecialObject::kHeaderSize,
- TORQUE_GENERATED_JS_GLOBAL_OBJECT_FIELDS)
-
- OBJECT_CONSTRUCTORS(JSGlobalObject, JSSpecialObject);
+ TQ_OBJECT_CONSTRUCTORS(JSGlobalObject)
};
// Representation for JS Wrapper objects, String, Number, Boolean, etc.
@@ -1113,21 +1102,13 @@ class JSDate : public TorqueGeneratedJSDate<JSDate, JSObject> {
// error messages are not directly accessible from JavaScript to
// prevent leaking information to user code called during error
// formatting.
-class JSMessageObject : public JSObject {
+class JSMessageObject
+ : public TorqueGeneratedJSMessageObject<JSMessageObject, JSObject> {
public:
// [type]: the type of error message.
inline MessageTemplate type() const;
inline void set_type(MessageTemplate value);
- // [arguments]: the arguments for formatting the error message.
- DECL_ACCESSORS(argument, Object)
-
- // [script]: the script from which the error message originated.
- DECL_ACCESSORS(script, Script)
-
- // [stack_frames]: an array of stack frames for this error object.
- DECL_ACCESSORS(stack_frames, Object)
-
// Initializes the source positions in the object if possible. Does nothing if
// called more than once. If called when stack space is exhausted, then the
  // source positions will not be set and calling it again when there is more
@@ -1159,14 +1140,9 @@ class JSMessageObject : public JSObject {
DECL_INT_ACCESSORS(error_level)
- DECL_CAST(JSMessageObject)
-
// Dispatched behavior.
DECL_PRINTER(JSMessageObject)
- DECL_VERIFIER(JSMessageObject)
- DEFINE_FIELD_OFFSET_CONSTANTS(JSObject::kHeaderSize,
- TORQUE_GENERATED_JS_MESSAGE_OBJECT_FIELDS)
// TODO(v8:8989): [torque] Support marker constants.
static const int kPointerFieldsEndOffset = kStartPositionOffset;
@@ -1195,7 +1171,10 @@ class JSMessageObject : public JSObject {
DECL_INT_ACCESSORS(raw_type)
- OBJECT_CONSTRUCTORS(JSMessageObject, JSObject);
+ // Hide generated accessors; custom accessors are named "raw_type".
+ DECL_INT_ACCESSORS(message_type)
+
+ TQ_OBJECT_CONSTRUCTORS(JSMessageObject)
};
// The [Async-from-Sync Iterator] object
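
The js-objects.h hunks above all follow one migration pattern: hand-written layout macros give way to a Torque-generated CRTP base class that supplies field offsets, accessors, casts, and verifiers from the .tq definition. A generic before/after sketch (Foo, Bar, and baz are placeholder names, not V8 classes):

// Before: layout and accessors maintained by hand.
class FooOld : public Bar {
 public:
  DECL_ACCESSORS(baz, Object)
  DEFINE_FIELD_OFFSET_CONSTANTS(Bar::kHeaderSize, TORQUE_GENERATED_FOO_FIELDS)
  OBJECT_CONSTRUCTORS(FooOld, Bar);
};

// After: everything above is derived from the .tq class definition.
class Foo : public TorqueGeneratedFoo<Foo, Bar> {
 public:
  TQ_OBJECT_CONSTRUCTORS(Foo)
};
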
diff --git a/deps/v8/src/objects/js-objects.tq b/deps/v8/src/objects/js-objects.tq
index fd48d43045..1ce7dbd9ea 100644
--- a/deps/v8/src/objects/js-objects.tq
+++ b/deps/v8/src/objects/js-objects.tq
@@ -5,7 +5,6 @@
// JSReceiver corresponds to objects in the JS sense.
@abstract
@highestInstanceTypeWithinParentClassRange
-@doNotGenerateCppClass
extern class JSReceiver extends HeapObject {
properties_or_hash: SwissNameDictionary|FixedArrayBase|PropertyArray|Smi;
}
@@ -97,20 +96,24 @@ extern class JSGlobalProxy extends JSSpecialObject {
native_context: Object;
}
-@doNotGenerateCppClass
extern class JSGlobalObject extends JSSpecialObject {
+ // [native context]: the natives corresponding to this global object.
native_context: NativeContext;
+
+ // [global proxy]: the global proxy object of the context
global_proxy: JSGlobalProxy;
}
extern class JSPrimitiveWrapper extends JSCustomElementsObject { value: JSAny; }
-@doNotGenerateCppClass
extern class JSMessageObject extends JSObject {
// Tagged fields.
message_type: Smi;
- arguments: Object;
+  // [argument]: the argument for formatting the error message.
+ argument: Object;
+ // [script]: the script from which the error message originated.
script: Script;
+ // [stack_frames]: an array of stack frames for this error object.
stack_frames: Object;
shared_info: SharedFunctionInfo|Undefined;
diff --git a/deps/v8/src/objects/js-promise.h b/deps/v8/src/objects/js-promise.h
index dda3afec99..5afb66a0b2 100644
--- a/deps/v8/src/objects/js-promise.h
+++ b/deps/v8/src/objects/js-promise.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_JS_PROMISE_H_
#define V8_OBJECTS_JS_PROMISE_H_
+#include "include/v8-promise.h"
#include "src/objects/js-objects.h"
#include "src/objects/promise.h"
#include "torque-generated/bit-fields.h"
diff --git a/deps/v8/src/objects/js-proxy.h b/deps/v8/src/objects/js-proxy.h
index 575c942651..c865b1ffd5 100644
--- a/deps/v8/src/objects/js-proxy.h
+++ b/deps/v8/src/objects/js-proxy.h
@@ -124,12 +124,10 @@ class JSProxy : public TorqueGeneratedJSProxy<JSProxy, JSReceiver> {
// JSProxyRevocableResult is just a JSObject with a specific initial map.
// This initial map adds in-object properties for "proxy" and "revoke".
// See https://tc39.github.io/ecma262/#sec-proxy.revocable
-class JSProxyRevocableResult : public JSObject {
+class JSProxyRevocableResult
+ : public TorqueGeneratedJSProxyRevocableResult<JSProxyRevocableResult,
+ JSObject> {
public:
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(
- JSObject::kHeaderSize, TORQUE_GENERATED_JS_PROXY_REVOCABLE_RESULT_FIELDS)
-
// Indices of in-object properties.
static const int kProxyIndex = 0;
static const int kRevokeIndex = 1;
diff --git a/deps/v8/src/objects/js-proxy.tq b/deps/v8/src/objects/js-proxy.tq
index b91c0de5d0..5d0f51a94f 100644
--- a/deps/v8/src/objects/js-proxy.tq
+++ b/deps/v8/src/objects/js-proxy.tq
@@ -7,7 +7,6 @@ extern class JSProxy extends JSReceiver {
handler: JSReceiver|Null;
}
-@doNotGenerateCppClass
extern shape JSProxyRevocableResult extends JSObject {
proxy: JSAny;
revoke: JSAny;
diff --git a/deps/v8/src/objects/js-regexp-inl.h b/deps/v8/src/objects/js-regexp-inl.h
index 0f38daa5e7..2a69bea650 100644
--- a/deps/v8/src/objects/js-regexp-inl.h
+++ b/deps/v8/src/objects/js-regexp-inl.h
@@ -21,17 +21,9 @@ namespace internal {
#include "torque-generated/src/objects/js-regexp-tq-inl.inc"
TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExp)
-OBJECT_CONSTRUCTORS_IMPL_CHECK_SUPER(JSRegExpResult, JSArray)
-OBJECT_CONSTRUCTORS_IMPL_CHECK_SUPER(JSRegExpResultIndices, JSArray)
-
-inline JSRegExpResultWithIndices::JSRegExpResultWithIndices(Address ptr)
- : JSRegExpResult(ptr) {
- SLOW_DCHECK(IsJSArray());
-}
-
-CAST_ACCESSOR(JSRegExpResult)
-CAST_ACCESSOR(JSRegExpResultWithIndices)
-CAST_ACCESSOR(JSRegExpResultIndices)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExpResult)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExpResultIndices)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSRegExpResultWithIndices)
ACCESSORS(JSRegExp, last_index, Object, kLastIndexOffset)
@@ -59,7 +51,7 @@ int JSRegExp::MaxRegisterCount() const {
return Smi::ToInt(DataAt(kIrregexpMaxRegisterCountIndex));
}
-JSRegExp::Flags JSRegExp::GetFlags() {
+JSRegExp::Flags JSRegExp::GetFlags() const {
DCHECK(this->data().IsFixedArray());
Object data = this->data();
Smi smi = Smi::cast(FixedArray::cast(data).get(kFlagsIndex));
diff --git a/deps/v8/src/objects/js-regexp.cc b/deps/v8/src/objects/js-regexp.cc
index bfc16d1b85..e1e06cb12a 100644
--- a/deps/v8/src/objects/js-regexp.cc
+++ b/deps/v8/src/objects/js-regexp.cc
@@ -111,64 +111,38 @@ uint32_t JSRegExp::BacktrackLimit() const {
}
// static
-JSRegExp::Flags JSRegExp::FlagsFromString(Isolate* isolate,
- Handle<String> flags, bool* success) {
- int length = flags->length();
- if (length == 0) {
- *success = true;
- return JSRegExp::kNone;
- }
+base::Optional<JSRegExp::Flags> JSRegExp::FlagsFromString(
+ Isolate* isolate, Handle<String> flags) {
+ const int length = flags->length();
+
// A longer flags string cannot be valid.
- if (length > JSRegExp::kFlagCount) return JSRegExp::Flags(0);
- JSRegExp::Flags value(0);
- if (flags->IsSeqOneByteString()) {
- DisallowGarbageCollection no_gc;
- SeqOneByteString seq_flags = SeqOneByteString::cast(*flags);
- for (int i = 0; i < length; i++) {
- base::Optional<JSRegExp::Flag> maybe_flag =
- JSRegExp::FlagFromChar(seq_flags.Get(i));
- if (!maybe_flag.has_value()) return JSRegExp::Flags(0);
- JSRegExp::Flag flag = *maybe_flag;
- // Duplicate flag.
- if (value & flag) return JSRegExp::Flags(0);
- value |= flag;
- }
- } else {
- flags = String::Flatten(isolate, flags);
- DisallowGarbageCollection no_gc;
- String::FlatContent flags_content = flags->GetFlatContent(no_gc);
- for (int i = 0; i < length; i++) {
- base::Optional<JSRegExp::Flag> maybe_flag =
- JSRegExp::FlagFromChar(flags_content.Get(i));
- if (!maybe_flag.has_value()) return JSRegExp::Flags(0);
- JSRegExp::Flag flag = *maybe_flag;
- // Duplicate flag.
- if (value & flag) return JSRegExp::Flags(0);
- value |= flag;
- }
+ if (length > JSRegExp::kFlagCount) return {};
+
+ RegExpFlags value;
+ FlatStringReader reader(isolate, String::Flatten(isolate, flags));
+
+ for (int i = 0; i < length; i++) {
+ base::Optional<RegExpFlag> flag = JSRegExp::FlagFromChar(reader.Get(i));
+ if (!flag.has_value()) return {};
+ if (value & flag.value()) return {}; // Duplicate.
+ value |= flag.value();
}
- *success = true;
- return value;
+
+ return JSRegExp::AsJSRegExpFlags(value);
}
// static
Handle<String> JSRegExp::StringFromFlags(Isolate* isolate,
JSRegExp::Flags flags) {
- // Ensure that this function is up-to-date with the supported flag options.
- constexpr size_t kFlagCount = JSRegExp::kFlagCount;
- STATIC_ASSERT(kFlagCount == 8);
-
- // Translate to the lexicographically smaller string.
+ static constexpr int kStringTerminator = 1;
int cursor = 0;
- char buffer[kFlagCount] = {'\0'};
- if (flags & JSRegExp::kHasIndices) buffer[cursor++] = 'd';
- if (flags & JSRegExp::kGlobal) buffer[cursor++] = 'g';
- if (flags & JSRegExp::kIgnoreCase) buffer[cursor++] = 'i';
- if (flags & JSRegExp::kLinear) buffer[cursor++] = 'l';
- if (flags & JSRegExp::kMultiline) buffer[cursor++] = 'm';
- if (flags & JSRegExp::kDotAll) buffer[cursor++] = 's';
- if (flags & JSRegExp::kUnicode) buffer[cursor++] = 'u';
- if (flags & JSRegExp::kSticky) buffer[cursor++] = 'y';
+ char buffer[kFlagCount + kStringTerminator];
+#define V(Lower, Camel, LowerCamel, Char, Bit) \
+ if (flags & JSRegExp::k##Camel) buffer[cursor++] = Char;
+ REGEXP_FLAG_LIST(V)
+#undef V
+ buffer[cursor++] = '\0';
+ DCHECK_LE(cursor, kFlagCount + kStringTerminator);
return isolate->factory()->NewStringFromAsciiChecked(buffer);
}
@@ -247,15 +221,15 @@ MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
Handle<String> source,
Handle<String> flags_string) {
Isolate* isolate = regexp->GetIsolate();
- bool success = false;
- Flags flags = JSRegExp::FlagsFromString(isolate, flags_string, &success);
- if (!success) {
+ base::Optional<Flags> flags =
+ JSRegExp::FlagsFromString(isolate, flags_string);
+ if (!flags.has_value()) {
THROW_NEW_ERROR(
isolate,
NewSyntaxError(MessageTemplate::kInvalidRegExpFlags, flags_string),
JSRegExp);
}
- return Initialize(regexp, source, flags);
+ return Initialize(regexp, source, flags.value());
}
namespace {
@@ -417,7 +391,9 @@ MaybeHandle<JSRegExp> JSRegExp::Initialize(Handle<JSRegExp> regexp,
source = String::Flatten(isolate, source);
RETURN_ON_EXCEPTION(
- isolate, RegExp::Compile(isolate, regexp, source, flags, backtrack_limit),
+ isolate,
+ RegExp::Compile(isolate, regexp, source, JSRegExp::AsRegExpFlags(flags),
+ backtrack_limit),
JSRegExp);
Handle<String> escaped_source;
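
StringFromFlags now emits one branch per REGEXP_FLAG_LIST entry instead of a hand-maintained chain of ifs. A runnable standalone illustration of the X-macro technique (the demo list and its three-argument V are simplifications; the real V takes five arguments, as the hunk shows):

#include <cstdio>

#define DEMO_FLAG_LIST(V) \
  V(Global, 'g', 1 << 0)  \
  V(IgnoreCase, 'i', 1 << 1)

int main() {
  const int flags = (1 << 0) | (1 << 1);
  char buffer[8];
  int cursor = 0;
  // Expands to one "if" per list entry, in list order.
#define V(Camel, Char, Bit) \
  if (flags & (Bit)) buffer[cursor++] = Char;
  DEMO_FLAG_LIST(V)
#undef V
  buffer[cursor] = '\0';
  std::printf("%s\n", buffer);  // prints "gi"
  return 0;
}
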
diff --git a/deps/v8/src/objects/js-regexp.h b/deps/v8/src/objects/js-regexp.h
index 029964faa2..4671f6607b 100644
--- a/deps/v8/src/objects/js-regexp.h
+++ b/deps/v8/src/objects/js-regexp.h
@@ -5,8 +5,10 @@
#ifndef V8_OBJECTS_JS_REGEXP_H_
#define V8_OBJECTS_JS_REGEXP_H_
+#include "include/v8-regexp.h"
#include "src/objects/contexts.h"
#include "src/objects/js-array.h"
+#include "src/regexp/regexp-flags.h"
#include "torque-generated/bit-fields.h"
// Has to be the last include (doesn't have include guards):
@@ -43,32 +45,35 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
enum Type { NOT_COMPILED, ATOM, IRREGEXP, EXPERIMENTAL };
DEFINE_TORQUE_GENERATED_JS_REG_EXP_FLAGS()
- static base::Optional<Flag> FlagFromChar(char c) {
- STATIC_ASSERT(kFlagCount == 8);
- // clang-format off
- return c == 'g' ? base::Optional<Flag>(kGlobal)
- : c == 'i' ? base::Optional<Flag>(kIgnoreCase)
- : c == 'm' ? base::Optional<Flag>(kMultiline)
- : c == 'y' ? base::Optional<Flag>(kSticky)
- : c == 'u' ? base::Optional<Flag>(kUnicode)
- : c == 's' ? base::Optional<Flag>(kDotAll)
- : c == 'd' ? base::Optional<Flag>(kHasIndices)
- : (FLAG_enable_experimental_regexp_engine && c == 'l')
- ? base::Optional<Flag>(kLinear)
- : base::Optional<Flag>();
- // clang-format on
+ static constexpr Flag AsJSRegExpFlag(RegExpFlag f) {
+ return static_cast<Flag>(f);
+ }
+ static constexpr Flags AsJSRegExpFlags(RegExpFlags f) {
+ return Flags{static_cast<int>(f)};
+ }
+ static constexpr RegExpFlags AsRegExpFlags(Flags f) {
+ return RegExpFlags{static_cast<int>(f)};
+ }
+
+ static base::Optional<RegExpFlag> FlagFromChar(char c) {
+ base::Optional<RegExpFlag> f = TryRegExpFlagFromChar(c);
+ if (!f.has_value()) return f;
+ if (f.value() == RegExpFlag::kLinear &&
+ !FLAG_enable_experimental_regexp_engine) {
+ return {};
+ }
+ return f;
}
STATIC_ASSERT(static_cast<int>(kNone) == v8::RegExp::kNone);
- STATIC_ASSERT(static_cast<int>(kGlobal) == v8::RegExp::kGlobal);
- STATIC_ASSERT(static_cast<int>(kIgnoreCase) == v8::RegExp::kIgnoreCase);
- STATIC_ASSERT(static_cast<int>(kMultiline) == v8::RegExp::kMultiline);
- STATIC_ASSERT(static_cast<int>(kSticky) == v8::RegExp::kSticky);
- STATIC_ASSERT(static_cast<int>(kUnicode) == v8::RegExp::kUnicode);
- STATIC_ASSERT(static_cast<int>(kDotAll) == v8::RegExp::kDotAll);
- STATIC_ASSERT(static_cast<int>(kLinear) == v8::RegExp::kLinear);
- STATIC_ASSERT(static_cast<int>(kHasIndices) == v8::RegExp::kHasIndices);
+#define V(_, Camel, ...) \
+ STATIC_ASSERT(static_cast<int>(k##Camel) == v8::RegExp::k##Camel); \
+ STATIC_ASSERT(static_cast<int>(k##Camel) == \
+ static_cast<int>(RegExpFlag::k##Camel));
+ REGEXP_FLAG_LIST(V)
+#undef V
STATIC_ASSERT(kFlagCount == v8::RegExp::kFlagCount);
+ STATIC_ASSERT(kFlagCount == kRegExpFlagCount);
DECL_ACCESSORS(last_index, Object)
@@ -86,8 +91,8 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
Handle<String> source,
Handle<String> flags_string);
- static Flags FlagsFromString(Isolate* isolate, Handle<String> flags,
- bool* success);
+ static base::Optional<Flags> FlagsFromString(Isolate* isolate,
+ Handle<String> flags);
V8_EXPORT_PRIVATE static Handle<String> StringFromFlags(Isolate* isolate,
Flags flags);
@@ -112,7 +117,7 @@ class JSRegExp : public TorqueGeneratedJSRegExp<JSRegExp, JSObject> {
static int RegistersForCaptureCount(int count) { return (count + 1) * 2; }
inline int MaxRegisterCount() const;
- inline Flags GetFlags();
+ inline Flags GetFlags() const;
inline String Pattern();
inline String EscapedPattern();
inline Object CaptureNameMap();
@@ -249,18 +254,13 @@ DEFINE_OPERATORS_FOR_FLAGS(JSRegExp::Flags)
// faster creation of RegExp exec results.
// This class just holds constants used when creating the result.
// After creation the result must be treated as a JSArray in all regards.
-class JSRegExpResult : public JSArray {
+class JSRegExpResult
+ : public TorqueGeneratedJSRegExpResult<JSRegExpResult, JSArray> {
public:
- DECL_CAST(JSRegExpResult)
-
// TODO(joshualitt): We would like to add printers and verifiers to
// JSRegExpResult, and maybe JSRegExpResultIndices, but both have the same
// instance type as JSArray.
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(JSArray::kHeaderSize,
- TORQUE_GENERATED_JS_REG_EXP_RESULT_FIELDS)
-
// Indices of in-object properties.
static const int kIndexIndex = 0;
static const int kInputIndex = 1;
@@ -274,25 +274,20 @@ class JSRegExpResult : public JSArray {
static const int kMapIndexInContext = Context::REGEXP_RESULT_MAP_INDEX;
- OBJECT_CONSTRUCTORS(JSRegExpResult, JSArray);
+ TQ_OBJECT_CONSTRUCTORS(JSRegExpResult)
};
-class JSRegExpResultWithIndices : public JSRegExpResult {
+class JSRegExpResultWithIndices
+ : public TorqueGeneratedJSRegExpResultWithIndices<JSRegExpResultWithIndices,
+ JSRegExpResult> {
public:
- DECL_CAST(JSRegExpResultWithIndices)
-
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(
- JSRegExpResult::kSize,
- TORQUE_GENERATED_JS_REG_EXP_RESULT_WITH_INDICES_FIELDS)
-
static_assert(
JSRegExpResult::kInObjectPropertyCount == 6,
"JSRegExpResultWithIndices must be a subclass of JSRegExpResult");
static const int kIndicesIndex = 6;
static const int kInObjectPropertyCount = 7;
- OBJECT_CONSTRUCTORS(JSRegExpResultWithIndices, JSRegExpResult);
+ TQ_OBJECT_CONSTRUCTORS(JSRegExpResultWithIndices)
};
// JSRegExpResultIndices is just a JSArray with a specific initial map.
@@ -301,14 +296,10 @@ class JSRegExpResultWithIndices : public JSRegExpResult {
// faster creation of RegExp exec results.
// This class just holds constants used when creating the result.
// After creation the result must be treated as a JSArray in all regards.
-class JSRegExpResultIndices : public JSArray {
+class JSRegExpResultIndices
+ : public TorqueGeneratedJSRegExpResultIndices<JSRegExpResultIndices,
+ JSArray> {
public:
- DECL_CAST(JSRegExpResultIndices)
-
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(
- JSArray::kHeaderSize, TORQUE_GENERATED_JS_REG_EXP_RESULT_INDICES_FIELDS)
-
static Handle<JSRegExpResultIndices> BuildIndices(
Isolate* isolate, Handle<RegExpMatchInfo> match_info,
Handle<Object> maybe_names);
@@ -320,7 +311,7 @@ class JSRegExpResultIndices : public JSArray {
// Descriptor index of groups.
static const int kGroupsDescriptorIndex = 1;
- OBJECT_CONSTRUCTORS(JSRegExpResultIndices, JSArray);
+ TQ_OBJECT_CONSTRUCTORS(JSRegExpResultIndices)
};
} // namespace internal
diff --git a/deps/v8/src/objects/js-regexp.tq b/deps/v8/src/objects/js-regexp.tq
index 328dd94efb..7c60df214a 100644
--- a/deps/v8/src/objects/js-regexp.tq
+++ b/deps/v8/src/objects/js-regexp.tq
@@ -38,7 +38,6 @@ RegExpBuiltinsAssembler::FastStoreLastIndex(FastJSRegExp, Smi): void;
extern class JSRegExpConstructor extends JSFunction
generates 'TNode<JSFunction>';
-@doNotGenerateCppClass
extern shape JSRegExpResult extends JSArray {
// In-object properties:
// The below fields are externally exposed.
@@ -52,12 +51,10 @@ extern shape JSRegExpResult extends JSArray {
regexp_last_index: Smi;
}
-@doNotGenerateCppClass
extern shape JSRegExpResultWithIndices extends JSRegExpResult {
indices: JSAny;
}
-@doNotGenerateCppClass
extern shape JSRegExpResultIndices extends JSArray {
// In-object properties:
// The groups field is externally exposed.
diff --git a/deps/v8/src/objects/js-relative-time-format.cc b/deps/v8/src/objects/js-relative-time-format.cc
index caa4ce562d..34db9ad1bf 100644
--- a/deps/v8/src/objects/js-relative-time-format.cc
+++ b/deps/v8/src/objects/js-relative-time-format.cc
@@ -342,9 +342,9 @@ template <typename T>
MaybeHandle<T> FormatCommon(
Isolate* isolate, Handle<JSRelativeTimeFormat> format,
Handle<Object> value_obj, Handle<Object> unit_obj, const char* func_name,
- MaybeHandle<T> (*formatToResult)(Isolate*,
- const icu::FormattedRelativeDateTime&,
- Handle<Object>, Handle<String>)) {
+ const std::function<
+ MaybeHandle<T>(Isolate*, const icu::FormattedRelativeDateTime&,
+ Handle<Object>, Handle<String>)>& formatToResult) {
// 3. Let value be ? ToNumber(value).
Handle<Object> value;
ASSIGN_RETURN_ON_EXCEPTION(isolate, value,
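
The formatToResult parameter changes from a raw function pointer to std::function, which also accepts capturing callables, presumably the motivation for the change. A standalone demo of the difference (names here are illustrative):

#include <functional>

int ApplyPtr(int (*f)(int), int x) { return f(x); }
int ApplyFn(const std::function<int(int)>& f, int x) { return f(x); }

int main() {
  const int offset = 2;
  ApplyPtr([](int v) { return v + 1; }, 1);  // capture-less: converts to a pointer
  // ApplyPtr([offset](int v) { return v + offset; }, 1);  // won't compile
  return ApplyFn([offset](int v) { return v + offset; }, 1);  // OK
}
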
diff --git a/deps/v8/src/objects/js-weak-refs-inl.h b/deps/v8/src/objects/js-weak-refs-inl.h
index 13ac175cf6..acce7b72b9 100644
--- a/deps/v8/src/objects/js-weak-refs-inl.h
+++ b/deps/v8/src/objects/js-weak-refs-inl.h
@@ -21,18 +21,7 @@ namespace internal {
TQ_OBJECT_CONSTRUCTORS_IMPL(WeakCell)
TQ_OBJECT_CONSTRUCTORS_IMPL(JSWeakRef)
-OBJECT_CONSTRUCTORS_IMPL(JSFinalizationRegistry, JSObject)
-
-ACCESSORS(JSFinalizationRegistry, native_context, NativeContext,
- kNativeContextOffset)
-ACCESSORS(JSFinalizationRegistry, cleanup, Object, kCleanupOffset)
-ACCESSORS(JSFinalizationRegistry, active_cells, HeapObject, kActiveCellsOffset)
-ACCESSORS(JSFinalizationRegistry, cleared_cells, HeapObject,
- kClearedCellsOffset)
-ACCESSORS(JSFinalizationRegistry, key_map, Object, kKeyMapOffset)
-SMI_ACCESSORS(JSFinalizationRegistry, flags, kFlagsOffset)
-ACCESSORS(JSFinalizationRegistry, next_dirty, Object, kNextDirtyOffset)
-CAST_ACCESSOR(JSFinalizationRegistry)
+TQ_OBJECT_CONSTRUCTORS_IMPL(JSFinalizationRegistry)
BIT_FIELD_ACCESSORS(JSFinalizationRegistry, flags, scheduled_for_cleanup,
JSFinalizationRegistry::ScheduledForCleanupBit)
diff --git a/deps/v8/src/objects/js-weak-refs.h b/deps/v8/src/objects/js-weak-refs.h
index 250186e7be..b2dc41b570 100644
--- a/deps/v8/src/objects/js-weak-refs.h
+++ b/deps/v8/src/objects/js-weak-refs.h
@@ -21,22 +21,12 @@ class WeakCell;
// FinalizationRegistry object from the JS Weak Refs spec proposal:
// https://github.com/tc39/proposal-weakrefs
-class JSFinalizationRegistry : public JSObject {
+class JSFinalizationRegistry
+ : public TorqueGeneratedJSFinalizationRegistry<JSFinalizationRegistry,
+ JSObject> {
public:
DECL_PRINTER(JSFinalizationRegistry)
EXPORT_DECL_VERIFIER(JSFinalizationRegistry)
- DECL_CAST(JSFinalizationRegistry)
-
- DECL_ACCESSORS(native_context, NativeContext)
- DECL_ACCESSORS(cleanup, Object)
-
- DECL_ACCESSORS(active_cells, HeapObject)
- DECL_ACCESSORS(cleared_cells, HeapObject)
- DECL_ACCESSORS(key_map, Object)
-
- DECL_ACCESSORS(next_dirty, Object)
-
- DECL_INT_ACCESSORS(flags)
DECL_BOOLEAN_ACCESSORS(scheduled_for_cleanup)
@@ -72,14 +62,10 @@ class JSFinalizationRegistry : public JSObject {
Isolate* isolate, Address raw_finalization_registry,
Address raw_weak_cell);
- // Layout description.
- DEFINE_FIELD_OFFSET_CONSTANTS(
- JSObject::kHeaderSize, TORQUE_GENERATED_JS_FINALIZATION_REGISTRY_FIELDS)
-
// Bitfields in flags.
DEFINE_TORQUE_GENERATED_FINALIZATION_REGISTRY_FLAGS()
- OBJECT_CONSTRUCTORS(JSFinalizationRegistry, JSObject);
+ TQ_OBJECT_CONSTRUCTORS(JSFinalizationRegistry)
};
// Internal object for storing weak references in JSFinalizationRegistry.
diff --git a/deps/v8/src/objects/js-weak-refs.tq b/deps/v8/src/objects/js-weak-refs.tq
index 36f3817ac7..c687ab5001 100644
--- a/deps/v8/src/objects/js-weak-refs.tq
+++ b/deps/v8/src/objects/js-weak-refs.tq
@@ -6,7 +6,6 @@ bitfield struct FinalizationRegistryFlags extends uint31 {
scheduled_for_cleanup: bool: 1 bit;
}
-@doNotGenerateCppClass
extern class JSFinalizationRegistry extends JSObject {
native_context: NativeContext;
cleanup: Callable;
diff --git a/deps/v8/src/objects/keys.h b/deps/v8/src/objects/keys.h
index 4abe2a5ad3..b1f539e233 100644
--- a/deps/v8/src/objects/keys.h
+++ b/deps/v8/src/objects/keys.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_KEYS_H_
#define V8_OBJECTS_KEYS_H_
+#include "include/v8-object.h"
#include "src/objects/hash-table.h"
#include "src/objects/js-objects.h"
#include "src/objects/objects.h"
@@ -17,6 +18,18 @@ class FastKeyAccumulator;
enum AddKeyConversion { DO_NOT_CONVERT, CONVERT_TO_ARRAY_INDEX };
+enum class GetKeysConversion {
+ kKeepNumbers = static_cast<int>(v8::KeyConversionMode::kKeepNumbers),
+ kConvertToString = static_cast<int>(v8::KeyConversionMode::kConvertToString),
+ kNoNumbers = static_cast<int>(v8::KeyConversionMode::kNoNumbers)
+};
+
+enum class KeyCollectionMode {
+ kOwnOnly = static_cast<int>(v8::KeyCollectionMode::kOwnOnly),
+ kIncludePrototypes =
+ static_cast<int>(v8::KeyCollectionMode::kIncludePrototypes)
+};
+
// This is a helper class for JSReceiver::GetKeys which collects and sorts keys.
// GetKeys needs to sort keys per prototype level, first showing the integer
// indices from elements then the strings from the properties. However, this
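
The relocated GetKeysConversion and KeyCollectionMode enums keep each enumerator pinned to its v8:: public counterpart via static_cast in the initializer, so converting between the public and internal enums is a free cast. A standalone sketch of the idiom:

namespace pub {
enum class Mode { kOwnOnly = 0, kIncludePrototypes = 1 };
}

namespace internal {
// Each value is defined in terms of the public one, so the two enums can
// never silently diverge.
enum class Mode {
  kOwnOnly = static_cast<int>(pub::Mode::kOwnOnly),
  kIncludePrototypes = static_cast<int>(pub::Mode::kIncludePrototypes)
};

inline Mode FromPublic(pub::Mode m) { return static_cast<Mode>(m); }
}
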
diff --git a/deps/v8/src/objects/map-inl.h b/deps/v8/src/objects/map-inl.h
index 572b3f9299..dc37a119fa 100644
--- a/deps/v8/src/objects/map-inl.h
+++ b/deps/v8/src/objects/map-inl.h
@@ -466,6 +466,28 @@ void Map::AccountAddedOutOfObjectPropertyField(int unused_in_property_array) {
DCHECK_EQ(unused_in_property_array, UnusedPropertyFields());
}
+#if V8_ENABLE_WEBASSEMBLY
+uint8_t Map::WasmByte1() const {
+ DCHECK(IsWasmObjectMap());
+ return inobject_properties_start_or_constructor_function_index();
+}
+
+uint8_t Map::WasmByte2() const {
+ DCHECK(IsWasmObjectMap());
+ return used_or_unused_instance_size_in_words();
+}
+
+void Map::SetWasmByte1(uint8_t value) {
+ CHECK(IsWasmObjectMap());
+ set_inobject_properties_start_or_constructor_function_index(value);
+}
+
+void Map::SetWasmByte2(uint8_t value) {
+ CHECK(IsWasmObjectMap());
+ set_used_or_unused_instance_size_in_words(value);
+}
+#endif // V8_ENABLE_WEBASSEMBLY
+
byte Map::bit_field() const {
// TODO(solanes, v8:7790, v8:11353): Make this non-atomic when TSAN sees the
// map's store synchronization.
@@ -726,7 +748,7 @@ bool Map::ConcurrentIsMap(PtrComprCageBase cage_base,
}
DEF_GETTER(Map, GetBackPointer, HeapObject) {
- Object object = constructor_or_back_pointer(cage_base);
+ Object object = constructor_or_back_pointer(cage_base, kRelaxedLoad);
if (ConcurrentIsMap(cage_base, object)) {
return Map::cast(object);
}
@@ -754,6 +776,9 @@ ACCESSORS(Map, prototype_validity_cell, Object, kPrototypeValidityCellOffset)
ACCESSORS_CHECKED2(Map, constructor_or_back_pointer, Object,
kConstructorOrBackPointerOrNativeContextOffset,
!IsContextMap(), value.IsNull() || !IsContextMap())
+RELAXED_ACCESSORS_CHECKED2(Map, constructor_or_back_pointer, Object,
+ kConstructorOrBackPointerOrNativeContextOffset,
+ !IsContextMap(), value.IsNull() || !IsContextMap())
ACCESSORS_CHECKED(Map, native_context, NativeContext,
kConstructorOrBackPointerOrNativeContextOffset,
IsContextMap())
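
GetBackPointer now reads constructor_or_back_pointer with an explicit relaxed tag because the concurrent marker races the main thread on this field. A standalone sketch of the idea (simplified to a std::atomic; V8 uses its own tagged-field machinery):

#include <atomic>

std::atomic<void*> constructor_or_back_pointer{nullptr};

void* GetBackPointerSketch() {
  // Relaxed is sufficient: the reader only needs a torn-free value and
  // re-validates it afterwards (compare the ConcurrentIsMap check above).
  return constructor_or_back_pointer.load(std::memory_order_relaxed);
}
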
diff --git a/deps/v8/src/objects/map.h b/deps/v8/src/objects/map.h
index 74d2a859e8..e649405091 100644
--- a/deps/v8/src/objects/map.h
+++ b/deps/v8/src/objects/map.h
@@ -565,6 +565,7 @@ class Map : public TorqueGeneratedMap<Map, HeapObject> {
// The field also overlaps with the native context pointer for context maps,
// and with the Wasm type info for WebAssembly object maps.
DECL_ACCESSORS(constructor_or_back_pointer, Object)
+ DECL_RELAXED_ACCESSORS(constructor_or_back_pointer, Object)
DECL_ACCESSORS(native_context, NativeContext)
DECL_ACCESSORS(native_context_or_null, Object)
DECL_ACCESSORS(wasm_type_info, WasmTypeInfo)
@@ -850,6 +851,12 @@ class Map : public TorqueGeneratedMap<Map, HeapObject> {
InstanceType instance_type);
inline bool CanHaveFastTransitionableElementsKind() const;
+ // Maps for Wasm objects can use certain fields for other purposes.
+ inline uint8_t WasmByte1() const;
+ inline uint8_t WasmByte2() const;
+ inline void SetWasmByte1(uint8_t value);
+ inline void SetWasmByte2(uint8_t value);
+
private:
// This byte encodes either the instance size without the in-object slack or
// the slack size in properties backing store.
diff --git a/deps/v8/src/objects/module.h b/deps/v8/src/objects/module.h
index 05ea04ccd9..5cb7e4bb7f 100644
--- a/deps/v8/src/objects/module.h
+++ b/deps/v8/src/objects/module.h
@@ -5,6 +5,7 @@
#ifndef V8_OBJECTS_MODULE_H_
#define V8_OBJECTS_MODULE_H_
+#include "include/v8-script.h"
#include "src/objects/fixed-array.h"
#include "src/objects/js-objects.h"
#include "src/objects/objects.h"
diff --git a/deps/v8/src/objects/object-macros-undef.h b/deps/v8/src/objects/object-macros-undef.h
index 1aa9dc10b4..f531ab0aa5 100644
--- a/deps/v8/src/objects/object-macros-undef.h
+++ b/deps/v8/src/objects/object-macros-undef.h
@@ -8,7 +8,6 @@
#undef OBJECT_CONSTRUCTORS
#undef OBJECT_CONSTRUCTORS_IMPL
-#undef OBJECT_CONSTRUCTORS_IMPL_CHECK_SUPER
#undef NEVER_READ_ONLY_SPACE
#undef NEVER_READ_ONLY_SPACE_IMPL
#undef DECL_PRIMITIVE_GETTER
@@ -40,7 +39,6 @@
#undef CAST_ACCESSOR
#undef INT_ACCESSORS
#undef INT32_ACCESSORS
-#undef IMPLICIT_TAG_RELAXED_INT32_ACCESSORS
#undef RELAXED_INT32_ACCESSORS
#undef UINT16_ACCESSORS
#undef UINT8_ACCESSORS
diff --git a/deps/v8/src/objects/object-macros.h b/deps/v8/src/objects/object-macros.h
index 561b1de30b..79cc79033e 100644
--- a/deps/v8/src/objects/object-macros.h
+++ b/deps/v8/src/objects/object-macros.h
@@ -30,11 +30,6 @@
#define OBJECT_CONSTRUCTORS_IMPL(Type, Super) \
inline Type::Type(Address ptr) : Super(ptr) { SLOW_DCHECK(Is##Type()); }
-// In these cases, we don't have our own instance type to check, so check the
-// supertype instead. This happens for types denoting a NativeContext-dependent
-// set of maps.
-#define OBJECT_CONSTRUCTORS_IMPL_CHECK_SUPER(Type, Super) \
- inline Type::Type(Address ptr) : Super(ptr) { SLOW_DCHECK(Is##Super()); }
#define NEVER_READ_ONLY_SPACE \
inline Heap* GetHeap() const; \
@@ -163,15 +158,6 @@
int32_t holder::name() const { return ReadField<int32_t>(offset); } \
void holder::set_##name(int32_t value) { WriteField<int32_t>(offset, value); }
-// TODO(solanes): Use the non-implicit one, and change the uses to use the tag.
-#define IMPLICIT_TAG_RELAXED_INT32_ACCESSORS(holder, name, offset) \
- int32_t holder::name() const { \
- return RELAXED_READ_INT32_FIELD(*this, offset); \
- } \
- void holder::set_##name(int32_t value) { \
- RELAXED_WRITE_INT32_FIELD(*this, offset, value); \
- }
-
#define RELAXED_INT32_ACCESSORS(holder, name, offset) \
int32_t holder::name(RelaxedLoadTag) const { \
return RELAXED_READ_INT32_FIELD(*this, offset); \
diff --git a/deps/v8/src/objects/objects-body-descriptors-inl.h b/deps/v8/src/objects/objects-body-descriptors-inl.h
index 7750b26575..838b0536e2 100644
--- a/deps/v8/src/objects/objects-body-descriptors-inl.h
+++ b/deps/v8/src/objects/objects-body-descriptors-inl.h
@@ -727,7 +727,7 @@ class WasmArray::BodyDescriptor final : public BodyDescriptorBase {
}
static inline int SizeOf(Map map, HeapObject object) {
- return WasmArray::GcSafeSizeFor(map, WasmArray::cast(object).length());
+ return WasmArray::SizeFor(map, WasmArray::cast(object).length());
}
};
@@ -800,8 +800,8 @@ class CoverageInfo::BodyDescriptor final : public BodyDescriptorBase {
class Code::BodyDescriptor final : public BodyDescriptorBase {
public:
STATIC_ASSERT(kRelocationInfoOffset + kTaggedSize ==
- kDeoptimizationDataOffset);
- STATIC_ASSERT(kDeoptimizationDataOffset + kTaggedSize ==
+ kDeoptimizationDataOrInterpreterDataOffset);
+ STATIC_ASSERT(kDeoptimizationDataOrInterpreterDataOffset + kTaggedSize ==
kPositionTableOffset);
STATIC_ASSERT(kPositionTableOffset + kTaggedSize == kCodeDataContainerOffset);
STATIC_ASSERT(kCodeDataContainerOffset + kTaggedSize == kDataStart);
diff --git a/deps/v8/src/objects/objects-definitions.h b/deps/v8/src/objects/objects-definitions.h
index 20ce96aae5..f70a469364 100644
--- a/deps/v8/src/objects/objects-definitions.h
+++ b/deps/v8/src/objects/objects-definitions.h
@@ -124,7 +124,6 @@ namespace internal {
IF_WASM(V, _, ASM_WASM_DATA_TYPE, AsmWasmData, asm_wasm_data) \
V(_, ASYNC_GENERATOR_REQUEST_TYPE, AsyncGeneratorRequest, \
async_generator_request) \
- V(_, BASELINE_DATA_TYPE, BaselineData, baseline_data) \
V(_, BREAK_POINT_TYPE, BreakPoint, break_point) \
V(_, BREAK_POINT_INFO_TYPE, BreakPointInfo, break_point_info) \
V(_, CACHED_TEMPLATE_OBJECT_TYPE, CachedTemplateObject, \
diff --git a/deps/v8/src/objects/objects.cc b/deps/v8/src/objects/objects.cc
index 2f16615536..68482fe68f 100644
--- a/deps/v8/src/objects/objects.cc
+++ b/deps/v8/src/objects/objects.cc
@@ -197,6 +197,8 @@ std::ostream& operator<<(std::ostream& os, PropertyCellType type) {
return os << "ConstantType";
case PropertyCellType::kMutable:
return os << "Mutable";
+ case PropertyCellType::kInTransition:
+ return os << "InTransition";
}
UNREACHABLE();
}
@@ -2291,7 +2293,7 @@ int HeapObject::SizeFromMap(Map map) const {
return WasmStruct::GcSafeSize(map);
}
if (instance_type == WASM_ARRAY_TYPE) {
- return WasmArray::GcSafeSizeFor(map, WasmArray::cast(*this).length());
+ return WasmArray::SizeFor(map, WasmArray::cast(*this).length());
}
#endif // V8_ENABLE_WEBASSEMBLY
DCHECK_EQ(instance_type, EMBEDDER_DATA_ARRAY_TYPE);
@@ -6532,6 +6534,8 @@ PropertyCellType PropertyCell::UpdatedType(Isolate* isolate,
V8_FALLTHROUGH;
case PropertyCellType::kMutable:
return PropertyCellType::kMutable;
+ case PropertyCellType::kInTransition:
+ UNREACHABLE();
}
}
@@ -6587,6 +6591,7 @@ bool PropertyCell::CheckDataIsCompatible(PropertyDetails details,
Object value) {
DisallowGarbageCollection no_gc;
PropertyCellType cell_type = details.cell_type();
+ CHECK_NE(cell_type, PropertyCellType::kInTransition);
if (value.IsTheHole()) {
CHECK_EQ(cell_type, PropertyCellType::kConstant);
} else {
@@ -6620,8 +6625,9 @@ bool PropertyCell::CanTransitionTo(PropertyDetails new_details,
return new_details.cell_type() == PropertyCellType::kMutable ||
(new_details.cell_type() == PropertyCellType::kConstant &&
new_value.IsTheHole());
+ case PropertyCellType::kInTransition:
+ UNREACHABLE();
}
- UNREACHABLE();
}
#endif // DEBUG
diff --git a/deps/v8/src/objects/objects.h b/deps/v8/src/objects/objects.h
index eb31ec957d..61bcf79800 100644
--- a/deps/v8/src/objects/objects.h
+++ b/deps/v8/src/objects/objects.h
@@ -9,7 +9,6 @@
#include <memory>
#include "include/v8-internal.h"
-#include "include/v8.h"
#include "include/v8config.h"
#include "src/base/bits.h"
#include "src/base/build_config.h"
@@ -840,18 +839,6 @@ enum EnsureElementsMode {
// Indicator for one component of an AccessorPair.
enum AccessorComponent { ACCESSOR_GETTER, ACCESSOR_SETTER };
-enum class GetKeysConversion {
- kKeepNumbers = static_cast<int>(v8::KeyConversionMode::kKeepNumbers),
- kConvertToString = static_cast<int>(v8::KeyConversionMode::kConvertToString),
- kNoNumbers = static_cast<int>(v8::KeyConversionMode::kNoNumbers)
-};
-
-enum class KeyCollectionMode {
- kOwnOnly = static_cast<int>(v8::KeyCollectionMode::kOwnOnly),
- kIncludePrototypes =
- static_cast<int>(v8::KeyCollectionMode::kIncludePrototypes)
-};
-
// Utility superclass for stack-allocated objects that must be updated
// on gc. It provides two ways for the gc to update instances, either
// iterating or updating after gc.
diff --git a/deps/v8/src/objects/ordered-hash-table.h b/deps/v8/src/objects/ordered-hash-table.h
index 1110352e46..45682e45e9 100644
--- a/deps/v8/src/objects/ordered-hash-table.h
+++ b/deps/v8/src/objects/ordered-hash-table.h
@@ -10,6 +10,7 @@
#include "src/objects/fixed-array.h"
#include "src/objects/internal-index.h"
#include "src/objects/js-objects.h"
+#include "src/objects/keys.h"
#include "src/objects/smi.h"
#include "src/roots/roots.h"
diff --git a/deps/v8/src/objects/property-cell-inl.h b/deps/v8/src/objects/property-cell-inl.h
index dfaaf1c80a..ef4fa75463 100644
--- a/deps/v8/src/objects/property-cell-inl.h
+++ b/deps/v8/src/objects/property-cell-inl.h
@@ -57,6 +57,9 @@ void PropertyCell::Transition(PropertyDetails new_details,
DCHECK(CanTransitionTo(new_details, *new_value));
// This code must be in sync with its counterpart in
// PropertyCellData::Serialize.
+ PropertyDetails transition_marker = new_details;
+ transition_marker.set_cell_type(PropertyCellType::kInTransition);
+ set_property_details_raw(transition_marker.AsSmi(), kReleaseStore);
set_value(*new_value, kReleaseStore);
set_property_details_raw(new_details.AsSmi(), kReleaseStore);
}
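
Transition now publishes a kInTransition marker before touching the value, giving background readers a way to detect a racing update. A standalone, simplified sketch of the protocol (std::atomic stand-ins for V8's release-store fields):

#include <atomic>

enum class CellState : int { kMutable, kInTransition };

std::atomic<CellState> state{CellState::kMutable};
std::atomic<int> value{0};

// Writer (main thread): marker first, then value, then final details.
void TransitionSketch(int new_value, CellState final_state) {
  state.store(CellState::kInTransition, std::memory_order_release);
  value.store(new_value, std::memory_order_release);
  state.store(final_state, std::memory_order_release);
}

// Reader (background thread): observing the marker means "retry later".
bool TryReadSketch(int* out) {
  if (state.load(std::memory_order_acquire) == CellState::kInTransition)
    return false;
  *out = value.load(std::memory_order_acquire);
  return true;
}
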
diff --git a/deps/v8/src/objects/property-details.h b/deps/v8/src/objects/property-details.h
index 58cc2359cb..f32d6ceb89 100644
--- a/deps/v8/src/objects/property-details.h
+++ b/deps/v8/src/objects/property-details.h
@@ -5,7 +5,7 @@
#ifndef V8_OBJECTS_PROPERTY_DETAILS_H_
#define V8_OBJECTS_PROPERTY_DETAILS_H_
-#include "include/v8.h"
+#include "include/v8-object.h"
#include "src/base/bit-field.h"
#include "src/common/globals.h"
#include "src/flags/flags.h"
@@ -242,6 +242,9 @@ enum class PropertyCellType {
  kUndefined,     // The PREMONOMORPHIC state of property cells.
kConstant, // Cell has been assigned only once.
kConstantType, // Cell has been assigned only one type.
+ // Temporary value indicating an ongoing property cell state transition. Only
+ // observable by a background thread.
+ kInTransition,
// Value for dictionaries not holding cells, must be 0:
kNoCell = kMutable,
};
@@ -381,8 +384,7 @@ class PropertyDetails {
// Bit fields in value_ (type, shift, size). Must be public so the
// constants can be embedded in generated code.
using KindField = base::BitField<PropertyKind, 0, 1>;
- using LocationField = KindField::Next<PropertyLocation, 1>;
- using ConstnessField = LocationField::Next<PropertyConstness, 1>;
+ using ConstnessField = KindField::Next<PropertyConstness, 1>;
using AttributesField = ConstnessField::Next<PropertyAttributes, 3>;
static const int kAttributesReadOnlyMask =
(READ_ONLY << AttributesField::kShift);
@@ -392,11 +394,12 @@ class PropertyDetails {
(DONT_ENUM << AttributesField::kShift);
// Bit fields for normalized/dictionary mode objects.
- using PropertyCellTypeField = AttributesField::Next<PropertyCellType, 2>;
+ using PropertyCellTypeField = AttributesField::Next<PropertyCellType, 3>;
using DictionaryStorageField = PropertyCellTypeField::Next<uint32_t, 23>;
// Bit fields for fast objects.
- using RepresentationField = AttributesField::Next<uint32_t, 3>;
+ using LocationField = AttributesField::Next<PropertyLocation, 1>;
+ using RepresentationField = LocationField::Next<uint32_t, 3>;
using DescriptorPointer =
RepresentationField::Next<uint32_t, kDescriptorIndexBitCount>;
using FieldIndexField =
@@ -415,7 +418,6 @@ class PropertyDetails {
STATIC_ASSERT(KindField::kLastUsedBit < 8);
STATIC_ASSERT(ConstnessField::kLastUsedBit < 8);
STATIC_ASSERT(AttributesField::kLastUsedBit < 8);
- STATIC_ASSERT(LocationField::kLastUsedBit < 8);
static const int kInitialIndex = 1;
@@ -445,12 +447,12 @@ class PropertyDetails {
// with an enumeration index of 0 as a single byte.
uint8_t ToByte() {
// We only care about the value of KindField, ConstnessField, and
- // AttributesField. LocationField is also stored, but it will always be
- // kField. We've statically asserted earlier that all those fields fit into
- // a byte together.
+ // AttributesField. We've statically asserted earlier that these fields fit
+ // into a byte together.
+
+ DCHECK_EQ(PropertyLocation::kField, location());
+ STATIC_ASSERT(static_cast<int>(PropertyLocation::kField) == 0);
- // PropertyCellTypeField comes next, its value must be kNoCell == 0 for
- // dictionary mode PropertyDetails anyway.
DCHECK_EQ(PropertyCellType::kNoCell, cell_type());
STATIC_ASSERT(static_cast<int>(PropertyCellType::kNoCell) == 0);
@@ -464,16 +466,13 @@ class PropertyDetails {
// Only to be used for bytes obtained by ToByte. In particular, only used for
// non-global dictionary properties.
static PropertyDetails FromByte(uint8_t encoded_details) {
- // The 0-extension to 32bit sets PropertyCellType to kNoCell and
- // enumeration index to 0, as intended. Everything else is obtained from
- // |encoded_details|.
-
+ // The 0-extension to 32bit sets PropertyLocation to kField,
+ // PropertyCellType to kNoCell, and enumeration index to 0, as intended.
+ // Everything else is obtained from |encoded_details|.
PropertyDetails details(encoded_details);
-
- DCHECK_EQ(0, details.dictionary_index());
DCHECK_EQ(PropertyLocation::kField, details.location());
DCHECK_EQ(PropertyCellType::kNoCell, details.cell_type());
-
+ DCHECK_EQ(0, details.dictionary_index());
return details;
}
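
ToByte and FromByte rely on every field above the low byte being in a zero-valued state (kField location, kNoCell cell type, dictionary index 0), so zero-extension alone reconstructs the full details. A standalone sketch with an assumed 32-bit layout:

#include <cassert>
#include <cstdint>

// Assumption for the sketch: bits 0-7 hold kind/constness/attributes; all
// higher fields must be in their 0 states for the encoding to be lossless.
uint8_t ToByteSketch(uint32_t details) {
  assert((details >> 8) == 0);
  return static_cast<uint8_t>(details);
}

uint32_t FromByteSketch(uint8_t encoded) {
  return encoded;  // zero-extension restores the 0-valued upper fields
}
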
diff --git a/deps/v8/src/objects/script.h b/deps/v8/src/objects/script.h
index 10fe0f834e..76b8d92dd8 100644
--- a/deps/v8/src/objects/script.h
+++ b/deps/v8/src/objects/script.h
@@ -7,6 +7,7 @@
#include <memory>
+#include "include/v8-script.h"
#include "src/base/export-template.h"
#include "src/objects/fixed-array.h"
#include "src/objects/objects.h"
@@ -22,6 +23,10 @@ namespace internal {
class FunctionLiteral;
+namespace wasm {
+class NativeModule;
+} // namespace wasm
+
#include "torque-generated/src/objects/script-tq.inc"
// Script describes a script which has been added to the VM.
diff --git a/deps/v8/src/objects/shared-function-info-inl.h b/deps/v8/src/objects/shared-function-info-inl.h
index 583ca8dccf..1b8c56386f 100644
--- a/deps/v8/src/objects/shared-function-info-inl.h
+++ b/deps/v8/src/objects/shared-function-info-inl.h
@@ -7,6 +7,7 @@
#include "src/base/macros.h"
#include "src/base/platform/mutex.h"
+#include "src/common/globals.h"
#include "src/handles/handles-inl.h"
#include "src/heap/heap-write-barrier-inl.h"
#include "src/objects/debug-objects-inl.h"
@@ -92,8 +93,6 @@ TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledData)
TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithoutPreparseData)
TQ_OBJECT_CONSTRUCTORS_IMPL(UncompiledDataWithPreparseData)
-TQ_OBJECT_CONSTRUCTORS_IMPL(BaselineData)
-
TQ_OBJECT_CONSTRUCTORS_IMPL(InterpreterData)
ACCESSORS(InterpreterData, raw_interpreter_trampoline, CodeT,
@@ -130,13 +129,37 @@ DEF_ACQUIRE_GETTER(SharedFunctionInfo,
return value;
}
-RENAME_UINT16_TORQUE_ACCESSORS(SharedFunctionInfo,
- internal_formal_parameter_count,
- formal_parameter_count)
+uint16_t SharedFunctionInfo::internal_formal_parameter_count_with_receiver()
+ const {
+ const uint16_t param_count = TorqueGeneratedClass::formal_parameter_count();
+ if (param_count == kDontAdaptArgumentsSentinel) return param_count;
+ return param_count + (kJSArgcIncludesReceiver ? 0 : 1);
+}
+
+uint16_t SharedFunctionInfo::internal_formal_parameter_count_without_receiver()
+ const {
+ const uint16_t param_count = TorqueGeneratedClass::formal_parameter_count();
+ if (param_count == kDontAdaptArgumentsSentinel) return param_count;
+ return param_count - kJSArgcReceiverSlots;
+}
+
+void SharedFunctionInfo::set_internal_formal_parameter_count(int value) {
+ DCHECK_EQ(value, static_cast<uint16_t>(value));
+ DCHECK_GE(value, kJSArgcReceiverSlots);
+ TorqueGeneratedClass::set_formal_parameter_count(value);
+}
+
RENAME_UINT16_TORQUE_ACCESSORS(SharedFunctionInfo, raw_function_token_offset,
function_token_offset)
-IMPLICIT_TAG_RELAXED_INT32_ACCESSORS(SharedFunctionInfo, flags, kFlagsOffset)
+RELAXED_INT32_ACCESSORS(SharedFunctionInfo, flags, kFlagsOffset)
+int32_t SharedFunctionInfo::relaxed_flags() const {
+ return flags(kRelaxedLoad);
+}
+void SharedFunctionInfo::set_relaxed_flags(int32_t flags) {
+ return set_flags(flags, kRelaxedStore);
+}
+
UINT8_ACCESSORS(SharedFunctionInfo, flags2, kFlags2Offset)
bool SharedFunctionInfo::HasSharedName() const {
@@ -253,34 +276,36 @@ BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags2,
has_static_private_methods_or_accessors,
SharedFunctionInfo::HasStaticPrivateMethodsOrAccessorsBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, syntax_kind,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags, syntax_kind,
SharedFunctionInfo::FunctionSyntaxKindBits)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, allows_lazy_compilation,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags, allows_lazy_compilation,
SharedFunctionInfo::AllowLazyCompilationBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, has_duplicate_parameters,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags, has_duplicate_parameters,
SharedFunctionInfo::HasDuplicateParametersBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, native,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags, native,
SharedFunctionInfo::IsNativeBit)
#if V8_ENABLE_WEBASSEMBLY
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_asm_wasm_broken,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags, is_asm_wasm_broken,
SharedFunctionInfo::IsAsmWasmBrokenBit)
#endif // V8_ENABLE_WEBASSEMBLY
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags,
requires_instance_members_initializer,
SharedFunctionInfo::RequiresInstanceMembersInitializerBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, name_should_print_as_anonymous,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags,
+ name_should_print_as_anonymous,
SharedFunctionInfo::NameShouldPrintAsAnonymousBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, has_reported_binary_coverage,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags,
+ has_reported_binary_coverage,
SharedFunctionInfo::HasReportedBinaryCoverageBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, is_toplevel,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags, is_toplevel,
SharedFunctionInfo::IsTopLevelBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags, properties_are_final,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags, properties_are_final,
SharedFunctionInfo::PropertiesAreFinalBit)
-BIT_FIELD_ACCESSORS(SharedFunctionInfo, flags,
+BIT_FIELD_ACCESSORS(SharedFunctionInfo, relaxed_flags,
private_name_lookup_skips_outer_class,
SharedFunctionInfo::PrivateNameLookupSkipsOuterClassBit)
@@ -289,12 +314,12 @@ bool SharedFunctionInfo::optimization_disabled() const {
}
BailoutReason SharedFunctionInfo::disable_optimization_reason() const {
- return DisabledOptimizationReasonBits::decode(flags());
+ return DisabledOptimizationReasonBits::decode(flags(kRelaxedLoad));
}
LanguageMode SharedFunctionInfo::language_mode() const {
STATIC_ASSERT(LanguageModeSize == 2);
- return construct_language_mode(IsStrictBit::decode(flags()));
+ return construct_language_mode(IsStrictBit::decode(flags(kRelaxedLoad)));
}
void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
@@ -302,22 +327,22 @@ void SharedFunctionInfo::set_language_mode(LanguageMode language_mode) {
// We only allow language mode transitions that set the same language mode
// again or go up in the chain:
DCHECK(is_sloppy(this->language_mode()) || is_strict(language_mode));
- int hints = flags();
+ int hints = flags(kRelaxedLoad);
hints = IsStrictBit::update(hints, is_strict(language_mode));
- set_flags(hints);
+ set_flags(hints, kRelaxedStore);
UpdateFunctionMapIndex();
}
FunctionKind SharedFunctionInfo::kind() const {
STATIC_ASSERT(FunctionKindBits::kSize == kFunctionKindBitSize);
- return FunctionKindBits::decode(flags());
+ return FunctionKindBits::decode(flags(kRelaxedLoad));
}
void SharedFunctionInfo::set_kind(FunctionKind kind) {
- int hints = flags();
+ int hints = flags(kRelaxedLoad);
hints = FunctionKindBits::update(hints, kind);
hints = IsClassConstructorBit::update(hints, IsClassConstructor(kind));
- set_flags(hints);
+ set_flags(hints, kRelaxedStore);
UpdateFunctionMapIndex();
}
@@ -326,7 +351,7 @@ bool SharedFunctionInfo::is_wrapped() const {
}
bool SharedFunctionInfo::construct_as_builtin() const {
- return ConstructAsBuiltinBit::decode(flags());
+ return ConstructAsBuiltinBit::decode(flags(kRelaxedLoad));
}
void SharedFunctionInfo::CalculateConstructAsBuiltin() {
@@ -340,15 +365,15 @@ void SharedFunctionInfo::CalculateConstructAsBuiltin() {
uses_builtins_construct_stub = true;
}
- int f = flags();
+ int f = flags(kRelaxedLoad);
f = ConstructAsBuiltinBit::update(f, uses_builtins_construct_stub);
- set_flags(f);
+ set_flags(f, kRelaxedStore);
}
int SharedFunctionInfo::function_map_index() const {
// Note: Must be kept in sync with the FastNewClosure builtin.
- int index =
- Context::FIRST_FUNCTION_MAP_INDEX + FunctionMapIndexBits::decode(flags());
+ int index = Context::FIRST_FUNCTION_MAP_INDEX +
+ FunctionMapIndexBits::decode(flags(kRelaxedLoad));
DCHECK_LE(index, Context::LAST_FUNCTION_MAP_INDEX);
return index;
}
@@ -359,7 +384,8 @@ void SharedFunctionInfo::set_function_map_index(int index) {
DCHECK_LE(Context::FIRST_FUNCTION_MAP_INDEX, index);
DCHECK_LE(index, Context::LAST_FUNCTION_MAP_INDEX);
index -= Context::FIRST_FUNCTION_MAP_INDEX;
- set_flags(FunctionMapIndexBits::update(flags(), index));
+ set_flags(FunctionMapIndexBits::update(flags(kRelaxedLoad), index),
+ kRelaxedStore);
}
void SharedFunctionInfo::clear_padding() {
@@ -378,7 +404,12 @@ void SharedFunctionInfo::DontAdaptArguments() {
// TODO(leszeks): Revise this DCHECK now that the code field is gone.
DCHECK(!HasWasmExportedFunctionData());
#endif // V8_ENABLE_WEBASSEMBLY
- set_internal_formal_parameter_count(kDontAdaptArgumentsSentinel);
+ TorqueGeneratedClass::set_formal_parameter_count(kDontAdaptArgumentsSentinel);
+}
+
+bool SharedFunctionInfo::IsDontAdaptArguments() const {
+ return TorqueGeneratedClass::formal_parameter_count() ==
+ kDontAdaptArgumentsSentinel;
}
bool SharedFunctionInfo::IsInterpreted() const { return HasBytecodeArray(); }
@@ -484,8 +515,8 @@ IsCompiledScope SharedFunctionInfo::is_compiled_scope(IsolateT* isolate) const {
IsCompiledScope::IsCompiledScope(const SharedFunctionInfo shared,
Isolate* isolate)
: is_compiled_(shared.is_compiled()) {
- if (shared.HasBaselineData()) {
- retain_code_ = handle(shared.baseline_data(), isolate);
+ if (shared.HasBaselineCode()) {
+ retain_code_ = handle(shared.baseline_code(kAcquireLoad), isolate);
} else if (shared.HasBytecodeArray()) {
retain_code_ = handle(shared.GetBytecodeArray(isolate), isolate);
} else {
@@ -498,8 +529,9 @@ IsCompiledScope::IsCompiledScope(const SharedFunctionInfo shared,
IsCompiledScope::IsCompiledScope(const SharedFunctionInfo shared,
LocalIsolate* isolate)
: is_compiled_(shared.is_compiled()) {
- if (shared.HasBaselineData()) {
- retain_code_ = isolate->heap()->NewPersistentHandle(shared.baseline_data());
+ if (shared.HasBaselineCode()) {
+ retain_code_ = isolate->heap()->NewPersistentHandle(
+ shared.baseline_code(kAcquireLoad));
} else if (shared.HasBytecodeArray()) {
retain_code_ =
isolate->heap()->NewPersistentHandle(shared.GetBytecodeArray(isolate));
@@ -530,8 +562,7 @@ FunctionTemplateInfo SharedFunctionInfo::get_api_func_data() const {
bool SharedFunctionInfo::HasBytecodeArray() const {
Object data = function_data(kAcquireLoad);
- return data.IsBytecodeArray() || data.IsInterpreterData() ||
- data.IsBaselineData();
+ return data.IsBytecodeArray() || data.IsInterpreterData() || data.IsCodeT();
}
template <typename IsolateT>
@@ -547,40 +578,14 @@ BytecodeArray SharedFunctionInfo::GetBytecodeArray(IsolateT* isolate) const {
return GetActiveBytecodeArray();
}
-DEF_GETTER(BaselineData, baseline_code, Code) {
- return FromCodeT(TorqueGeneratedClass::baseline_code(cage_base));
-}
-
-void BaselineData::set_baseline_code(Code code, WriteBarrierMode mode) {
- return TorqueGeneratedClass::set_baseline_code(ToCodeT(code), mode);
-}
-
-BytecodeArray BaselineData::GetActiveBytecodeArray() const {
- Object data = this->data();
- if (data.IsBytecodeArray()) {
- return BytecodeArray::cast(data);
- } else {
- DCHECK(data.IsInterpreterData());
- return InterpreterData::cast(data).bytecode_array();
- }
-}
-
-void BaselineData::SetActiveBytecodeArray(BytecodeArray bytecode) {
- Object data = this->data();
- if (data.IsBytecodeArray()) {
- set_data(bytecode);
- } else {
- DCHECK(data.IsInterpreterData());
- InterpreterData::cast(data).set_bytecode_array(bytecode);
- }
-}
-
BytecodeArray SharedFunctionInfo::GetActiveBytecodeArray() const {
Object data = function_data(kAcquireLoad);
+ if (data.IsCodeT()) {
+ Code baseline_code = FromCodeT(CodeT::cast(data));
+ data = baseline_code.bytecode_or_interpreter_data();
+ }
if (data.IsBytecodeArray()) {
return BytecodeArray::cast(data);
- } else if (data.IsBaselineData()) {
- return baseline_data().GetActiveBytecodeArray();
} else {
DCHECK(data.IsInterpreterData());
return InterpreterData::cast(data).bytecode_array();
@@ -588,11 +593,13 @@ BytecodeArray SharedFunctionInfo::GetActiveBytecodeArray() const {
}
void SharedFunctionInfo::SetActiveBytecodeArray(BytecodeArray bytecode) {
+ // We don't allow setting the active bytecode array on baseline-optimized
+ // functions. They should have been flushed earlier.
+ DCHECK(!HasBaselineCode());
+
Object data = function_data(kAcquireLoad);
if (data.IsBytecodeArray()) {
set_function_data(bytecode, kReleaseStore);
- } else if (data.IsBaselineData()) {
- baseline_data().SetActiveBytecodeArray(bytecode);
} else {
DCHECK(data.IsInterpreterData());
interpreter_data().set_bytecode_array(bytecode);
@@ -618,12 +625,13 @@ bool SharedFunctionInfo::ShouldFlushCode(
// check if it is old. Note, this is done this way since this function can be
// called by the concurrent marker.
Object data = function_data(kAcquireLoad);
- if (data.IsBaselineData()) {
+ if (data.IsCodeT()) {
+ Code baseline_code = FromCodeT(CodeT::cast(data));
+ DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
// If baseline code flushing isn't enabled and we have baseline data on SFI
// we cannot flush baseline / bytecode.
if (!IsBaselineCodeFlushingEnabled(code_flush_mode)) return false;
- data =
- ACQUIRE_READ_FIELD(BaselineData::cast(data), BaselineData::kDataOffset);
+ data = baseline_code.bytecode_or_interpreter_data();
} else if (!IsByteCodeFlushingEnabled(code_flush_mode)) {
// If bytecode flushing isn't enabled and there is no baseline code there is
// nothing to flush.
@@ -645,40 +653,56 @@ Code SharedFunctionInfo::InterpreterTrampoline() const {
bool SharedFunctionInfo::HasInterpreterData() const {
Object data = function_data(kAcquireLoad);
- if (data.IsBaselineData()) data = BaselineData::cast(data).data();
+ if (data.IsCodeT()) {
+ Code baseline_code = FromCodeT(CodeT::cast(data));
+ DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
+ data = baseline_code.bytecode_or_interpreter_data();
+ }
return data.IsInterpreterData();
}
InterpreterData SharedFunctionInfo::interpreter_data() const {
DCHECK(HasInterpreterData());
Object data = function_data(kAcquireLoad);
- if (data.IsBaselineData()) data = BaselineData::cast(data).data();
+ if (data.IsCodeT()) {
+ Code baseline_code = FromCodeT(CodeT::cast(data));
+ DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
+ data = baseline_code.bytecode_or_interpreter_data();
+ }
return InterpreterData::cast(data);
}
void SharedFunctionInfo::set_interpreter_data(
InterpreterData interpreter_data) {
DCHECK(FLAG_interpreted_frames_native_stack);
- DCHECK(!HasBaselineData());
+ DCHECK(!HasBaselineCode());
set_function_data(interpreter_data, kReleaseStore);
}
-bool SharedFunctionInfo::HasBaselineData() const {
- return function_data(kAcquireLoad).IsBaselineData();
+bool SharedFunctionInfo::HasBaselineCode() const {
+ Object data = function_data(kAcquireLoad);
+ if (data.IsCodeT()) {
+ DCHECK_EQ(FromCodeT(CodeT::cast(data)).kind(), CodeKind::BASELINE);
+ return true;
+ }
+ return false;
}
-BaselineData SharedFunctionInfo::baseline_data() const {
- DCHECK(HasBaselineData());
- return BaselineData::cast(function_data(kAcquireLoad));
+Code SharedFunctionInfo::baseline_code(AcquireLoadTag) const {
+ DCHECK(HasBaselineCode());
+ return FromCodeT(CodeT::cast(function_data(kAcquireLoad)));
}
-void SharedFunctionInfo::set_baseline_data(BaselineData baseline_data) {
- set_function_data(baseline_data, kReleaseStore);
+void SharedFunctionInfo::set_baseline_code(Code baseline_code,
+ ReleaseStoreTag) {
+ DCHECK_EQ(baseline_code.kind(), CodeKind::BASELINE);
+ set_function_data(ToCodeT(baseline_code), kReleaseStore);
}
-void SharedFunctionInfo::flush_baseline_data() {
- DCHECK(HasBaselineData());
- set_function_data(baseline_data().data(), kReleaseStore);
+void SharedFunctionInfo::FlushBaselineCode() {
+ DCHECK(HasBaselineCode());
+ set_function_data(baseline_code(kAcquireLoad).bytecode_or_interpreter_data(),
+ kReleaseStore);
}
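Taken together, the accessors above turn SharedFunctionInfo::function_data into a three-state field: a BytecodeArray, an InterpreterData, or (new in this patch) the baseline Code object itself, whose bytecode_or_interpreter_data() link replaces the old BaselineData wrapper. A minimal standalone C++ model of that state machine, with std::variant standing in for V8's tagged Object field (names and types here are illustrative, not V8's):

#include <cassert>
#include <variant>

struct BytecodeArray {};
struct InterpreterData { BytecodeArray bytecode; };

// Stand-in for baseline Code: it keeps a link back to the data it was
// compiled from, mirroring Code::bytecode_or_interpreter_data().
struct BaselineCode {
  std::variant<BytecodeArray, InterpreterData> underlying;
};

// Models SharedFunctionInfo::function_data after this patch.
using FunctionData = std::variant<BytecodeArray, InterpreterData, BaselineCode>;

bool HasBaselineCode(const FunctionData& d) {
  return std::holds_alternative<BaselineCode>(d);
}

// Mirrors GetActiveBytecodeArray: unwrap baseline code first, then look
// through InterpreterData if necessary.
BytecodeArray GetActiveBytecode(FunctionData d) {
  if (const auto* baseline = std::get_if<BaselineCode>(&d)) {
    if (const auto* b = std::get_if<BytecodeArray>(&baseline->underlying))
      return *b;
    return std::get<InterpreterData>(baseline->underlying).bytecode;
  }
  if (const auto* b = std::get_if<BytecodeArray>(&d)) return *b;
  return std::get<InterpreterData>(d).bytecode;
}

// Mirrors FlushBaselineCode: drop the baseline code but keep whatever
// bytecode/interpreter data it wrapped.
void FlushBaselineCode(FunctionData& d) {
  assert(HasBaselineCode(d));
  auto underlying = std::get<BaselineCode>(d).underlying;
  if (auto* b = std::get_if<BytecodeArray>(&underlying)) {
    d = *b;
  } else {
    d = std::get<InterpreterData>(underlying);
  }
}

This collapse is what the ShouldFlushCode path above depends on: flushing baseline code leaves the function exactly as if it had never been baseline-compiled.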
#if V8_ENABLE_WEBASSEMBLY
@@ -898,11 +922,11 @@ bool SharedFunctionInfo::CanDiscardCompiled() const {
if (HasAsmWasmData()) return true;
#endif // V8_ENABLE_WEBASSEMBLY
return HasBytecodeArray() || HasUncompiledDataWithPreparseData() ||
- HasBaselineData();
+ HasBaselineCode();
}
bool SharedFunctionInfo::is_class_constructor() const {
- return IsClassConstructorBit::decode(flags());
+ return IsClassConstructorBit::decode(flags(kRelaxedLoad));
}
void SharedFunctionInfo::set_are_properties_final(bool value) {
diff --git a/deps/v8/src/objects/shared-function-info.cc b/deps/v8/src/objects/shared-function-info.cc
index 22e98a140c..4354a2af28 100644
--- a/deps/v8/src/objects/shared-function-info.cc
+++ b/deps/v8/src/objects/shared-function-info.cc
@@ -8,6 +8,7 @@
#include "src/ast/scopes.h"
#include "src/codegen/compilation-cache.h"
#include "src/codegen/compiler.h"
+#include "src/common/globals.h"
#include "src/diagnostics/code-tracer.h"
#include "src/objects/shared-function-info-inl.h"
#include "src/strings/string-builder-inl.h"
@@ -52,13 +53,13 @@ void SharedFunctionInfo::Init(ReadOnlyRoots ro_roots, int unique_id) {
// Set integer fields (smi or int, depending on the architecture).
set_length(0);
- set_internal_formal_parameter_count(0);
+ set_internal_formal_parameter_count(JSParameterCount(0));
set_expected_nof_properties(0);
set_raw_function_token_offset(0);
// All flags default to false or 0, except ConstructAsBuiltinBit just because
// we're using the kIllegal builtin.
- set_flags(ConstructAsBuiltinBit::encode(true));
+ set_flags(ConstructAsBuiltinBit::encode(true), kRelaxedStore);
set_flags2(0);
UpdateFunctionMapIndex();
@@ -84,10 +85,10 @@ Code SharedFunctionInfo::GetCode() const {
DCHECK(HasBytecodeArray());
return isolate->builtins()->code(Builtin::kInterpreterEntryTrampoline);
}
- if (data.IsBaselineData()) {
- // Having BaselineData means we are a compiled, baseline function.
- DCHECK(HasBaselineData());
- return baseline_data().baseline_code();
+ if (data.IsCodeT()) {
+ // Having baseline Code means we are a compiled, baseline function.
+ DCHECK(HasBaselineCode());
+ return FromCodeT(CodeT::cast(data));
}
#if V8_ENABLE_WEBASSEMBLY
if (data.IsAsmWasmData()) {
@@ -435,7 +436,8 @@ std::ostream& operator<<(std::ostream& os, const SourceCodeOf& v) {
void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
DCHECK_NE(reason, BailoutReason::kNoReason);
- set_flags(DisabledOptimizationReasonBits::update(flags(), reason));
+ set_flags(DisabledOptimizationReasonBits::update(flags(kRelaxedLoad), reason),
+ kRelaxedStore);
// Code should be the lazy compilation stub or else interpreted.
Isolate* isolate = GetIsolate();
DCHECK(abstract_code(isolate).kind() == CodeKind::INTERPRETED_FUNCTION ||
@@ -459,7 +461,8 @@ void SharedFunctionInfo::InitFromFunctionLiteral(
// When adding fields here, make sure DeclarationScope::AnalyzePartially is
// updated accordingly.
- shared_info->set_internal_formal_parameter_count(lit->parameter_count());
+ shared_info->set_internal_formal_parameter_count(
+ JSParameterCount(lit->parameter_count()));
shared_info->SetFunctionTokenPosition(lit->function_token_position(),
lit->start_position());
shared_info->set_syntax_kind(lit->syntax_kind());
@@ -704,6 +707,7 @@ void SharedFunctionInfo::UninstallDebugBytecode(SharedFunctionInfo shared,
isolate->shared_function_info_access());
DebugInfo debug_info = shared.GetDebugInfo();
BytecodeArray original_bytecode_array = debug_info.OriginalBytecodeArray();
+ DCHECK(!shared.HasBaselineCode());
shared.SetActiveBytecodeArray(original_bytecode_array);
debug_info.set_original_bytecode_array(
ReadOnlyRoots(isolate).undefined_value(), kReleaseStore);
diff --git a/deps/v8/src/objects/shared-function-info.h b/deps/v8/src/objects/shared-function-info.h
index fd19f90165..598ccfd883 100644
--- a/deps/v8/src/objects/shared-function-info.h
+++ b/deps/v8/src/objects/shared-function-info.h
@@ -10,6 +10,7 @@
#include "src/base/bit-field.h"
#include "src/builtins/builtins.h"
#include "src/codegen/bailout-reason.h"
+#include "src/common/globals.h"
#include "src/objects/compressed-slots.h"
#include "src/objects/function-kind.h"
#include "src/objects/function-syntax-kind.h"
@@ -154,16 +155,6 @@ class InterpreterData
TQ_OBJECT_CONSTRUCTORS(InterpreterData)
};
-class BaselineData : public TorqueGeneratedBaselineData<BaselineData, Struct> {
- public:
- inline BytecodeArray GetActiveBytecodeArray() const;
- inline void SetActiveBytecodeArray(BytecodeArray bytecode);
-
- DECL_ACCESSORS(baseline_code, Code)
-
- TQ_OBJECT_CONSTRUCTORS(BaselineData)
-};
-
// SharedFunctionInfo describes the JSFunction information that can be
// shared by multiple instances of the function.
class SharedFunctionInfo
@@ -275,8 +266,12 @@ class SharedFunctionInfo
// [internal formal parameter count]: The declared number of parameters.
// For subclass constructors, also includes new.target.
- // The size of function's frame is internal_formal_parameter_count + 1.
- DECL_UINT16_ACCESSORS(internal_formal_parameter_count)
+  // The size of the function's frame is
+  // internal_formal_parameter_count_with_receiver.
+ inline void set_internal_formal_parameter_count(int value);
+ inline uint16_t internal_formal_parameter_count_with_receiver() const;
+ inline uint16_t internal_formal_parameter_count_without_receiver() const;
+
private:
using TorqueGeneratedSharedFunctionInfo::formal_parameter_count;
using TorqueGeneratedSharedFunctionInfo::set_formal_parameter_count;
@@ -285,6 +280,7 @@ class SharedFunctionInfo
// Set the formal parameter count so the function code will be
// called without using argument adaptor frames.
inline void DontAdaptArguments();
+ inline bool IsDontAdaptArguments() const;
// [function data]: This field holds some additional data for function.
// Currently it has one of:
@@ -314,10 +310,10 @@ class SharedFunctionInfo
inline bool HasInterpreterData() const;
inline InterpreterData interpreter_data() const;
inline void set_interpreter_data(InterpreterData interpreter_data);
- inline bool HasBaselineData() const;
- inline BaselineData baseline_data() const;
- inline void set_baseline_data(BaselineData Baseline_data);
- inline void flush_baseline_data();
+ inline bool HasBaselineCode() const;
+ inline Code baseline_code(AcquireLoadTag) const;
+ inline void set_baseline_code(Code baseline_code, ReleaseStoreTag);
+ inline void FlushBaselineCode();
inline BytecodeArray GetActiveBytecodeArray() const;
inline void SetActiveBytecodeArray(BytecodeArray bytecode);
@@ -414,7 +410,7 @@ class SharedFunctionInfo
inline bool HasSharedName() const;
// [flags] Bit field containing various flags about the function.
- DECL_INT32_ACCESSORS(flags)
+ DECL_RELAXED_INT32_ACCESSORS(flags)
DECL_UINT8_ACCESSORS(flags2)
// True if the outer class scope contains a private brand for
@@ -673,6 +669,10 @@ class SharedFunctionInfo
inline uint16_t get_property_estimate_from_literal(FunctionLiteral* literal);
+ // For ease of use of the BITFIELD macro.
+ inline int32_t relaxed_flags() const;
+ inline void set_relaxed_flags(int32_t flags);
+
template <typename Impl>
friend class FactoryBase;
friend class V8HeapExplorer;
diff --git a/deps/v8/src/objects/shared-function-info.tq b/deps/v8/src/objects/shared-function-info.tq
index 0b0930b6b4..4f80f568dc 100644
--- a/deps/v8/src/objects/shared-function-info.tq
+++ b/deps/v8/src/objects/shared-function-info.tq
@@ -14,13 +14,6 @@ extern class InterpreterData extends Struct {
@ifnot(V8_EXTERNAL_CODE_SPACE) interpreter_trampoline: Code;
}
-@generatePrint
-extern class BaselineData extends Struct {
- @if(V8_EXTERNAL_CODE_SPACE) baseline_code: CodeDataContainer;
- @ifnot(V8_EXTERNAL_CODE_SPACE) baseline_code: Code;
- data: BytecodeArray|InterpreterData;
-}
-
type FunctionKind extends uint8 constexpr 'FunctionKind';
type FunctionSyntaxKind extends uint8 constexpr 'FunctionSyntaxKind';
type BailoutReason extends uint8 constexpr 'BailoutReason';
@@ -63,11 +56,17 @@ class SharedFunctionInfo extends HeapObject {
name_or_scope_info: String|NoSharedNameSentinel|ScopeInfo;
outer_scope_info_or_feedback_metadata: HeapObject;
script_or_debug_info: Script|DebugInfo|Undefined;
- // [length]: The function length - usually the number of declared parameters.
+ // [length]: The function length - usually the number of declared parameters
+ // (always without the receiver).
// Use up to 2^16-2 parameters (16 bits of values, where one is reserved for
// kDontAdaptArgumentsSentinel). The value is only reliable when the function
// has been compiled.
length: int16;
+ // [formal_parameter_count]: The number of declared parameters (or the special
+ // value kDontAdaptArgumentsSentinel to indicate that arguments are passed
+ // unaltered).
+ // In contrast to [length], formal_parameter_count includes the receiver if
+ // kJSArgcIncludesReceiver is true.
formal_parameter_count: uint16;
function_token_offset: uint16;
// [expected_nof_properties]: Expected number of properties for the
@@ -84,6 +83,40 @@ class SharedFunctionInfo extends HeapObject {
@if(V8_SFI_HAS_UNIQUE_ID) unique_id: int32;
}
+const kDontAdaptArgumentsSentinel: constexpr int32
+ generates 'kDontAdaptArgumentsSentinel';
+const kJSArgcIncludesReceiver:
+ constexpr bool generates 'kJSArgcIncludesReceiver';
+@export
+macro LoadSharedFunctionInfoFormalParameterCountWithoutReceiver(
+ sfi: SharedFunctionInfo): uint16 {
+ let formalParameterCount = sfi.formal_parameter_count;
+ if (kJSArgcIncludesReceiver) {
+ if (Convert<int32>(formalParameterCount) != kDontAdaptArgumentsSentinel) {
+ formalParameterCount = Convert<uint16>(formalParameterCount - 1);
+ }
+ }
+ return formalParameterCount;
+}
+
+@export
+macro LoadSharedFunctionInfoFormalParameterCountWithReceiver(
+ sfi: SharedFunctionInfo): uint16 {
+ let formalParameterCount = sfi.formal_parameter_count;
+ if (!kJSArgcIncludesReceiver) {
+ if (Convert<int32>(formalParameterCount) != kDontAdaptArgumentsSentinel) {
+ formalParameterCount = Convert<uint16>(formalParameterCount + 1);
+ }
+ }
+ return formalParameterCount;
+}
+
+@export
+macro IsSharedFunctionInfoDontAdaptArguments(sfi: SharedFunctionInfo): bool {
+ const formalParameterCount = sfi.formal_parameter_count;
+ return Convert<int32>(formalParameterCount) == kDontAdaptArgumentsSentinel;
+}
+
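The two exported macros implement one conversion rule in both directions, guarded by the sentinel. A standalone C++ sketch of the same arithmetic (the sentinel value and the kJSArgcIncludesReceiver setting are modeled as plain constants here; the real definitions live in src/common/globals.h):

#include <cstdint>

// Modeled constants. The sentinel is assumed to be the one reserved 16-bit
// value mentioned in the field comment above; treat it as a placeholder.
constexpr int32_t kDontAdaptArgumentsSentinel = 0xFFFF;  // placeholder
constexpr bool kJSArgcIncludesReceiver = true;           // build-time choice

uint16_t WithoutReceiver(uint16_t stored) {
  if (kJSArgcIncludesReceiver &&
      static_cast<int32_t>(stored) != kDontAdaptArgumentsSentinel) {
    return static_cast<uint16_t>(stored - 1);  // drop the receiver slot
  }
  return stored;
}

uint16_t WithReceiver(uint16_t stored) {
  if (!kJSArgcIncludesReceiver &&
      static_cast<int32_t>(stored) != kDontAdaptArgumentsSentinel) {
    return static_cast<uint16_t>(stored + 1);  // add the receiver slot
  }
  return stored;
}

IsSharedFunctionInfoDontAdaptArguments is then just the sentinel comparison, with no adjustment in either direction.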
@abstract
@export
@customCppClass
diff --git a/deps/v8/src/objects/tagged-field.h b/deps/v8/src/objects/tagged-field.h
index 7faf9e9ac9..d9fc0bb102 100644
--- a/deps/v8/src/objects/tagged-field.h
+++ b/deps/v8/src/objects/tagged-field.h
@@ -49,7 +49,7 @@ class TaggedField : public AllStatic {
int offset = 0);
static inline void Relaxed_Store(HeapObject host, T value);
- static void Relaxed_Store(HeapObject host, int offset, T value);
+ static inline void Relaxed_Store(HeapObject host, int offset, T value);
static inline T Acquire_Load(HeapObject host, int offset = 0);
static inline T Acquire_Load_No_Unpack(PtrComprCageBase cage_base,
diff --git a/deps/v8/src/objects/tagged-impl.h b/deps/v8/src/objects/tagged-impl.h
index e7278a1245..6b01c6fe62 100644
--- a/deps/v8/src/objects/tagged-impl.h
+++ b/deps/v8/src/objects/tagged-impl.h
@@ -6,7 +6,6 @@
#define V8_OBJECTS_TAGGED_IMPL_H_
#include "include/v8-internal.h"
-#include "include/v8.h"
#include "src/common/globals.h"
namespace v8 {
diff --git a/deps/v8/src/objects/value-serializer.cc b/deps/v8/src/objects/value-serializer.cc
index 53bb0cf927..a84cf4e2c4 100644
--- a/deps/v8/src/objects/value-serializer.cc
+++ b/deps/v8/src/objects/value-serializer.cc
@@ -6,8 +6,10 @@
#include <type_traits>
+#include "include/v8-maybe.h"
#include "include/v8-value-serializer-version.h"
-#include "include/v8.h"
+#include "include/v8-value-serializer.h"
+#include "include/v8-wasm.h"
#include "src/api/api-inl.h"
#include "src/base/logging.h"
#include "src/base/platform/wrappers.h"
diff --git a/deps/v8/src/objects/value-serializer.h b/deps/v8/src/objects/value-serializer.h
index 8a381d1691..c6363e67c6 100644
--- a/deps/v8/src/objects/value-serializer.h
+++ b/deps/v8/src/objects/value-serializer.h
@@ -8,7 +8,7 @@
#include <cstdint>
#include <vector>
-#include "include/v8.h"
+#include "include/v8-value-serializer.h"
#include "src/base/compiler-specific.h"
#include "src/base/macros.h"
#include "src/base/strings.h"
diff --git a/deps/v8/src/objects/visitors.h b/deps/v8/src/objects/visitors.h
index a784cec756..d527cb0a9a 100644
--- a/deps/v8/src/objects/visitors.h
+++ b/deps/v8/src/objects/visitors.h
@@ -168,7 +168,7 @@ class ObjectVisitor {
virtual void VisitOffHeapTarget(Code host, RelocInfo* rinfo) {}
// Visits the relocation info using the given iterator.
- virtual void VisitRelocInfo(RelocIterator* it);
+ void VisitRelocInfo(RelocIterator* it);
// Visits the object's map pointer, decoding as necessary
virtual void VisitMapPointer(HeapObject host) { UNREACHABLE(); }
diff --git a/deps/v8/src/parsing/parse-info.h b/deps/v8/src/parsing/parse-info.h
index c6bcb221ea..57153c345b 100644
--- a/deps/v8/src/parsing/parse-info.h
+++ b/deps/v8/src/parsing/parse-info.h
@@ -9,7 +9,6 @@
#include <memory>
#include <vector>
-#include "include/v8.h"
#include "src/base/bit-field.h"
#include "src/base/export-template.h"
#include "src/base/logging.h"
diff --git a/deps/v8/src/parsing/parser-base.h b/deps/v8/src/parsing/parser-base.h
index 108b11edc8..ef2fb7ef3e 100644
--- a/deps/v8/src/parsing/parser-base.h
+++ b/deps/v8/src/parsing/parser-base.h
@@ -27,12 +27,15 @@
#include "src/parsing/parse-info.h"
#include "src/parsing/scanner.h"
#include "src/parsing/token.h"
+#include "src/regexp/regexp.h"
#include "src/utils/pointer-with-payload.h"
#include "src/zone/zone-chunk-list.h"
namespace v8 {
namespace internal {
+class PreParserIdentifier;
+
enum FunctionNameValidity {
kFunctionNameIsStrictReserved,
kSkipFunctionNameCheck,
@@ -1074,22 +1077,24 @@ class ParserBase {
}
// Report syntax errors.
- V8_NOINLINE void ReportMessage(MessageTemplate message) {
- Scanner::Location source_location = scanner()->location();
- impl()->ReportMessageAt(source_location, message,
- static_cast<const char*>(nullptr));
+ template <typename... Ts>
+ V8_NOINLINE void ReportMessage(MessageTemplate message, const Ts&... args) {
+ ReportMessageAt(scanner()->location(), message, args...);
}
- template <typename T>
- V8_NOINLINE void ReportMessage(MessageTemplate message, T arg) {
- Scanner::Location source_location = scanner()->location();
- impl()->ReportMessageAt(source_location, message, arg);
+ template <typename... Ts>
+ V8_NOINLINE void ReportMessageAt(Scanner::Location source_location,
+ MessageTemplate message, const Ts&... args) {
+ impl()->pending_error_handler()->ReportMessageAt(
+ source_location.beg_pos, source_location.end_pos, message, args...);
+ scanner()->set_parser_error();
}
- V8_NOINLINE void ReportMessageAt(Scanner::Location location,
- MessageTemplate message) {
- impl()->ReportMessageAt(location, message,
- static_cast<const char*>(nullptr));
+ V8_NOINLINE void ReportMessageAt(Scanner::Location source_location,
+ MessageTemplate message,
+ const PreParserIdentifier& arg0) {
+ ReportMessageAt(source_location, message,
+ impl()->PreParserIdentifierToAstRawString(arg0));
}
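The replaced overload pair becomes a single variadic template that forwards any number of message arguments to the pending error handler. A self-contained sketch of this forwarding pattern (stub handler and illustrative names, not V8's):

#include <iostream>

// Stub for PendingCompilationErrorHandler: accepts any argument list.
struct PendingErrorHandler {
  template <typename... Ts>
  void ReportMessageAt(int beg, int end, const char* message,
                       const Ts&... args) {
    std::cout << message << " at [" << beg << ", " << end << ")";
    ((std::cout << ' ' << args), ...);  // fold over the forwarded args
    std::cout << '\n';
  }
};

// Same shape as the new ReportMessage/ReportMessageAt pair: one variadic
// template replaces the zero- and one-argument overloads.
template <typename... Ts>
void ReportMessageAt(PendingErrorHandler* handler, int beg, int end,
                     const char* message, const Ts&... args) {
  handler->ReportMessageAt(beg, end, message, args...);
}

int main() {
  PendingErrorHandler h;
  ReportMessageAt(&h, 0, 3, "kMalformedRegExp", "pattern", "stack overflow");
}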
V8_NOINLINE void ReportUnexpectedToken(Token::Value token);
@@ -1122,6 +1127,12 @@ class ParserBase {
}
V8_INLINE IdentifierT ParseAndClassifyIdentifier(Token::Value token);
+
+ // Similar logic to ParseAndClassifyIdentifier but the identifier is
+ // already parsed in prop_info. Returns false if this is an invalid
+ // identifier or an invalid use of the "arguments" keyword.
+ V8_INLINE bool ClassifyPropertyIdentifier(Token::Value token,
+ ParsePropertyInfo* prop_info);
// Parses an identifier or a strict mode future reserved word. Allows passing
// in function_kind for the case of parsing the identifier in a function
// expression, where the relevant "function_kind" bit is of the function being
@@ -1140,6 +1151,11 @@ class ParserBase {
ExpressionT ParsePropertyOrPrivatePropertyName();
+ const AstRawString* GetNextSymbolForRegExpLiteral() const {
+ return scanner()->NextSymbol(ast_value_factory());
+ }
+ bool ValidateRegExpLiteral(const AstRawString* pattern, RegExpFlags flags,
+ RegExpError* regexp_error);
ExpressionT ParseRegExpLiteral();
ExpressionT ParseBindingPattern();
@@ -1634,8 +1650,39 @@ void ParserBase<Impl>::ReportUnexpectedToken(Token::Value token) {
}
template <typename Impl>
+bool ParserBase<Impl>::ClassifyPropertyIdentifier(
+ Token::Value next, ParsePropertyInfo* prop_info) {
+  // Updates made here must be reflected in ParseAndClassifyIdentifier.
+ if (V8_LIKELY(base::IsInRange(next, Token::IDENTIFIER, Token::ASYNC))) {
+ if (V8_UNLIKELY(impl()->IsArguments(prop_info->name) &&
+ scope()->ShouldBanArguments())) {
+ ReportMessage(
+ MessageTemplate::kArgumentsDisallowedInInitializerAndStaticBlock);
+ return false;
+ }
+ return true;
+ }
+
+ if (!Token::IsValidIdentifier(next, language_mode(), is_generator(),
+ is_await_as_identifier_disallowed())) {
+ ReportUnexpectedToken(next);
+ return false;
+ }
+
+ DCHECK(!prop_info->is_computed_name);
+
+ if (next == Token::AWAIT) {
+ DCHECK(!is_async_function());
+ expression_scope()->RecordAsyncArrowParametersError(
+ scanner()->peek_location(), MessageTemplate::kAwaitBindingIdentifier);
+ }
+ return true;
+}
+
+template <typename Impl>
typename ParserBase<Impl>::IdentifierT
ParserBase<Impl>::ParseAndClassifyIdentifier(Token::Value next) {
+  // Updates made here must be reflected in ClassifyPropertyIdentifier.
DCHECK_EQ(scanner()->current_token(), next);
if (V8_LIKELY(base::IsInRange(next, Token::IDENTIFIER, Token::ASYNC))) {
IdentifierT name = impl()->GetIdentifier();
@@ -1746,6 +1793,25 @@ ParserBase<Impl>::ParsePropertyOrPrivatePropertyName() {
}
template <typename Impl>
+bool ParserBase<Impl>::ValidateRegExpLiteral(const AstRawString* pattern,
+ RegExpFlags flags,
+ RegExpError* regexp_error) {
+ // TODO(jgruber): If already validated in the preparser, skip validation in
+ // the parser.
+ DisallowGarbageCollection no_gc;
+ const unsigned char* d = pattern->raw_data();
+ if (pattern->is_one_byte()) {
+ return RegExp::VerifySyntax(zone(), stack_limit(),
+ static_cast<const uint8_t*>(d),
+ pattern->length(), flags, regexp_error, no_gc);
+ } else {
+ return RegExp::VerifySyntax(zone(), stack_limit(),
+ reinterpret_cast<const uint16_t*>(d),
+ pattern->length(), flags, regexp_error, no_gc);
+ }
+}
+
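ValidateRegExpLiteral only chooses which instantiation of RegExp::VerifySyntax to call, based on the pattern's character width. The dispatch shape, sketched standalone (std::vector payloads and a stub verifier stand in for V8's string and regexp types):

#include <cstddef>
#include <cstdint>
#include <variant>
#include <vector>

// Stand-ins for V8's one-byte / two-byte string payloads.
using OneByte = std::vector<uint8_t>;
using TwoByte = std::vector<uint16_t>;

// Placeholder for RegExp::VerifySyntax, templated over character width.
template <typename Char>
bool VerifySyntax(const Char* chars, size_t length) {
  return length == 0 || chars != nullptr;
}

// Same dispatch as ValidateRegExpLiteral: pick the instantiation that
// matches the pattern's underlying encoding.
bool Validate(const std::variant<OneByte, TwoByte>& pattern) {
  if (const auto* one = std::get_if<OneByte>(&pattern)) {
    return VerifySyntax(one->data(), one->size());
  }
  const auto& two = std::get<TwoByte>(pattern);
  return VerifySyntax(two.data(), two.size());
}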
+template <typename Impl>
typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseRegExpLiteral() {
int pos = peek_position();
if (!scanner()->ScanRegExpPattern()) {
@@ -1754,15 +1820,22 @@ typename ParserBase<Impl>::ExpressionT ParserBase<Impl>::ParseRegExpLiteral() {
return impl()->FailureExpression();
}
- IdentifierT js_pattern = impl()->GetNextSymbol();
- Maybe<int> flags = scanner()->ScanRegExpFlags();
- if (flags.IsNothing()) {
+ const AstRawString* js_pattern = GetNextSymbolForRegExpLiteral();
+ base::Optional<RegExpFlags> flags = scanner()->ScanRegExpFlags();
+ if (!flags.has_value()) {
Next();
ReportMessage(MessageTemplate::kMalformedRegExpFlags);
return impl()->FailureExpression();
}
Next();
- return factory()->NewRegExpLiteral(js_pattern, flags.FromJust(), pos);
+ RegExpError regexp_error;
+ if (!ValidateRegExpLiteral(js_pattern, flags.value(), &regexp_error)) {
+ if (RegExpErrorIsStackOverflow(regexp_error)) set_stack_overflow();
+ ReportMessage(MessageTemplate::kMalformedRegExp, js_pattern,
+ RegExpErrorString(regexp_error));
+ return impl()->FailureExpression();
+ }
+ return factory()->NewRegExpLiteral(js_pattern, flags.value(), pos);
}
template <typename Impl>
@@ -2514,7 +2587,6 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ParsePropertyInfo* prop_info,
IdentifierT name = prop_info->name;
ParseFunctionFlags function_flags = prop_info->function_flags;
- ParsePropertyKind kind = prop_info->kind;
switch (prop_info->kind) {
case ParsePropertyKind::kSpread:
@@ -2562,19 +2634,10 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ParsePropertyInfo* prop_info,
// IdentifierReference Initializer?
DCHECK_EQ(function_flags, ParseFunctionFlag::kIsNormal);
- if (!Token::IsValidIdentifier(name_token, language_mode(), is_generator(),
- is_await_as_identifier_disallowed())) {
- ReportUnexpectedToken(Next());
+ if (!ClassifyPropertyIdentifier(name_token, prop_info)) {
return impl()->NullLiteralProperty();
}
- DCHECK(!prop_info->is_computed_name);
-
- if (name_token == Token::AWAIT) {
- DCHECK(!is_async_function());
- expression_scope()->RecordAsyncArrowParametersError(
- next_loc, MessageTemplate::kAwaitBindingIdentifier);
- }
ExpressionT lhs =
impl()->ExpressionFromIdentifier(name, next_loc.beg_pos);
if (!IsAssignableIdentifier(lhs)) {
@@ -2637,7 +2700,7 @@ ParserBase<Impl>::ParseObjectPropertyDefinition(ParsePropertyInfo* prop_info,
case ParsePropertyKind::kAccessorGetter:
case ParsePropertyKind::kAccessorSetter: {
DCHECK_EQ(function_flags, ParseFunctionFlag::kIsNormal);
- bool is_get = kind == ParsePropertyKind::kAccessorGetter;
+ bool is_get = prop_info->kind == ParsePropertyKind::kAccessorGetter;
expression_scope()->RecordPatternError(
Scanner::Location(next_loc.beg_pos, end_position()),
diff --git a/deps/v8/src/parsing/parser.h b/deps/v8/src/parsing/parser.h
index 6b50ed134c..c5cc0c8030 100644
--- a/deps/v8/src/parsing/parser.h
+++ b/deps/v8/src/parsing/parser.h
@@ -701,25 +701,10 @@ class V8_EXPORT_PRIVATE Parser : public NON_EXPORTED_BASE(ParserBase<Parser>) {
return NewThrowError(Runtime::kNewTypeError, message, arg, pos);
}
- // Reporting errors.
- void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate message, const char* arg = nullptr) {
- pending_error_handler()->ReportMessageAt(
- source_location.beg_pos, source_location.end_pos, message, arg);
- scanner_.set_parser_error();
- }
-
// Dummy implementation. The parser should never have a unidentifiable
// error.
V8_INLINE void ReportUnidentifiableError() { UNREACHABLE(); }
- void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate message, const AstRawString* arg) {
- pending_error_handler()->ReportMessageAt(
- source_location.beg_pos, source_location.end_pos, message, arg);
- scanner_.set_parser_error();
- }
-
const AstRawString* GetRawNameFromIdentifier(const AstRawString* arg) {
return arg;
}
diff --git a/deps/v8/src/parsing/pending-compilation-error-handler.cc b/deps/v8/src/parsing/pending-compilation-error-handler.cc
index 60bc8ada27..4756628ca7 100644
--- a/deps/v8/src/parsing/pending-compilation-error-handler.cc
+++ b/deps/v8/src/parsing/pending-compilation-error-handler.cc
@@ -19,49 +19,53 @@ namespace internal {
void PendingCompilationErrorHandler::MessageDetails::SetString(
Handle<String> string, Isolate* isolate) {
- DCHECK_NE(type_, kMainThreadHandle);
- type_ = kMainThreadHandle;
- arg_handle_ = string;
+ DCHECK_NE(args_[0].type, kMainThreadHandle);
+ args_[0].type = kMainThreadHandle;
+ args_[0].js_string = string;
}
void PendingCompilationErrorHandler::MessageDetails::SetString(
Handle<String> string, LocalIsolate* isolate) {
- DCHECK_NE(type_, kMainThreadHandle);
- type_ = kMainThreadHandle;
- arg_handle_ = isolate->heap()->NewPersistentHandle(string);
+ DCHECK_NE(args_[0].type, kMainThreadHandle);
+ args_[0].type = kMainThreadHandle;
+ args_[0].js_string = isolate->heap()->NewPersistentHandle(string);
}
template <typename IsolateT>
void PendingCompilationErrorHandler::MessageDetails::Prepare(
IsolateT* isolate) {
- switch (type_) {
- case kAstRawString:
- return SetString(arg_->string(), isolate);
-
- case kNone:
- case kConstCharString:
- // We can delay allocation until ArgumentString(isolate).
- // TODO(leszeks): We don't actually have to transfer this string, since
- // it's a root.
- return;
-
- case kMainThreadHandle:
- // The message details might already be prepared, so skip them if this is
- // the case.
- return;
+ for (int i = 0; i < kMaxArgumentCount; i++) {
+ switch (args_[i].type) {
+ case kAstRawString:
+ return SetString(args_[i].ast_string->string(), isolate);
+
+ case kNone:
+ case kConstCharString:
+ // We can delay allocation until ArgString(isolate).
+ return;
+
+ case kMainThreadHandle:
+ // The message details might already be prepared, so skip them if this
+ // is the case.
+ return;
+ }
}
}
-Handle<String> PendingCompilationErrorHandler::MessageDetails::ArgumentString(
- Isolate* isolate) const {
- switch (type_) {
+Handle<String> PendingCompilationErrorHandler::MessageDetails::ArgString(
+ Isolate* isolate, int index) const {
+ // `index` may be >= argc; in that case we return a default value to pass on
+ // elsewhere.
+ DCHECK_LT(index, kMaxArgumentCount);
+ switch (args_[index].type) {
case kMainThreadHandle:
- return arg_handle_;
+ return args_[index].js_string;
case kNone:
- return isolate->factory()->undefined_string();
+ return Handle<String>::null();
case kConstCharString:
return isolate->factory()
- ->NewStringFromUtf8(base::CStrVector(char_arg_), AllocationType::kOld)
+ ->NewStringFromUtf8(base::CStrVector(args_[index].c_string),
+ AllocationType::kOld)
.ToHandleChecked();
case kAstRawString:
UNREACHABLE();
@@ -93,6 +97,17 @@ void PendingCompilationErrorHandler::ReportMessageAt(int start_position,
error_details_ = MessageDetails(start_position, end_position, message, arg);
}
+void PendingCompilationErrorHandler::ReportMessageAt(int start_position,
+ int end_position,
+ MessageTemplate message,
+ const AstRawString* arg0,
+ const char* arg1) {
+ if (has_pending_error_) return;
+ has_pending_error_ = true;
+ error_details_ =
+ MessageDetails(start_position, end_position, message, arg0, arg1);
+}
+
void PendingCompilationErrorHandler::ReportWarningAt(int start_position,
int end_position,
MessageTemplate message,
@@ -119,7 +134,8 @@ void PendingCompilationErrorHandler::ReportWarnings(
for (const MessageDetails& warning : warning_messages_) {
MessageLocation location = warning.GetLocation(script);
- Handle<String> argument = warning.ArgumentString(isolate);
+ Handle<String> argument = warning.ArgString(isolate, 0);
+ DCHECK_LT(warning.ArgCount(), 2); // Arg1 is only used for errors.
Handle<JSMessageObject> message =
MessageHandler::MakeMessageObject(isolate, warning.message(), &location,
argument, Handle<FixedArray>::null());
@@ -160,12 +176,13 @@ void PendingCompilationErrorHandler::ThrowPendingError(
if (!has_pending_error_) return;
MessageLocation location = error_details_.GetLocation(script);
- Handle<String> argument = error_details_.ArgumentString(isolate);
+ Handle<String> arg0 = error_details_.ArgString(isolate, 0);
+ Handle<String> arg1 = error_details_.ArgString(isolate, 1);
isolate->debug()->OnCompileError(script);
Factory* factory = isolate->factory();
Handle<JSObject> error =
- factory->NewSyntaxError(error_details_.message(), argument);
+ factory->NewSyntaxError(error_details_.message(), arg0, arg1);
isolate->ThrowAt(error, &location);
}
@@ -173,7 +190,8 @@ Handle<String> PendingCompilationErrorHandler::FormatErrorMessageForTest(
Isolate* isolate) {
error_details_.Prepare(isolate);
return MessageFormatter::Format(isolate, error_details_.message(),
- error_details_.ArgumentString(isolate));
+ error_details_.ArgString(isolate, 0),
+ error_details_.ArgString(isolate, 1));
}
} // namespace internal
diff --git a/deps/v8/src/parsing/pending-compilation-error-handler.h b/deps/v8/src/parsing/pending-compilation-error-handler.h
index 31e765d514..9384e94df7 100644
--- a/deps/v8/src/parsing/pending-compilation-error-handler.h
+++ b/deps/v8/src/parsing/pending-compilation-error-handler.h
@@ -25,9 +25,7 @@ class Script;
// compilation phases.
class PendingCompilationErrorHandler {
public:
- PendingCompilationErrorHandler()
- : has_pending_error_(false), stack_overflow_(false) {}
-
+ PendingCompilationErrorHandler() = default;
PendingCompilationErrorHandler(const PendingCompilationErrorHandler&) =
delete;
PendingCompilationErrorHandler& operator=(
@@ -39,6 +37,10 @@ class PendingCompilationErrorHandler {
void ReportMessageAt(int start_position, int end_position,
MessageTemplate message, const AstRawString* arg);
+ void ReportMessageAt(int start_position, int end_position,
+ MessageTemplate message, const AstRawString* arg0,
+ const char* arg1);
+
void ReportWarningAt(int start_position, int end_position,
MessageTemplate message, const char* arg = nullptr);
@@ -85,24 +87,45 @@ class PendingCompilationErrorHandler {
MessageDetails()
: start_position_(-1),
end_position_(-1),
- message_(MessageTemplate::kNone),
- type_(kNone) {}
+ message_(MessageTemplate::kNone) {}
+ MessageDetails(int start_position, int end_position,
+ MessageTemplate message, const AstRawString* arg0)
+ : start_position_(start_position),
+ end_position_(end_position),
+ message_(message),
+ args_{MessageArgument{arg0}, MessageArgument{}} {}
MessageDetails(int start_position, int end_position,
- MessageTemplate message, const AstRawString* arg)
+ MessageTemplate message, const AstRawString* arg0,
+ const char* arg1)
: start_position_(start_position),
end_position_(end_position),
message_(message),
- arg_(arg),
- type_(arg ? kAstRawString : kNone) {}
+ args_{MessageArgument{arg0}, MessageArgument{arg1}} {
+ DCHECK_NOT_NULL(arg0);
+ DCHECK_NOT_NULL(arg1);
+ }
MessageDetails(int start_position, int end_position,
- MessageTemplate message, const char* char_arg)
+ MessageTemplate message, const char* arg0)
: start_position_(start_position),
end_position_(end_position),
message_(message),
- char_arg_(char_arg),
- type_(char_arg_ ? kConstCharString : kNone) {}
+ args_{MessageArgument{arg0}, MessageArgument{}} {}
+
+ Handle<String> ArgString(Isolate* isolate, int index) const;
+ int ArgCount() const {
+ int argc = 0;
+ for (int i = 0; i < kMaxArgumentCount; i++) {
+ if (args_[i].type == kNone) break;
+ argc++;
+ }
+#ifdef DEBUG
+ for (int i = argc; i < kMaxArgumentCount; i++) {
+ DCHECK_EQ(args_[i].type, kNone);
+ }
+#endif // DEBUG
+ return argc;
+ }
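MessageDetails now carries a fixed two-slot argument array, and ArgCount walks it up to the first empty slot while debug-checking that filled slots are contiguous. A standalone model (illustrative types; only the const-char argument kind is modeled here):

#include <cassert>

enum Type { kNone, kConstCharString };

struct MessageArgument {
  const char* c_string = nullptr;
  Type type = kNone;
};

constexpr int kMaxArgumentCount = 2;

struct MessageDetails {
  MessageArgument args[kMaxArgumentCount];

  int ArgCount() const {
    int argc = 0;
    for (int i = 0; i < kMaxArgumentCount; i++) {
      if (args[i].type == kNone) break;
      argc++;
    }
    // No filled slot may follow an empty one.
    for (int i = argc; i < kMaxArgumentCount; i++) {
      assert(args[i].type == kNone);
    }
    return argc;
  }
};

int main() {
  MessageDetails d;
  d.args[0] = MessageArgument{"pattern", kConstCharString};
  assert(d.ArgCount() == 1);
}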
- Handle<String> ArgumentString(Isolate* isolate) const;
MessageLocation GetLocation(Handle<Script> script) const;
MessageTemplate message() const { return message_; }
@@ -117,19 +140,32 @@ class PendingCompilationErrorHandler {
int start_position_;
int end_position_;
+
MessageTemplate message_;
- union {
- const AstRawString* arg_;
- const char* char_arg_;
- Handle<String> arg_handle_;
+
+ struct MessageArgument final {
+ constexpr MessageArgument() : ast_string(nullptr), type(kNone) {}
+ explicit constexpr MessageArgument(const AstRawString* s)
+ : ast_string(s), type(s == nullptr ? kNone : kAstRawString) {}
+ explicit constexpr MessageArgument(const char* s)
+ : c_string(s), type(s == nullptr ? kNone : kConstCharString) {}
+
+ union {
+ const AstRawString* ast_string;
+ const char* c_string;
+ Handle<String> js_string;
+ };
+ Type type;
};
- Type type_;
+
+ static constexpr int kMaxArgumentCount = 2;
+ MessageArgument args_[kMaxArgumentCount];
};
void ThrowPendingError(Isolate* isolate, Handle<Script> script) const;
- bool has_pending_error_;
- bool stack_overflow_;
+ bool has_pending_error_ = false;
+ bool stack_overflow_ = false;
bool unidentifiable_error_ = false;
MessageDetails error_details_;
diff --git a/deps/v8/src/parsing/preparse-data.cc b/deps/v8/src/parsing/preparse-data.cc
index 1643c6ba1a..f368a11f9a 100644
--- a/deps/v8/src/parsing/preparse-data.cc
+++ b/deps/v8/src/parsing/preparse-data.cc
@@ -666,12 +666,13 @@ void BaseConsumedPreparseData<Data>::RestoreDataForScope(
scope->AsDeclarationScope()->RecordNeedsPrivateNameContextChainRecalc();
}
if (ShouldSaveClassVariableIndexField::decode(scope_data_flags)) {
- Variable* var;
- // An anonymous class whose class variable needs to be saved do not
+ Variable* var = scope->AsClassScope()->class_variable();
+ // An anonymous class whose class variable needs to be saved might not
// have the class variable created during reparse since we skip parsing
// the inner scopes that contain potential access to static private
// methods. So create it now.
- if (scope->AsClassScope()->is_anonymous_class()) {
+ if (var == nullptr) {
+ DCHECK(scope->AsClassScope()->is_anonymous_class());
var = scope->AsClassScope()->DeclareClassVariable(
ast_value_factory, nullptr, kNoSourcePosition);
AstNodeFactory factory(ast_value_factory, zone);
@@ -679,9 +680,6 @@ void BaseConsumedPreparseData<Data>::RestoreDataForScope(
factory.NewVariableDeclaration(kNoSourcePosition);
scope->declarations()->Add(declaration);
declaration->set_var(var);
- } else {
- var = scope->AsClassScope()->class_variable();
- DCHECK_NOT_NULL(var);
}
var->set_is_used();
var->ForceContextAllocation();
diff --git a/deps/v8/src/parsing/preparser.h b/deps/v8/src/parsing/preparser.h
index 1949e7f8a7..746802a9aa 100644
--- a/deps/v8/src/parsing/preparser.h
+++ b/deps/v8/src/parsing/preparser.h
@@ -537,7 +537,7 @@ class PreParserFactory {
PreParserExpression NewTheHoleLiteral() {
return PreParserExpression::Default();
}
- PreParserExpression NewRegExpLiteral(const PreParserIdentifier& js_pattern,
+ PreParserExpression NewRegExpLiteral(const AstRawString* js_pattern,
int js_flags, int pos) {
return PreParserExpression::Default();
}
@@ -1455,12 +1455,9 @@ class PreParser : public ParserBase<PreParser> {
return PreParserExpression::Default();
}
- // Reporting errors.
- void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate message, const char* arg = nullptr) {
- pending_error_handler()->ReportMessageAt(
- source_location.beg_pos, source_location.end_pos, message, arg);
- scanner()->set_parser_error();
+ V8_INLINE const AstRawString* PreParserIdentifierToAstRawString(
+ const PreParserIdentifier& x) {
+ return x.string_;
}
V8_INLINE void ReportUnidentifiableError() {
@@ -1468,19 +1465,6 @@ class PreParser : public ParserBase<PreParser> {
scanner()->set_parser_error();
}
- V8_INLINE void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate message,
- const PreParserIdentifier& arg) {
- ReportMessageAt(source_location, message, arg.string_);
- }
-
- void ReportMessageAt(Scanner::Location source_location,
- MessageTemplate message, const AstRawString* arg) {
- pending_error_handler()->ReportMessageAt(
- source_location.beg_pos, source_location.end_pos, message, arg);
- scanner()->set_parser_error();
- }
-
const AstRawString* GetRawNameFromIdentifier(const PreParserIdentifier& arg) {
return arg.string_;
}
diff --git a/deps/v8/src/parsing/scanner-character-streams.cc b/deps/v8/src/parsing/scanner-character-streams.cc
index becc72c12d..a4748f0c33 100644
--- a/deps/v8/src/parsing/scanner-character-streams.cc
+++ b/deps/v8/src/parsing/scanner-character-streams.cc
@@ -7,7 +7,8 @@
#include <memory>
#include <vector>
-#include "include/v8.h"
+#include "include/v8-callbacks.h"
+#include "include/v8-primitive.h"
#include "src/base/strings.h"
#include "src/common/globals.h"
#include "src/handles/handles.h"
diff --git a/deps/v8/src/parsing/scanner-character-streams.h b/deps/v8/src/parsing/scanner-character-streams.h
index 09181356f0..8665ea0b4b 100644
--- a/deps/v8/src/parsing/scanner-character-streams.h
+++ b/deps/v8/src/parsing/scanner-character-streams.h
@@ -7,7 +7,7 @@
#include <memory>
-#include "include/v8.h" // for v8::ScriptCompiler
+#include "include/v8-script.h" // for v8::ScriptCompiler
#include "src/common/globals.h"
namespace v8 {
diff --git a/deps/v8/src/parsing/scanner.cc b/deps/v8/src/parsing/scanner.cc
index b624694295..cbfd399020 100644
--- a/deps/v8/src/parsing/scanner.cc
+++ b/deps/v8/src/parsing/scanner.cc
@@ -978,9 +978,6 @@ bool Scanner::ScanRegExpPattern() {
// worrying whether the following characters are part of the escape
// or not, since any '/', '\\' or '[' is guaranteed to not be part
// of the escape sequence.
-
- // TODO(896): At some point, parse RegExps more thoroughly to capture
- // octal esacpes in strict mode.
} else { // Unescaped character.
if (c0_ == '[') in_character_class = true;
if (c0_ == ']') in_character_class = false;
@@ -993,22 +990,21 @@ bool Scanner::ScanRegExpPattern() {
return true;
}
-Maybe<int> Scanner::ScanRegExpFlags() {
+base::Optional<RegExpFlags> Scanner::ScanRegExpFlags() {
DCHECK_EQ(Token::REGEXP_LITERAL, next().token);
- // Scan regular expression flags.
- JSRegExp::Flags flags;
+ RegExpFlags flags;
while (IsIdentifierPart(c0_)) {
- base::Optional<JSRegExp::Flags> maybe_flag = JSRegExp::FlagFromChar(c0_);
- if (!maybe_flag.has_value()) return Nothing<int>();
- JSRegExp::Flags flag = *maybe_flag;
- if (flags & flag) return Nothing<int>();
+ base::Optional<RegExpFlag> maybe_flag = JSRegExp::FlagFromChar(c0_);
+ if (!maybe_flag.has_value()) return {};
+ RegExpFlag flag = maybe_flag.value();
+ if (flags & flag) return {};
Advance();
flags |= flag;
}
next().location.end_pos = source_pos();
- return Just<int>(flags);
+ return flags;
}
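The rewritten scanner loop returns an empty optional on the first unknown or repeated flag and the accumulated set otherwise. The same loop over a plain string, sketched with std::optional in place of base::Optional and an illustrative flag bitmask:

#include <optional>
#include <string>

// Illustrative flag bits; V8's are declared in src/regexp/regexp-flags.h.
enum RegExpFlag : int {
  kGlobal = 1 << 0,
  kIgnoreCase = 1 << 1,
  kMultiline = 1 << 2,
  kSticky = 1 << 3,
};

std::optional<int> FlagFromChar(char c) {
  switch (c) {
    case 'g': return kGlobal;
    case 'i': return kIgnoreCase;
    case 'm': return kMultiline;
    case 'y': return kSticky;
    default:  return std::nullopt;  // unknown flag character
  }
}

// Mirrors Scanner::ScanRegExpFlags: empty on unknown or repeated flags,
// otherwise the accumulated set.
std::optional<int> ScanRegExpFlags(const std::string& s) {
  int flags = 0;
  for (char c : s) {
    std::optional<int> flag = FlagFromChar(c);
    if (!flag.has_value()) return {};
    if (flags & *flag) return {};  // duplicate flag
    flags |= *flag;
  }
  return flags;
}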
const AstRawString* Scanner::CurrentSymbol(
diff --git a/deps/v8/src/parsing/scanner.h b/deps/v8/src/parsing/scanner.h
index 3474f7270d..7ab44d5b20 100644
--- a/deps/v8/src/parsing/scanner.h
+++ b/deps/v8/src/parsing/scanner.h
@@ -10,7 +10,6 @@
#include <algorithm>
#include <memory>
-#include "include/v8.h"
#include "src/base/logging.h"
#include "src/base/strings.h"
#include "src/common/globals.h"
@@ -18,6 +17,7 @@
#include "src/parsing/literal-buffer.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/token.h"
+#include "src/regexp/regexp-flags.h"
#include "src/strings/char-predicates.h"
#include "src/strings/unicode.h"
#include "src/utils/allocation.h"
@@ -399,7 +399,7 @@ class V8_EXPORT_PRIVATE Scanner {
// Returns true if a pattern is scanned.
bool ScanRegExpPattern();
// Scans the input as regular expression flags. Returns the flags on success.
- Maybe<int> ScanRegExpFlags();
+ base::Optional<RegExpFlags> ScanRegExpFlags();
// Scans the input as a template literal
Token::Value ScanTemplateContinuation() {
diff --git a/deps/v8/src/profiler/allocation-tracker.h b/deps/v8/src/profiler/allocation-tracker.h
index 36b9e91883..a33f08c0d0 100644
--- a/deps/v8/src/profiler/allocation-tracker.h
+++ b/deps/v8/src/profiler/allocation-tracker.h
@@ -8,7 +8,9 @@
#include <map>
#include <vector>
+#include "include/v8-persistent-handle.h"
#include "include/v8-profiler.h"
+#include "include/v8-unwinder.h"
#include "src/base/hashmap.h"
#include "src/base/vector.h"
#include "src/handles/handles.h"
diff --git a/deps/v8/src/profiler/cpu-profiler.cc b/deps/v8/src/profiler/cpu-profiler.cc
index a59c9359eb..cf4f549a39 100644
--- a/deps/v8/src/profiler/cpu-profiler.cc
+++ b/deps/v8/src/profiler/cpu-profiler.cc
@@ -7,6 +7,7 @@
#include <unordered_map>
#include <utility>
+#include "include/v8-locker.h"
#include "src/base/lazy-instance.h"
#include "src/base/template-utils.h"
#include "src/debug/debug.h"
@@ -361,6 +362,16 @@ void ProfilerCodeObserver::CodeEventHandler(
CodeEventHandlerInternal(evt_rec);
}
+size_t ProfilerCodeObserver::GetEstimatedMemoryUsage() const {
+ // To avoid race condition in codemap,
+ // for now limit computation in kEagerLogging mode
+ if (!processor_) {
+ return sizeof(*this) + code_map_.GetEstimatedMemoryUsage() +
+ code_entries_.strings().GetStringSize();
+ }
+ return 0;
+}
+
void ProfilerCodeObserver::CodeEventHandlerInternal(
const CodeEventsContainer& evt_rec) {
CodeEventsContainer record = evt_rec;
diff --git a/deps/v8/src/profiler/cpu-profiler.h b/deps/v8/src/profiler/cpu-profiler.h
index b465f827c9..ea14d6c618 100644
--- a/deps/v8/src/profiler/cpu-profiler.h
+++ b/deps/v8/src/profiler/cpu-profiler.h
@@ -268,6 +268,7 @@ class V8_EXPORT_PRIVATE ProfilerCodeObserver : public CodeEventObserver {
CodeEntryStorage* code_entries() { return &code_entries_; }
CodeMap* code_map() { return &code_map_; }
WeakCodeRegistry* weak_code_registry() { return &weak_code_registry_; }
+ size_t GetEstimatedMemoryUsage() const;
void ClearCodeMap();
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.cc b/deps/v8/src/profiler/heap-snapshot-generator.cc
index 231595dae7..1144fdd15e 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.cc
+++ b/deps/v8/src/profiler/heap-snapshot-generator.cc
@@ -763,7 +763,12 @@ class IndexedReferencesExtractor : public ObjectVisitor {
}
void VisitEmbeddedPointer(Code host, RelocInfo* rinfo) override {
- VisitHeapObjectImpl(rinfo->target_object(), -1);
+ HeapObject object = rinfo->target_object();
+ if (host.IsWeakObject(object)) {
+ generator_->SetWeakReference(parent_, next_index_++, object, {});
+ } else {
+ VisitHeapObjectImpl(rinfo->target_object(), -1);
+ }
}
private:
@@ -774,8 +779,11 @@ class IndexedReferencesExtractor : public ObjectVisitor {
generator_->visited_fields_[field_index] = false;
} else {
HeapObject heap_object;
- if (slot.load(cage_base).GetHeapObject(&heap_object)) {
+ auto loaded_value = slot.load(cage_base);
+ if (loaded_value.GetHeapObjectIfStrong(&heap_object)) {
VisitHeapObjectImpl(heap_object, field_index);
+ } else if (loaded_value.GetHeapObjectIfWeak(&heap_object)) {
+ generator_->SetWeakReference(parent_, next_index_++, heap_object, {});
}
}
}
@@ -1223,15 +1231,20 @@ void V8HeapExplorer::ExtractCodeReferences(HeapEntry* entry, Code code) {
return;
}
- TagObject(code.deoptimization_data(), "(code deopt data)");
- SetInternalReference(entry, "deoptimization_data", code.deoptimization_data(),
- Code::kDeoptimizationDataOffset);
if (code.kind() == CodeKind::BASELINE) {
+ TagObject(code.bytecode_or_interpreter_data(), "(interpreter data)");
+ SetInternalReference(entry, "interpreter_data",
+ code.bytecode_or_interpreter_data(),
+ Code::kDeoptimizationDataOrInterpreterDataOffset);
TagObject(code.bytecode_offset_table(), "(bytecode offset table)");
SetInternalReference(entry, "bytecode_offset_table",
code.bytecode_offset_table(),
Code::kPositionTableOffset);
} else {
+ TagObject(code.deoptimization_data(), "(code deopt data)");
+ SetInternalReference(entry, "deoptimization_data",
+ code.deoptimization_data(),
+ Code::kDeoptimizationDataOrInterpreterDataOffset);
TagObject(code.source_position_table(), "(source position table)");
SetInternalReference(entry, "source_position_table",
code.source_position_table(),
@@ -1781,7 +1794,8 @@ void V8HeapExplorer::SetWeakReference(HeapEntry* parent_entry,
}
void V8HeapExplorer::SetWeakReference(HeapEntry* parent_entry, int index,
- Object child_obj, int field_offset) {
+ Object child_obj,
+ base::Optional<int> field_offset) {
if (!IsEssentialObject(child_obj)) {
return;
}
@@ -1789,7 +1803,9 @@ void V8HeapExplorer::SetWeakReference(HeapEntry* parent_entry, int index,
DCHECK_NOT_NULL(child_entry);
parent_entry->SetNamedReference(
HeapGraphEdge::kWeak, names_->GetFormatted("%d", index), child_entry);
- MarkVisitedField(field_offset);
+ if (field_offset.has_value()) {
+ MarkVisitedField(*field_offset);
+ }
}
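The optional offset lets callers without a backing field (such as the embedded-pointer path above, which passes {}) record the weak edge while skipping MarkVisitedField. Sketched with std::optional standing in for base::Optional:

#include <iostream>
#include <optional>

void MarkVisitedField(int offset) {
  std::cout << "visited field at offset " << offset << '\n';
}

// Mirrors the new signature: an edge may or may not be backed by a field.
void SetWeakReference(std::optional<int> field_offset) {
  // ... record the weak edge, then:
  if (field_offset.has_value()) {
    MarkVisitedField(*field_offset);
  }
}

int main() {
  SetWeakReference(8);   // ordinary field-backed weak reference
  SetWeakReference({});  // e.g. a RelocInfo target with no field offset
}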
void V8HeapExplorer::SetDataOrAccessorPropertyReference(
diff --git a/deps/v8/src/profiler/heap-snapshot-generator.h b/deps/v8/src/profiler/heap-snapshot-generator.h
index 2ab13a99bf..1855aee53c 100644
--- a/deps/v8/src/profiler/heap-snapshot-generator.h
+++ b/deps/v8/src/profiler/heap-snapshot-generator.h
@@ -436,7 +436,7 @@ class V8_EXPORT_PRIVATE V8HeapExplorer : public HeapEntriesAllocator {
void SetWeakReference(HeapEntry* parent_entry, const char* reference_name,
Object child_obj, int field_offset);
void SetWeakReference(HeapEntry* parent_entry, int index, Object child_obj,
- int field_offset);
+ base::Optional<int> field_offset);
void SetPropertyReference(HeapEntry* parent_entry, Name reference_name,
Object child,
const char* name_format_string = nullptr,
diff --git a/deps/v8/src/profiler/profile-generator.cc b/deps/v8/src/profiler/profile-generator.cc
index 06aefe9505..34a15159a3 100644
--- a/deps/v8/src/profiler/profile-generator.cc
+++ b/deps/v8/src/profiler/profile-generator.cc
@@ -64,6 +64,11 @@ int SourcePositionTable::GetInliningId(int pc_offset) const {
return it->inlining_id;
}
+size_t SourcePositionTable::Size() const {
+ return sizeof(*this) + pc_offsets_to_lines_.capacity() *
+ sizeof(decltype(pc_offsets_to_lines_)::value_type);
+}
+
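Size() charges the table for its own footprint plus the vector's reserved capacity rather than its length, and the same capacity-times-element-size idiom recurs in CodeEntry::EstimatedSize and CodeMap::GetEstimatedMemoryUsage below. In isolation (illustrative stand-in types):

#include <cstddef>
#include <vector>

struct SourcePositionTuple {
  int pc_offset, line_number, inlining_id;
};

struct SourcePositionTable {
  std::vector<SourcePositionTuple> pc_offsets_to_lines_;

  size_t Size() const {
    // capacity(), not size(): reserved-but-unused slots occupy memory too.
    return sizeof(*this) +
           pc_offsets_to_lines_.capacity() *
               sizeof(decltype(pc_offsets_to_lines_)::value_type);
  }
};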
void SourcePositionTable::print() const {
base::OS::Print(" - source position table at %p\n", this);
for (const SourcePositionTuple& pos_info : pc_offsets_to_lines_) {
@@ -207,6 +212,37 @@ void CodeEntry::FillFunctionInfo(SharedFunctionInfo shared) {
}
}
+size_t CodeEntry::EstimatedSize() const {
+ size_t estimated_size = 0;
+ if (rare_data_) {
+ estimated_size += sizeof(rare_data_.get());
+
+ for (const auto& inline_entry : rare_data_->inline_entries_) {
+ estimated_size += inline_entry->EstimatedSize();
+ }
+ estimated_size += rare_data_->inline_entries_.size() *
+ sizeof(decltype(rare_data_->inline_entries_)::value_type);
+
+ for (const auto& inline_stack_pair : rare_data_->inline_stacks_) {
+ estimated_size += inline_stack_pair.second.size() *
+ sizeof(decltype(inline_stack_pair.second)::value_type);
+ }
+ estimated_size +=
+ rare_data_->inline_stacks_.size() *
+ (sizeof(decltype(rare_data_->inline_stacks_)::key_type) +
+ sizeof(decltype(rare_data_->inline_stacks_)::value_type));
+
+ estimated_size +=
+ rare_data_->deopt_inlined_frames_.capacity() *
+ sizeof(decltype(rare_data_->deopt_inlined_frames_)::value_type);
+ }
+
+ if (line_info_) {
+ estimated_size += line_info_.get()->Size();
+ }
+ return sizeof(*this) + estimated_size;
+}
+
CpuProfileDeoptInfo CodeEntry::GetDeoptInfo() {
DCHECK(has_deopt_info());
@@ -423,9 +459,7 @@ class DeleteNodesCallback {
public:
void BeforeTraversingChild(ProfileNode*, ProfileNode*) { }
- void AfterAllChildrenTraversed(ProfileNode* node) {
- delete node;
- }
+ void AfterAllChildrenTraversed(ProfileNode* node) { delete node; }
void AfterChildTraversed(ProfileNode*, ProfileNode*) { }
};
@@ -845,6 +879,15 @@ void CodeMap::Print() {
}
}
+size_t CodeMap::GetEstimatedMemoryUsage() const {
+ size_t map_size = 0;
+ for (const auto& pair : code_map_) {
+ map_size += sizeof(pair.first) + sizeof(pair.second) +
+ pair.second.entry->EstimatedSize();
+ }
+ return sizeof(*this) + map_size;
+}
+
CpuProfilesCollection::CpuProfilesCollection(Isolate* isolate)
: profiler_(nullptr), current_profiles_semaphore_(1) {}
diff --git a/deps/v8/src/profiler/profile-generator.h b/deps/v8/src/profiler/profile-generator.h
index 3e8d073f63..bb0adbfe3b 100644
--- a/deps/v8/src/profiler/profile-generator.h
+++ b/deps/v8/src/profiler/profile-generator.h
@@ -38,6 +38,7 @@ class V8_EXPORT_PRIVATE SourcePositionTable : public Malloced {
int GetSourceLineNumber(int pc_offset) const;
int GetInliningId(int pc_offset) const;
+ size_t Size() const;
void print() const;
private:
@@ -98,6 +99,7 @@ class CodeEntry {
void set_deopt_info(const char* deopt_reason, int deopt_id,
std::vector<CpuProfileDeoptFrame> inlined_frames);
+ size_t EstimatedSize() const;
CpuProfileDeoptInfo GetDeoptInfo();
bool has_deopt_info() const {
return rare_data_ && rare_data_->deopt_id_ != kNoDeoptimizationId;
@@ -491,6 +493,8 @@ class V8_EXPORT_PRIVATE CodeMap {
void Print();
size_t size() const { return code_map_.size(); }
+ size_t GetEstimatedMemoryUsage() const;
+
CodeEntryStorage& code_entries() { return code_entries_; }
void Clear();
diff --git a/deps/v8/src/profiler/strings-storage.cc b/deps/v8/src/profiler/strings-storage.cc
index 054aa3f80e..37197a5918 100644
--- a/deps/v8/src/profiler/strings-storage.cc
+++ b/deps/v8/src/profiler/strings-storage.cc
@@ -36,6 +36,7 @@ const char* StringsStorage::GetCopy(const char* src) {
base::StrNCpy(dst, src, len);
dst[len] = '\0';
entry->key = dst.begin();
+ string_size_ += len;
}
entry->value =
reinterpret_cast<void*>(reinterpret_cast<size_t>(entry->value) + 1);
@@ -56,6 +57,7 @@ const char* StringsStorage::AddOrDisposeString(char* str, int len) {
if (entry->value == nullptr) {
// New entry added.
entry->key = str;
+ string_size_ += len;
} else {
DeleteArray(str);
}
@@ -156,6 +158,7 @@ bool StringsStorage::Release(const char* str) {
reinterpret_cast<void*>(reinterpret_cast<size_t>(entry->value) - 1);
if (entry->value == 0) {
+ string_size_ -= len;
names_.Remove(const_cast<char*>(str), hash);
DeleteArray(str);
}
@@ -166,6 +169,11 @@ size_t StringsStorage::GetStringCountForTesting() const {
return names_.occupancy();
}
+size_t StringsStorage::GetStringSize() {
+ base::MutexGuard guard(&mutex_);
+ return string_size_;
+}
+
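string_size_ is maintained incrementally: a string's length is added when it is first interned and subtracted when its refcount drops to zero, so GetStringSize is O(1) under the mutex. A stripped-down model using std::map as the table (illustrative only; V8 uses a custom hashmap and raw char buffers):

#include <cassert>
#include <cstddef>
#include <map>
#include <string>

class StringsStorage {
 public:
  void Add(const std::string& s) {
    auto [it, inserted] = refcounts_.try_emplace(s, 0);
    if (inserted) string_size_ += s.size();  // count each distinct string once
    ++it->second;
  }

  void Release(const std::string& s) {
    auto it = refcounts_.find(s);
    assert(it != refcounts_.end());
    if (--it->second == 0) {
      string_size_ -= s.size();  // last reference released
      refcounts_.erase(it);
    }
  }

  size_t GetStringSize() const { return string_size_; }

 private:
  std::map<std::string, int> refcounts_;
  size_t string_size_ = 0;
};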
base::HashMap::Entry* StringsStorage::GetEntry(const char* str, int len) {
uint32_t hash = ComputeStringHash(str, len);
return names_.LookupOrInsert(const_cast<char*>(str), hash);
diff --git a/deps/v8/src/profiler/strings-storage.h b/deps/v8/src/profiler/strings-storage.h
index 7e39c0ee33..1d4c2e44d2 100644
--- a/deps/v8/src/profiler/strings-storage.h
+++ b/deps/v8/src/profiler/strings-storage.h
@@ -47,6 +47,9 @@ class V8_EXPORT_PRIVATE StringsStorage {
// Returns the number of strings in the store.
size_t GetStringCountForTesting() const;
+  // Returns the total byte size of the strings in the store.
+ size_t GetStringSize();
+
// Returns true if the strings table is empty.
bool empty() const { return names_.occupancy() == 0; }
@@ -62,6 +65,7 @@ class V8_EXPORT_PRIVATE StringsStorage {
base::CustomMatcherHashMap names_;
base::Mutex mutex_;
+ size_t string_size_ = 0;
};
} // namespace internal
diff --git a/deps/v8/src/profiler/tick-sample.cc b/deps/v8/src/profiler/tick-sample.cc
index 253b80d19e..daef48eb26 100644
--- a/deps/v8/src/profiler/tick-sample.cc
+++ b/deps/v8/src/profiler/tick-sample.cc
@@ -105,7 +105,7 @@ bool SimulatorHelper::FillRegisters(Isolate* isolate,
state->sp = reinterpret_cast<void*>(simulator->sp());
state->fp = reinterpret_cast<void*>(simulator->fp());
state->lr = reinterpret_cast<void*>(simulator->lr());
-#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_LOONG64
if (!simulator->has_bad_pc()) {
state->pc = reinterpret_cast<void*>(simulator->get_pc());
}
diff --git a/deps/v8/src/profiler/tick-sample.h b/deps/v8/src/profiler/tick-sample.h
index 1bfcb7d097..4402bdc272 100644
--- a/deps/v8/src/profiler/tick-sample.h
+++ b/deps/v8/src/profiler/tick-sample.h
@@ -5,7 +5,7 @@
#ifndef V8_PROFILER_TICK_SAMPLE_H_
#define V8_PROFILER_TICK_SAMPLE_H_
-#include "include/v8.h"
+#include "include/v8-unwinder.h"
#include "src/base/platform/time.h"
#include "src/common/globals.h"
diff --git a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
index 6edb133576..67793ffc41 100644
--- a/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
+++ b/deps/v8/src/regexp/arm64/regexp-macro-assembler-arm64.cc
@@ -185,7 +185,6 @@ void RegExpMacroAssemblerARM64::AdvanceRegister(int reg, int by) {
}
default:
UNREACHABLE();
- break;
}
}
}
diff --git a/deps/v8/src/regexp/experimental/experimental-compiler.cc b/deps/v8/src/regexp/experimental/experimental-compiler.cc
index 8b1d841536..ae4abce7b5 100644
--- a/deps/v8/src/regexp/experimental/experimental-compiler.cc
+++ b/deps/v8/src/regexp/experimental/experimental-compiler.cc
@@ -16,12 +16,14 @@ namespace {
// TODO(mbid, v8:10765): Currently the experimental engine doesn't support
// UTF-16, but this shouldn't be too hard to implement.
constexpr base::uc32 kMaxSupportedCodepoint = 0xFFFFu;
+#ifdef DEBUG
+constexpr base::uc32 kMaxCodePoint = 0x10ffff;
+#endif // DEBUG
class CanBeHandledVisitor final : private RegExpVisitor {
// Visitor to implement `ExperimentalRegExp::CanBeHandled`.
public:
- static bool Check(RegExpTree* tree, JSRegExp::Flags flags,
- int capture_count) {
+ static bool Check(RegExpTree* tree, RegExpFlags flags, int capture_count) {
if (!AreSuitableFlags(flags)) return false;
CanBeHandledVisitor visitor;
tree->Accept(&visitor, nullptr);
@@ -31,15 +33,15 @@ class CanBeHandledVisitor final : private RegExpVisitor {
private:
CanBeHandledVisitor() = default;
- static bool AreSuitableFlags(JSRegExp::Flags flags) {
+ static bool AreSuitableFlags(RegExpFlags flags) {
// TODO(mbid, v8:10765): We should be able to support all flags in the
// future.
- static constexpr JSRegExp::Flags kAllowedFlags =
- JSRegExp::kGlobal | JSRegExp::kSticky | JSRegExp::kMultiline |
- JSRegExp::kDotAll | JSRegExp::kLinear;
+ static constexpr RegExpFlags kAllowedFlags =
+ RegExpFlag::kGlobal | RegExpFlag::kSticky | RegExpFlag::kMultiline |
+ RegExpFlag::kDotAll | RegExpFlag::kLinear;
// We support Unicode iff kUnicode is among the supported flags.
STATIC_ASSERT(ExperimentalRegExp::kSupportsUnicode ==
- ((kAllowedFlags & JSRegExp::kUnicode) != 0));
+ IsUnicode(kAllowedFlags));
return (flags & ~kAllowedFlags) == 0;
}
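RegExpFlags here is an engine-independent flag set built from RegExpFlag enumerators, replacing JSRegExp::Flags. A minimal sketch of such a type with just the operators the code above uses (illustrative bit values; the real definitions are in src/regexp/regexp-flags.h):

// Illustrative bit values, not V8's.
enum class RegExpFlag : int {
  kGlobal = 1 << 0,
  kSticky = 1 << 1,
  kMultiline = 1 << 2,
  kDotAll = 1 << 3,
  kLinear = 1 << 4,
  kUnicode = 1 << 5,
};

struct RegExpFlags {
  int bits = 0;
  constexpr RegExpFlags() = default;
  constexpr RegExpFlags(RegExpFlag f)  // implicit: a single flag is a set
      : bits(static_cast<int>(f)) {}
};

constexpr RegExpFlags operator|(RegExpFlags a, RegExpFlags b) {
  RegExpFlags r;
  r.bits = a.bits | b.bits;
  return r;
}
constexpr RegExpFlags operator&(RegExpFlags a, RegExpFlags b) {
  RegExpFlags r;
  r.bits = a.bits & b.bits;
  return r;
}
constexpr RegExpFlags operator~(RegExpFlags a) {
  RegExpFlags r;
  r.bits = ~a.bits;
  return r;
}
constexpr bool IsUnicode(RegExpFlags f) {
  return (f.bits & static_cast<int>(RegExpFlag::kUnicode)) != 0;
}

constexpr RegExpFlags kAllowedFlags =
    RegExpFlag::kGlobal | RegExpFlag::kSticky | RegExpFlag::kMultiline |
    RegExpFlag::kDotAll | RegExpFlag::kLinear;

// Same shape as AreSuitableFlags above: no flag outside the allowed set.
constexpr bool AreSuitableFlags(RegExpFlags flags) {
  return (flags & ~kAllowedFlags).bits == 0;
}

static_assert(!IsUnicode(kAllowedFlags), "unicode is not in the allowed set");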
@@ -173,7 +175,7 @@ class CanBeHandledVisitor final : private RegExpVisitor {
} // namespace
bool ExperimentalRegExpCompiler::CanBeHandled(RegExpTree* tree,
- JSRegExp::Flags flags,
+ RegExpFlags flags,
int capture_count) {
return CanBeHandledVisitor::Check(tree, flags, capture_count);
}
@@ -294,11 +296,10 @@ class BytecodeAssembler {
class CompileVisitor : private RegExpVisitor {
public:
static ZoneList<RegExpInstruction> Compile(RegExpTree* tree,
- JSRegExp::Flags flags,
- Zone* zone) {
+ RegExpFlags flags, Zone* zone) {
CompileVisitor compiler(zone);
- if ((flags & JSRegExp::kSticky) == 0 && !tree->IsAnchoredAtStart()) {
+ if (!IsSticky(flags) && !tree->IsAnchoredAtStart()) {
// The match is not anchored, i.e. may start at any input position, so we
// emit a preamble corresponding to /.*?/. This skips an arbitrary
// prefix in the input non-greedily.
@@ -409,7 +410,7 @@ class CompileVisitor : private RegExpVisitor {
base::uc16 from_uc16 = static_cast<base::uc16>(from);
base::uc32 to = (*ranges)[i].to();
- DCHECK_IMPLIES(to > kMaxSupportedCodepoint, to == String::kMaxCodePoint);
+ DCHECK_IMPLIES(to > kMaxSupportedCodepoint, to == kMaxCodePoint);
base::uc16 to_uc16 =
static_cast<base::uc16>(std::min(to, kMaxSupportedCodepoint));
@@ -627,7 +628,7 @@ class CompileVisitor : private RegExpVisitor {
} // namespace
ZoneList<RegExpInstruction> ExperimentalRegExpCompiler::Compile(
- RegExpTree* tree, JSRegExp::Flags flags, Zone* zone) {
+ RegExpTree* tree, RegExpFlags flags, Zone* zone) {
return CompileVisitor::Compile(tree, flags, zone);
}
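
One detail worth unpacking from the Compile hunk above: character-class range ends are clamped into the UC16-representable range, and the updated DCHECK asserts the parser's invariant that a range end above 0xFFFF can only be 0x10FFFF, the largest Unicode code point. A small sketch of that clamp under the same two constants:

#include <algorithm>
#include <cassert>
#include <cstdint>

constexpr uint32_t kMaxSupportedCodepoint = 0xFFFF;  // UC16 limit.
constexpr uint32_t kMaxCodePoint = 0x10FFFF;         // Largest Unicode code point.

// Clamp a range end to what the experimental engine can represent.
// Assumes the parser's invariant: range ends are either within the BMP
// or run to the very end of the code-point space.
uint16_t ClampRangeEnd(uint32_t to) {
  assert(to <= kMaxSupportedCodepoint || to == kMaxCodePoint);
  return static_cast<uint16_t>(std::min(to, kMaxSupportedCodepoint));
}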
diff --git a/deps/v8/src/regexp/experimental/experimental-compiler.h b/deps/v8/src/regexp/experimental/experimental-compiler.h
index 87abcd3917..e6abf0557f 100644
--- a/deps/v8/src/regexp/experimental/experimental-compiler.h
+++ b/deps/v8/src/regexp/experimental/experimental-compiler.h
@@ -7,6 +7,7 @@
#include "src/regexp/experimental/experimental-bytecode.h"
#include "src/regexp/regexp-ast.h"
+#include "src/regexp/regexp-flags.h"
#include "src/zone/zone-list.h"
namespace v8 {
@@ -19,13 +20,13 @@ class ExperimentalRegExpCompiler final : public AllStatic {
// but see the definition.
// TODO(mbid,v8:10765): Currently more things are not handled, e.g. some
// quantifiers and unicode.
- static bool CanBeHandled(RegExpTree* tree, JSRegExp::Flags flags,
+ static bool CanBeHandled(RegExpTree* tree, RegExpFlags flags,
int capture_count);
// Compile regexp into a bytecode program. The regexp must be handlable by
// the experimental engine; see `CanBeHandled`. The program is returned as a
// ZoneList backed by the same Zone that is used in the RegExpTree argument.
static ZoneList<RegExpInstruction> Compile(RegExpTree* tree,
- JSRegExp::Flags flags, Zone* zone);
+ RegExpFlags flags, Zone* zone);
};
} // namespace internal
diff --git a/deps/v8/src/regexp/experimental/experimental-interpreter.h b/deps/v8/src/regexp/experimental/experimental-interpreter.h
index d65299499b..a21b01639a 100644
--- a/deps/v8/src/regexp/experimental/experimental-interpreter.h
+++ b/deps/v8/src/regexp/experimental/experimental-interpreter.h
@@ -5,15 +5,14 @@
#ifndef V8_REGEXP_EXPERIMENTAL_EXPERIMENTAL_INTERPRETER_H_
#define V8_REGEXP_EXPERIMENTAL_EXPERIMENTAL_INTERPRETER_H_
-#include "src/base/vector.h"
-#include "src/objects/fixed-array.h"
-#include "src/objects/string.h"
#include "src/regexp/experimental/experimental-bytecode.h"
#include "src/regexp/regexp.h"
namespace v8 {
namespace internal {
+class ByteArray;
+class String;
class Zone;
class ExperimentalRegExpInterpreter final : public AllStatic {
diff --git a/deps/v8/src/regexp/experimental/experimental.cc b/deps/v8/src/regexp/experimental/experimental.cc
index bff2d7da66..c05a010d06 100644
--- a/deps/v8/src/regexp/experimental/experimental.cc
+++ b/deps/v8/src/regexp/experimental/experimental.cc
@@ -14,7 +14,7 @@
namespace v8 {
namespace internal {
-bool ExperimentalRegExp::CanBeHandled(RegExpTree* tree, JSRegExp::Flags flags,
+bool ExperimentalRegExp::CanBeHandled(RegExpTree* tree, RegExpFlags flags,
int capture_count) {
DCHECK(FLAG_enable_experimental_regexp_engine ||
FLAG_enable_experimental_regexp_engine_on_excessive_backtracks);
@@ -22,16 +22,16 @@ bool ExperimentalRegExp::CanBeHandled(RegExpTree* tree, JSRegExp::Flags flags,
}
void ExperimentalRegExp::Initialize(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> source,
- JSRegExp::Flags flags, int capture_count) {
+ Handle<String> source, RegExpFlags flags,
+ int capture_count) {
DCHECK(FLAG_enable_experimental_regexp_engine);
if (FLAG_trace_experimental_regexp_engine) {
StdoutStream{} << "Initializing experimental regexp " << *source
<< std::endl;
}
- isolate->factory()->SetRegExpExperimentalData(re, source, flags,
- capture_count);
+ isolate->factory()->SetRegExpExperimentalData(
+ re, source, JSRegExp::AsJSRegExpFlags(flags), capture_count);
}
bool ExperimentalRegExp::IsCompiled(Handle<JSRegExp> re, Isolate* isolate) {
@@ -69,15 +69,14 @@ base::Optional<CompilationResult> CompileImpl(Isolate* isolate,
Zone zone(isolate->allocator(), ZONE_NAME);
Handle<String> source(regexp->Pattern(), isolate);
- JSRegExp::Flags flags = regexp->GetFlags();
// Parse and compile the regexp source.
RegExpCompileData parse_result;
- FlatStringReader reader(isolate, source);
DCHECK(!isolate->has_pending_exception());
- bool parse_success =
- RegExpParser::ParseRegExp(isolate, &zone, &reader, flags, &parse_result);
+ bool parse_success = RegExpParser::ParseRegExpFromHeapString(
+ isolate, &zone, source, JSRegExp::AsRegExpFlags(regexp->GetFlags()),
+ &parse_result);
if (!parse_success) {
// The pattern was already parsed successfully during initialization, so
// the only way parsing can fail now is because of stack overflow.
@@ -87,12 +86,13 @@ base::Optional<CompilationResult> CompileImpl(Isolate* isolate,
return base::nullopt;
}
- ZoneList<RegExpInstruction> bytecode =
- ExperimentalRegExpCompiler::Compile(parse_result.tree, flags, &zone);
+ ZoneList<RegExpInstruction> bytecode = ExperimentalRegExpCompiler::Compile(
+ parse_result.tree, JSRegExp::AsRegExpFlags(regexp->GetFlags()), &zone);
CompilationResult result;
result.bytecode = VectorToByteArray(isolate, bytecode.ToVector());
- result.capture_name_map = parse_result.capture_name_map;
+ result.capture_name_map =
+ RegExp::CreateCaptureNameMap(isolate, parse_result.named_captures);
return result;
}
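
The recurring pattern in this hunk is a type seam: the heap-resident JSRegExp keeps flags in its own representation, while the parser and compilers now take RegExpFlags, and JSRegExp::AsRegExpFlags / JSRegExp::AsJSRegExpFlags translate at the boundary. A hedged sketch of that shape, with two hypothetical mirrored flag types standing in for the real ones (the actual conversions live in js-regexp.h and need not be simple casts):

#include <cstdint>

// Hypothetical stand-ins: one representation stored on the heap object,
// one used by the regexp engine internals. Assumes identical bit layout.
enum class HeapFlags : uint32_t {};
enum class EngineFlags : uint32_t {};

EngineFlags AsEngineFlags(HeapFlags f) {
  return static_cast<EngineFlags>(static_cast<uint32_t>(f));
}
HeapFlags AsHeapFlags(EngineFlags f) {
  return static_cast<HeapFlags>(static_cast<uint32_t>(f));
}

Keeping the conversion explicit at call sites, rather than mixing the two types, is what lets the engine headers drop their include dependency on the heap-object headers, as the forward-declaration change in experimental-interpreter.h above also does.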
diff --git a/deps/v8/src/regexp/experimental/experimental.h b/deps/v8/src/regexp/experimental/experimental.h
index 1b44100cc8..5987fb4d77 100644
--- a/deps/v8/src/regexp/experimental/experimental.h
+++ b/deps/v8/src/regexp/experimental/experimental.h
@@ -5,6 +5,7 @@
#ifndef V8_REGEXP_EXPERIMENTAL_EXPERIMENTAL_H_
#define V8_REGEXP_EXPERIMENTAL_EXPERIMENTAL_H_
+#include "src/regexp/regexp-flags.h"
#include "src/regexp/regexp.h"
namespace v8 {
@@ -19,10 +20,10 @@ class ExperimentalRegExp final : public AllStatic {
// TODO(mbid, v8:10765): This walks the RegExpTree, but it could also be
// checked on the fly in the parser. Not done currently because walking the
// AST again is more flexible and less error prone (but less performant).
- static bool CanBeHandled(RegExpTree* tree, JSRegExp::Flags flags,
+ static bool CanBeHandled(RegExpTree* tree, RegExpFlags flags,
int capture_count);
static void Initialize(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern, JSRegExp::Flags flags,
+ Handle<String> pattern, RegExpFlags flags,
int capture_count);
static bool IsCompiled(Handle<JSRegExp> re, Isolate* isolate);
V8_WARN_UNUSED_RESULT
diff --git a/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc
new file mode 100644
index 0000000000..d95a6e7d60
--- /dev/null
+++ b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.cc
@@ -0,0 +1,1264 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#if V8_TARGET_ARCH_LOONG64
+
+#include "src/regexp/loong64/regexp-macro-assembler-loong64.h"
+
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/logging/log.h"
+#include "src/objects/objects-inl.h"
+#include "src/regexp/regexp-macro-assembler.h"
+#include "src/regexp/regexp-stack.h"
+#include "src/snapshot/embedded/embedded-data.h"
+#include "src/strings/unicode.h"
+
+namespace v8 {
+namespace internal {
+
+/* clang-format off
+ *
+ * This assembler uses the following register assignment convention
+ * - t3 : Temporarily stores the index of capture start after a matching pass
+ * for a global regexp.
+ * - a5 : Pointer to current Code object including heap object tag.
+ * - a6 : Current position in input, as negative offset from end of string.
+ * Please notice that this is the byte offset, not the character offset!
+ * - a7 : Currently loaded character. Must be loaded using
+ * LoadCurrentCharacter before using any of the dispatch methods.
+ * - t0 : Points to tip of backtrack stack
+ * - t1 : Unused.
+ * - t2 : End of input (points to byte after last character in input).
+ * - fp : Frame pointer. Used to access arguments, local variables and
+ * RegExp registers.
+ * - sp : Points to tip of C stack.
+ *
+ * The remaining registers are free for computations.
+ * Each call to a public method should retain this convention.
+ *
+ * The stack will have the following structure:
+ *
+ * - fp[80] Isolate* isolate (address of the current isolate) kIsolate
+ * kStackFrameHeader
+ * --- sp when called ---
+ * - fp[72] ra Return from RegExp code (ra). kReturnAddress
+ * - fp[64] old-fp Old fp, callee saved.
+ * - fp[0..63] s0..s7 Callee-saved registers s0..s7.
+ * --- frame pointer ----
+ * - fp[-8] direct_call (1 = direct call from JS, 0 = from runtime) kDirectCall
+ * - fp[-16] stack_base (Top of backtracking stack). kStackHighEnd
+ * - fp[-24] capture array size (may fit multiple sets of matches) kNumOutputRegisters
+ * - fp[-32] int* capture_array (int[num_saved_registers_], for output). kRegisterOutput
+ * - fp[-40] end of input (address of end of string). kInputEnd
+ * - fp[-48] start of input (address of first character in string). kInputStart
+ * - fp[-56] start index (character index of start). kStartIndex
+ * - fp[-64] void* input_string (location of a handle containing the string). kInputString
+ * - fp[-72] success counter (only for global regexps to count matches). kSuccessfulCaptures
+ * - fp[-80] Offset of location before start of input (effectively character kStringStartMinusOne
+ * position -1). Used to initialize capture registers to a
+ * non-position.
+ * --------- The following output registers are 32-bit values. ---------
+ * - fp[-88] register 0 (Only positions must be stored in the first kRegisterZero
+ * - register 1 num_saved_registers_ registers)
+ * - ...
+ * - register num_registers-1
+ * --- sp ---
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out as garbage.
+ *
+ * The data up to the return address must be placed there by the calling
+ * code and the remaining arguments are passed in registers, e.g. by calling the
+ * code entry as cast to a function with the signature:
+ * int (*match)(String input_string,
+ * int start_index,
+ * Address start,
+ * Address end,
+ * int* capture_output_array,
+ * int num_capture_registers,
+ * byte* stack_area_base,
+ * bool direct_call = false,
+ * Isolate* isolate);
+ * The call is performed by NativeRegExpMacroAssembler::Execute()
+ * (in regexp-macro-assembler.cc) via the GeneratedCode wrapper.
+ *
+ * clang-format on
+ */
+
+#define __ ACCESS_MASM(masm_)
+
+const int RegExpMacroAssemblerLOONG64::kRegExpCodeSize;
+
+RegExpMacroAssemblerLOONG64::RegExpMacroAssemblerLOONG64(Isolate* isolate,
+ Zone* zone, Mode mode,
+ int registers_to_save)
+ : NativeRegExpMacroAssembler(isolate, zone),
+ masm_(new MacroAssembler(isolate, CodeObjectRequired::kYes,
+ NewAssemblerBuffer(kRegExpCodeSize))),
+ mode_(mode),
+ num_registers_(registers_to_save),
+ num_saved_registers_(registers_to_save),
+ entry_label_(),
+ start_label_(),
+ success_label_(),
+ backtrack_label_(),
+ exit_label_(),
+ internal_failure_label_() {
+ masm_->set_root_array_available(false);
+
+ DCHECK_EQ(0, registers_to_save % 2);
+ __ jmp(&entry_label_); // We'll write the entry code later.
+ // If the code gets too big or corrupted, an internal exception will be
+ // raised, and we will exit right away.
+ __ bind(&internal_failure_label_);
+ __ li(a0, Operand(FAILURE));
+ __ Ret();
+ __ bind(&start_label_); // And then continue from here.
+}
+
+RegExpMacroAssemblerLOONG64::~RegExpMacroAssemblerLOONG64() {
+ delete masm_;
+ // Unuse labels in case we throw away the assembler without calling GetCode.
+ entry_label_.Unuse();
+ start_label_.Unuse();
+ success_label_.Unuse();
+ backtrack_label_.Unuse();
+ exit_label_.Unuse();
+ check_preempt_label_.Unuse();
+ stack_overflow_label_.Unuse();
+ internal_failure_label_.Unuse();
+ fallback_label_.Unuse();
+}
+
+int RegExpMacroAssemblerLOONG64::stack_limit_slack() {
+ return RegExpStack::kStackLimitSlack;
+}
+
+void RegExpMacroAssemblerLOONG64::AdvanceCurrentPosition(int by) {
+ if (by != 0) {
+ __ Add_d(current_input_offset(), current_input_offset(),
+ Operand(by * char_size()));
+ }
+}
+
+void RegExpMacroAssemblerLOONG64::AdvanceRegister(int reg, int by) {
+ DCHECK_LE(0, reg);
+ DCHECK_GT(num_registers_, reg);
+ if (by != 0) {
+ __ Ld_d(a0, register_location(reg));
+ __ Add_d(a0, a0, Operand(by));
+ __ St_d(a0, register_location(reg));
+ }
+}
+
+void RegExpMacroAssemblerLOONG64::Backtrack() {
+ CheckPreemption();
+ if (has_backtrack_limit()) {
+ Label next;
+ __ Ld_d(a0, MemOperand(frame_pointer(), kBacktrackCount));
+ __ Add_d(a0, a0, Operand(1));
+ __ St_d(a0, MemOperand(frame_pointer(), kBacktrackCount));
+ __ Branch(&next, ne, a0, Operand(backtrack_limit()));
+
+ // Backtrack limit exceeded.
+ if (can_fallback()) {
+ __ jmp(&fallback_label_);
+ } else {
+      // Can't fall back, so we treat it as a failed match.
+ Fail();
+ }
+
+ __ bind(&next);
+ }
+ // Pop Code offset from backtrack stack, add Code and jump to location.
+ Pop(a0);
+ __ Add_d(a0, a0, code_pointer());
+ __ Jump(a0);
+}
+
+void RegExpMacroAssemblerLOONG64::Bind(Label* label) { __ bind(label); }
+
+void RegExpMacroAssemblerLOONG64::CheckCharacter(uint32_t c, Label* on_equal) {
+ BranchOrBacktrack(on_equal, eq, current_character(), Operand(c));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckCharacterGT(base::uc16 limit,
+ Label* on_greater) {
+ BranchOrBacktrack(on_greater, gt, current_character(), Operand(limit));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckAtStart(int cp_offset,
+ Label* on_at_start) {
+ __ Ld_d(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Add_d(a0, current_input_offset(),
+ Operand(-char_size() + cp_offset * char_size()));
+ BranchOrBacktrack(on_at_start, eq, a0, Operand(a1));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckNotAtStart(int cp_offset,
+ Label* on_not_at_start) {
+ __ Ld_d(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Add_d(a0, current_input_offset(),
+ Operand(-char_size() + cp_offset * char_size()));
+ BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckCharacterLT(base::uc16 limit,
+ Label* on_less) {
+ BranchOrBacktrack(on_less, lt, current_character(), Operand(limit));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckGreedyLoop(Label* on_equal) {
+ Label backtrack_non_equal;
+ __ Ld_w(a0, MemOperand(backtrack_stackpointer(), 0));
+ __ Branch(&backtrack_non_equal, ne, current_input_offset(), Operand(a0));
+ __ Add_d(backtrack_stackpointer(), backtrack_stackpointer(),
+ Operand(kIntSize));
+ __ bind(&backtrack_non_equal);
+ BranchOrBacktrack(on_equal, eq, current_input_offset(), Operand(a0));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckNotBackReferenceIgnoreCase(
+ int start_reg, bool read_backward, bool unicode, Label* on_no_match) {
+ Label fallthrough;
+ __ Ld_d(a0, register_location(start_reg)); // Index of start of capture.
+ __ Ld_d(a1, register_location(start_reg + 1)); // Index of end of capture.
+ __ Sub_d(a1, a1, a0); // Length of capture.
+
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
+ __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
+
+ if (read_backward) {
+ __ Ld_d(t1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Add_d(t1, t1, a1);
+ BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1));
+ } else {
+ __ Add_d(t1, a1, current_input_offset());
+ // Check that there are enough characters left in the input.
+ BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg));
+ }
+
+ if (mode_ == LATIN1) {
+ Label success;
+ Label fail;
+ Label loop_check;
+
+ // a0 - offset of start of capture.
+ // a1 - length of capture.
+ __ Add_d(a0, a0, Operand(end_of_input_address()));
+ __ Add_d(a2, end_of_input_address(), Operand(current_input_offset()));
+ if (read_backward) {
+ __ Sub_d(a2, a2, Operand(a1));
+ }
+ __ Add_d(a1, a0, Operand(a1));
+
+ // a0 - Address of start of capture.
+ // a1 - Address of end of capture.
+ // a2 - Address of current input position.
+
+ Label loop;
+ __ bind(&loop);
+ __ Ld_bu(a3, MemOperand(a0, 0));
+ __ addi_d(a0, a0, char_size());
+ __ Ld_bu(a4, MemOperand(a2, 0));
+ __ addi_d(a2, a2, char_size());
+
+ __ Branch(&loop_check, eq, a4, Operand(a3));
+
+ // Mismatch, try case-insensitive match (converting letters to lower-case).
+ __ Or(a3, a3, Operand(0x20)); // Convert capture character to lower-case.
+ __ Or(a4, a4, Operand(0x20)); // Also convert input character.
+ __ Branch(&fail, ne, a4, Operand(a3));
+ __ Sub_d(a3, a3, Operand('a'));
+ __ Branch(&loop_check, ls, a3, Operand('z' - 'a'));
+ // Latin-1: Check for values in range [224,254] but not 247.
+ __ Sub_d(a3, a3, Operand(224 - 'a'));
+ // Weren't Latin-1 letters.
+ __ Branch(&fail, hi, a3, Operand(254 - 224));
+ // Check for 247.
+ __ Branch(&fail, eq, a3, Operand(247 - 224));
+
+ __ bind(&loop_check);
+ __ Branch(&loop, lt, a0, Operand(a1));
+ __ jmp(&success);
+
+ __ bind(&fail);
+ GoTo(on_no_match);
+
+ __ bind(&success);
+ // Compute new value of character position after the matched part.
+ __ Sub_d(current_input_offset(), a2, end_of_input_address());
+ if (read_backward) {
+ __ Ld_d(t1, register_location(start_reg)); // Index of start of capture.
+ __ Ld_d(a2,
+ register_location(start_reg + 1)); // Index of end of capture.
+ __ Add_d(current_input_offset(), current_input_offset(), Operand(t1));
+ __ Sub_d(current_input_offset(), current_input_offset(), Operand(a2));
+ }
+ } else {
+ DCHECK(mode_ == UC16);
+ // Put regexp engine registers on stack.
+ RegList regexp_registers_to_retain = current_input_offset().bit() |
+ current_character().bit() |
+ backtrack_stackpointer().bit();
+ __ MultiPush(regexp_registers_to_retain);
+
+ int argument_count = 4;
+ __ PrepareCallCFunction(argument_count, a2);
+
+ // a0 - offset of start of capture.
+ // a1 - length of capture.
+
+ // Put arguments into arguments registers.
+ // Parameters are
+ // a0: Address byte_offset1 - Address captured substring's start.
+ // a1: Address byte_offset2 - Address of current character position.
+ // a2: size_t byte_length - length of capture in bytes(!).
+ // a3: Isolate* isolate.
+
+ // Address of start of capture.
+ __ Add_d(a0, a0, Operand(end_of_input_address()));
+ // Length of capture.
+ __ mov(a2, a1);
+ // Save length in callee-save register for use on return.
+ __ mov(s3, a1);
+ // Address of current input position.
+ __ Add_d(a1, current_input_offset(), Operand(end_of_input_address()));
+ if (read_backward) {
+ __ Sub_d(a1, a1, Operand(s3));
+ }
+ // Isolate.
+ __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
+
+ {
+ AllowExternalCallThatCantCauseGC scope(masm_);
+ ExternalReference function =
+ unicode ? ExternalReference::re_case_insensitive_compare_unicode(
+ isolate())
+ : ExternalReference::re_case_insensitive_compare_non_unicode(
+ isolate());
+ __ CallCFunction(function, argument_count);
+ }
+
+ // Restore regexp engine registers.
+ __ MultiPop(regexp_registers_to_retain);
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+ __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+
+ // Check if function returned non-zero for success or zero for failure.
+ BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg));
+ // On success, increment position by length of capture.
+ if (read_backward) {
+ __ Sub_d(current_input_offset(), current_input_offset(), Operand(s3));
+ } else {
+ __ Add_d(current_input_offset(), current_input_offset(), Operand(s3));
+ }
+ }
+
+ __ bind(&fallthrough);
+}
+
+void RegExpMacroAssemblerLOONG64::CheckNotBackReference(int start_reg,
+ bool read_backward,
+ Label* on_no_match) {
+ Label fallthrough;
+
+ // Find length of back-referenced capture.
+ __ Ld_d(a0, register_location(start_reg));
+ __ Ld_d(a1, register_location(start_reg + 1));
+ __ Sub_d(a1, a1, a0); // Length to check.
+
+ // At this point, the capture registers are either both set or both cleared.
+ // If the capture length is zero, then the capture is either empty or cleared.
+ // Fall through in both cases.
+ __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
+
+ if (read_backward) {
+ __ Ld_d(t1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Add_d(t1, t1, a1);
+ BranchOrBacktrack(on_no_match, le, current_input_offset(), Operand(t1));
+ } else {
+ __ Add_d(t1, a1, current_input_offset());
+ // Check that there are enough characters left in the input.
+ BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg));
+ }
+
+ // Compute pointers to match string and capture string.
+ __ Add_d(a0, a0, Operand(end_of_input_address()));
+ __ Add_d(a2, end_of_input_address(), Operand(current_input_offset()));
+ if (read_backward) {
+ __ Sub_d(a2, a2, Operand(a1));
+ }
+ __ Add_d(a1, a1, Operand(a0));
+
+ Label loop;
+ __ bind(&loop);
+ if (mode_ == LATIN1) {
+ __ Ld_bu(a3, MemOperand(a0, 0));
+ __ addi_d(a0, a0, char_size());
+ __ Ld_bu(a4, MemOperand(a2, 0));
+ __ addi_d(a2, a2, char_size());
+ } else {
+ DCHECK(mode_ == UC16);
+ __ Ld_hu(a3, MemOperand(a0, 0));
+ __ addi_d(a0, a0, char_size());
+ __ Ld_hu(a4, MemOperand(a2, 0));
+ __ addi_d(a2, a2, char_size());
+ }
+ BranchOrBacktrack(on_no_match, ne, a3, Operand(a4));
+ __ Branch(&loop, lt, a0, Operand(a1));
+
+ // Move current character position to position after match.
+ __ Sub_d(current_input_offset(), a2, end_of_input_address());
+ if (read_backward) {
+ __ Ld_d(t1, register_location(start_reg)); // Index of start of capture.
+ __ Ld_d(a2, register_location(start_reg + 1)); // Index of end of capture.
+ __ Add_d(current_input_offset(), current_input_offset(), Operand(t1));
+ __ Sub_d(current_input_offset(), current_input_offset(), Operand(a2));
+ }
+ __ bind(&fallthrough);
+}
+
+void RegExpMacroAssemblerLOONG64::CheckNotCharacter(uint32_t c,
+ Label* on_not_equal) {
+ BranchOrBacktrack(on_not_equal, ne, current_character(), Operand(c));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckCharacterAfterAnd(uint32_t c,
+ uint32_t mask,
+ Label* on_equal) {
+ __ And(a0, current_character(), Operand(mask));
+ Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c);
+ BranchOrBacktrack(on_equal, eq, a0, rhs);
+}
+
+void RegExpMacroAssemblerLOONG64::CheckNotCharacterAfterAnd(
+ uint32_t c, uint32_t mask, Label* on_not_equal) {
+ __ And(a0, current_character(), Operand(mask));
+ Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c);
+ BranchOrBacktrack(on_not_equal, ne, a0, rhs);
+}
+
+void RegExpMacroAssemblerLOONG64::CheckNotCharacterAfterMinusAnd(
+ base::uc16 c, base::uc16 minus, base::uc16 mask, Label* on_not_equal) {
+ DCHECK_GT(String::kMaxUtf16CodeUnit, minus);
+ __ Sub_d(a0, current_character(), Operand(minus));
+ __ And(a0, a0, Operand(mask));
+ BranchOrBacktrack(on_not_equal, ne, a0, Operand(c));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckCharacterInRange(base::uc16 from,
+ base::uc16 to,
+ Label* on_in_range) {
+ __ Sub_d(a0, current_character(), Operand(from));
+ // Unsigned lower-or-same condition.
+ BranchOrBacktrack(on_in_range, ls, a0, Operand(to - from));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckCharacterNotInRange(
+ base::uc16 from, base::uc16 to, Label* on_not_in_range) {
+ __ Sub_d(a0, current_character(), Operand(from));
+ // Unsigned higher condition.
+ BranchOrBacktrack(on_not_in_range, hi, a0, Operand(to - from));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckBitInTable(Handle<ByteArray> table,
+ Label* on_bit_set) {
+ __ li(a0, Operand(table));
+ if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
+ __ And(a1, current_character(), Operand(kTableSize - 1));
+ __ Add_d(a0, a0, a1);
+ } else {
+ __ Add_d(a0, a0, current_character());
+ }
+
+ __ Ld_bu(a0, FieldMemOperand(a0, ByteArray::kHeaderSize));
+ BranchOrBacktrack(on_bit_set, ne, a0, Operand(zero_reg));
+}
+
+bool RegExpMacroAssemblerLOONG64::CheckSpecialCharacterClass(
+ base::uc16 type, Label* on_no_match) {
+ // Range checks (c in min..max) are generally implemented by an unsigned
+ // (c - min) <= (max - min) check.
+ switch (type) {
+ case 's':
+ // Match space-characters.
+ if (mode_ == LATIN1) {
+ // One byte space characters are '\t'..'\r', ' ' and \u00a0.
+ Label success;
+ __ Branch(&success, eq, current_character(), Operand(' '));
+ // Check range 0x09..0x0D.
+ __ Sub_d(a0, current_character(), Operand('\t'));
+ __ Branch(&success, ls, a0, Operand('\r' - '\t'));
+ // \u00a0 (NBSP).
+ BranchOrBacktrack(on_no_match, ne, a0, Operand(0x00A0 - '\t'));
+ __ bind(&success);
+ return true;
+ }
+ return false;
+ case 'S':
+ // The emitted code for generic character classes is good enough.
+ return false;
+ case 'd':
+ // Match Latin1 digits ('0'..'9').
+ __ Sub_d(a0, current_character(), Operand('0'));
+ BranchOrBacktrack(on_no_match, hi, a0, Operand('9' - '0'));
+ return true;
+ case 'D':
+ // Match non Latin1-digits.
+ __ Sub_d(a0, current_character(), Operand('0'));
+ BranchOrBacktrack(on_no_match, ls, a0, Operand('9' - '0'));
+ return true;
+ case '.': {
+ // Match non-newlines (not 0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029).
+ __ Xor(a0, current_character(), Operand(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C.
+ __ Sub_d(a0, a0, Operand(0x0B));
+ BranchOrBacktrack(on_no_match, ls, a0, Operand(0x0C - 0x0B));
+ if (mode_ == UC16) {
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ Sub_d(a0, a0, Operand(0x2028 - 0x0B));
+ BranchOrBacktrack(on_no_match, ls, a0, Operand(1));
+ }
+ return true;
+ }
+ case 'n': {
+ // Match newlines (0x0A('\n'), 0x0D('\r'), 0x2028 and 0x2029).
+ __ Xor(a0, current_character(), Operand(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0B or 0x0C.
+ __ Sub_d(a0, a0, Operand(0x0B));
+ if (mode_ == LATIN1) {
+ BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0C - 0x0B));
+ } else {
+ Label done;
+ BranchOrBacktrack(&done, ls, a0, Operand(0x0C - 0x0B));
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0B). I.e., check for
+ // 0x201D (0x2028 - 0x0B) or 0x201E.
+ __ Sub_d(a0, a0, Operand(0x2028 - 0x0B));
+ BranchOrBacktrack(on_no_match, hi, a0, Operand(1));
+ __ bind(&done);
+ }
+ return true;
+ }
+ case 'w': {
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
+ BranchOrBacktrack(on_no_match, hi, current_character(), Operand('z'));
+ }
+ ExternalReference map =
+ ExternalReference::re_word_character_map(isolate());
+ __ li(a0, Operand(map));
+ __ Add_d(a0, a0, current_character());
+ __ Ld_bu(a0, MemOperand(a0, 0));
+ BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg));
+ return true;
+ }
+ case 'W': {
+ Label done;
+ if (mode_ != LATIN1) {
+ // Table is 256 entries, so all Latin1 characters can be tested.
+ __ Branch(&done, hi, current_character(), Operand('z'));
+ }
+ ExternalReference map =
+ ExternalReference::re_word_character_map(isolate());
+ __ li(a0, Operand(map));
+ __ Add_d(a0, a0, current_character());
+ __ Ld_bu(a0, MemOperand(a0, 0));
+ BranchOrBacktrack(on_no_match, ne, a0, Operand(zero_reg));
+ if (mode_ != LATIN1) {
+ __ bind(&done);
+ }
+ return true;
+ }
+ case '*':
+ // Match any character.
+ return true;
+ // No custom implementation (yet): s(UC16), S(UC16).
+ default:
+ return false;
+ }
+}
+
+void RegExpMacroAssemblerLOONG64::Fail() {
+ __ li(a0, Operand(FAILURE));
+ __ jmp(&exit_label_);
+}
+
+Handle<HeapObject> RegExpMacroAssemblerLOONG64::GetCode(Handle<String> source) {
+ Label return_v0;
+ if (0 /* todo masm_->has_exception()*/) {
+ // If the code gets corrupted due to long regular expressions and lack of
+ // space on trampolines, an internal exception flag is set. If this case
+ // is detected, we will jump into exit sequence right away.
+ //__ bind_to(&entry_label_, internal_failure_label_.pos());
+ } else {
+ // Finalize code - write the entry point code now we know how many
+ // registers we need.
+
+ // Entry code:
+ __ bind(&entry_label_);
+
+ // Tell the system that we have a stack frame. Because the type is MANUAL,
+    // no code is generated.
+ FrameScope scope(masm_, StackFrame::MANUAL);
+
+ // Actually emit code to start a new stack frame.
+ // Push arguments
+ // Save callee-save registers.
+ // Start new stack frame.
+ // Store link register in existing stack-cell.
+ // Order here should correspond to order of offset constants in header file.
+ // TODO(plind): we save s0..s7, but ONLY use s3 here - use the regs
+    // or don't save.
+ RegList registers_to_retain = s0.bit() | s1.bit() | s2.bit() | s3.bit() |
+ s4.bit() | s5.bit() | s6.bit() | s7.bit();
+ RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit();
+
+ argument_registers |= a4.bit() | a5.bit() | a6.bit() | a7.bit();
+
+ __ MultiPush(ra.bit(), fp.bit(), argument_registers | registers_to_retain);
+ // Set frame pointer in space for it if this is not a direct call
+ // from generated code.
+ // TODO(plind): this 8 is the # of argument regs, should have definition.
+ __ Add_d(frame_pointer(), sp, Operand(8 * kPointerSize));
+ STATIC_ASSERT(kSuccessfulCaptures == kInputString - kSystemPointerSize);
+ __ mov(a0, zero_reg);
+ __ Push(a0); // Make room for success counter and initialize it to 0.
+ STATIC_ASSERT(kStringStartMinusOne ==
+ kSuccessfulCaptures - kSystemPointerSize);
+ __ Push(a0); // Make room for "string start - 1" constant.
+ STATIC_ASSERT(kBacktrackCount == kStringStartMinusOne - kSystemPointerSize);
+ __ Push(a0); // The backtrack counter
+
+ // Check if we have space on the stack for registers.
+ Label stack_limit_hit;
+ Label stack_ok;
+
+ ExternalReference stack_limit =
+ ExternalReference::address_of_jslimit(masm_->isolate());
+ __ li(a0, Operand(stack_limit));
+ __ Ld_d(a0, MemOperand(a0, 0));
+ __ Sub_d(a0, sp, a0);
+ // Handle it if the stack pointer is already below the stack limit.
+ __ Branch(&stack_limit_hit, le, a0, Operand(zero_reg));
+ // Check if there is room for the variable number of registers above
+ // the stack limit.
+ __ Branch(&stack_ok, hs, a0, Operand(num_registers_ * kPointerSize));
+ // Exit with OutOfMemory exception. There is not enough space on the stack
+ // for our working registers.
+ __ li(a0, Operand(EXCEPTION));
+ __ jmp(&return_v0);
+
+ __ bind(&stack_limit_hit);
+ CallCheckStackGuardState(a0);
+ // If returned value is non-zero, we exit with the returned value as result.
+ __ Branch(&return_v0, ne, a0, Operand(zero_reg));
+
+ __ bind(&stack_ok);
+ // Allocate space on stack for registers.
+ __ Sub_d(sp, sp, Operand(num_registers_ * kPointerSize));
+ // Load string end.
+ __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ // Load input start.
+ __ Ld_d(a0, MemOperand(frame_pointer(), kInputStart));
+ // Find negative length (offset of start relative to end).
+ __ Sub_d(current_input_offset(), a0, end_of_input_address());
+ // Set a0 to address of char before start of the input string
+ // (effectively string position -1).
+ __ Ld_d(a1, MemOperand(frame_pointer(), kStartIndex));
+ __ Sub_d(a0, current_input_offset(), Operand(char_size()));
+ __ slli_d(t1, a1, (mode_ == UC16) ? 1 : 0);
+ __ Sub_d(a0, a0, t1);
+ // Store this value in a local variable, for use when clearing
+ // position registers.
+ __ St_d(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+
+ // Initialize code pointer register
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+
+ Label load_char_start_regexp, start_regexp;
+ // Load newline if index is at start, previous character otherwise.
+ __ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg));
+ __ li(current_character(), Operand('\n'));
+ __ jmp(&start_regexp);
+
+ // Global regexp restarts matching here.
+ __ bind(&load_char_start_regexp);
+ // Load previous char as initial value of current character register.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&start_regexp);
+
+ // Initialize on-stack registers.
+ if (num_saved_registers_ > 0) { // Always is, if generated from a regexp.
+ // Fill saved registers with initial value = start offset - 1.
+ if (num_saved_registers_ > 8) {
+ // Address of register 0.
+ __ Add_d(a1, frame_pointer(), Operand(kRegisterZero));
+ __ li(a2, Operand(num_saved_registers_));
+ Label init_loop;
+ __ bind(&init_loop);
+ __ St_d(a0, MemOperand(a1, 0));
+ __ Add_d(a1, a1, Operand(-kPointerSize));
+ __ Sub_d(a2, a2, Operand(1));
+ __ Branch(&init_loop, ne, a2, Operand(zero_reg));
+ } else {
+ for (int i = 0; i < num_saved_registers_; i++) {
+ __ St_d(a0, register_location(i));
+ }
+ }
+ }
+
+ // Initialize backtrack stack pointer.
+ __ Ld_d(backtrack_stackpointer(),
+ MemOperand(frame_pointer(), kStackHighEnd));
+
+ __ jmp(&start_label_);
+
+ // Exit code:
+ if (success_label_.is_linked()) {
+ // Save captures when successful.
+ __ bind(&success_label_);
+ if (num_saved_registers_ > 0) {
+ // Copy captures to output.
+ __ Ld_d(a1, MemOperand(frame_pointer(), kInputStart));
+ __ Ld_d(a0, MemOperand(frame_pointer(), kRegisterOutput));
+ __ Ld_d(a2, MemOperand(frame_pointer(), kStartIndex));
+ __ Sub_d(a1, end_of_input_address(), a1);
+ // a1 is length of input in bytes.
+ if (mode_ == UC16) {
+ __ srli_d(a1, a1, 1);
+ }
+ // a1 is length of input in characters.
+ __ Add_d(a1, a1, Operand(a2));
+ // a1 is length of string in characters.
+
+ DCHECK_EQ(0, num_saved_registers_ % 2);
+ // Always an even number of capture registers. This allows us to
+ // unroll the loop once to add an operation between a load of a register
+ // and the following use of that register.
+ for (int i = 0; i < num_saved_registers_; i += 2) {
+ __ Ld_d(a2, register_location(i));
+ __ Ld_d(a3, register_location(i + 1));
+ if (i == 0 && global_with_zero_length_check()) {
+          // Keep capture start in t3 for the zero-length check later.
+ __ mov(t3, a2);
+ }
+ if (mode_ == UC16) {
+ __ srai_d(a2, a2, 1);
+ __ Add_d(a2, a2, a1);
+ __ srai_d(a3, a3, 1);
+ __ Add_d(a3, a3, a1);
+ } else {
+ __ Add_d(a2, a1, Operand(a2));
+ __ Add_d(a3, a1, Operand(a3));
+ }
+ // V8 expects the output to be an int32_t array.
+ __ St_w(a2, MemOperand(a0, 0));
+ __ Add_d(a0, a0, kIntSize);
+ __ St_w(a3, MemOperand(a0, 0));
+ __ Add_d(a0, a0, kIntSize);
+ }
+ }
+
+ if (global()) {
+ // Restart matching if the regular expression is flagged as global.
+ __ Ld_d(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ __ Ld_d(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
+ __ Ld_d(a2, MemOperand(frame_pointer(), kRegisterOutput));
+ // Increment success counter.
+ __ Add_d(a0, a0, 1);
+ __ St_d(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ // Capture results have been stored, so the number of remaining global
+ // output registers is reduced by the number of stored captures.
+ __ Sub_d(a1, a1, num_saved_registers_);
+ // Check whether we have enough room for another set of capture results.
+ //__ mov(v0, a0);
+ __ Branch(&return_v0, lt, a1, Operand(num_saved_registers_));
+
+ __ St_d(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
+ // Advance the location for output.
+ __ Add_d(a2, a2, num_saved_registers_ * kIntSize);
+ __ St_d(a2, MemOperand(frame_pointer(), kRegisterOutput));
+
+ // Prepare a0 to initialize registers with its value in the next run.
+ __ Ld_d(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+
+ if (global_with_zero_length_check()) {
+ // Special case for zero-length matches.
+ // t3: capture start index
+ // Not a zero-length match, restart.
+ __ Branch(&load_char_start_regexp, ne, current_input_offset(),
+ Operand(t3));
+ // Offset from the end is zero if we already reached the end.
+ __ Branch(&exit_label_, eq, current_input_offset(),
+ Operand(zero_reg));
+ // Advance current position after a zero-length match.
+ Label advance;
+ __ bind(&advance);
+ __ Add_d(current_input_offset(), current_input_offset(),
+ Operand((mode_ == UC16) ? 2 : 1));
+ if (global_unicode()) CheckNotInSurrogatePair(0, &advance);
+ }
+
+ __ Branch(&load_char_start_regexp);
+ } else {
+ __ li(a0, Operand(SUCCESS));
+ }
+ }
+ // Exit and return v0.
+ __ bind(&exit_label_);
+ if (global()) {
+ __ Ld_d(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+ }
+
+ __ bind(&return_v0);
+  // Skip sp past regexp registers and local variables.
+ __ mov(sp, frame_pointer());
+ // Restore registers s0..s7 and return (restoring ra to pc).
+ __ MultiPop(ra.bit(), fp.bit(), registers_to_retain);
+ __ Ret();
+
+ // Backtrack code (branch target for conditional backtracks).
+ if (backtrack_label_.is_linked()) {
+ __ bind(&backtrack_label_);
+ Backtrack();
+ }
+
+ Label exit_with_exception;
+
+ // Preempt-code.
+ if (check_preempt_label_.is_linked()) {
+ SafeCallTarget(&check_preempt_label_);
+ // Put regexp engine registers on stack.
+ RegList regexp_registers_to_retain = current_input_offset().bit() |
+ current_character().bit() |
+ backtrack_stackpointer().bit();
+ __ MultiPush(regexp_registers_to_retain);
+ CallCheckStackGuardState(a0);
+ __ MultiPop(regexp_registers_to_retain);
+ // If returning non-zero, we should end execution with the given
+ // result as return value.
+ __ Branch(&return_v0, ne, a0, Operand(zero_reg));
+
+ // String might have moved: Reload end of string from frame.
+ __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+ SafeReturn();
+ }
+
+ // Backtrack stack overflow code.
+ if (stack_overflow_label_.is_linked()) {
+ SafeCallTarget(&stack_overflow_label_);
+ // Reached if the backtrack-stack limit has been hit.
+ // Put regexp engine registers on stack first.
+ RegList regexp_registers =
+ current_input_offset().bit() | current_character().bit();
+ __ MultiPush(regexp_registers);
+
+ // Call GrowStack(backtrack_stackpointer(), &stack_base)
+ static const int num_arguments = 3;
+ __ PrepareCallCFunction(num_arguments, a0);
+ __ mov(a0, backtrack_stackpointer());
+ __ Add_d(a1, frame_pointer(), Operand(kStackHighEnd));
+ __ li(a2, Operand(ExternalReference::isolate_address(masm_->isolate())));
+ ExternalReference grow_stack =
+ ExternalReference::re_grow_stack(masm_->isolate());
+ __ CallCFunction(grow_stack, num_arguments);
+ // Restore regexp registers.
+ __ MultiPop(regexp_registers);
+  // If the call returned nullptr, we have failed to grow the stack and
+  // must exit with a stack-overflow exception.
+ __ Branch(&exit_with_exception, eq, a0, Operand(zero_reg));
+ // Otherwise use return value as new stack pointer.
+ __ mov(backtrack_stackpointer(), a0);
+ // Restore saved registers and continue.
+ __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+ __ Ld_d(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+ SafeReturn();
+ }
+
+ if (exit_with_exception.is_linked()) {
+ // If any of the code above needed to exit with an exception.
+ __ bind(&exit_with_exception);
+ // Exit with Result EXCEPTION(-1) to signal thrown exception.
+ __ li(a0, Operand(EXCEPTION));
+ __ jmp(&return_v0);
+ }
+
+ if (fallback_label_.is_linked()) {
+ __ bind(&fallback_label_);
+ __ li(a0, Operand(FALLBACK_TO_EXPERIMENTAL));
+ __ jmp(&return_v0);
+ }
+ }
+
+ CodeDesc code_desc;
+ masm_->GetCode(isolate(), &code_desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate(), code_desc, CodeKind::REGEXP)
+ .set_self_reference(masm_->CodeObject())
+ .Build();
+ LOG(masm_->isolate(),
+ RegExpCodeCreateEvent(Handle<AbstractCode>::cast(code), source));
+ return Handle<HeapObject>::cast(code);
+}
+
+void RegExpMacroAssemblerLOONG64::GoTo(Label* to) {
+ if (to == nullptr) {
+ Backtrack();
+ return;
+ }
+ __ jmp(to);
+ return;
+}
+
+void RegExpMacroAssemblerLOONG64::IfRegisterGE(int reg, int comparand,
+ Label* if_ge) {
+ __ Ld_d(a0, register_location(reg));
+ BranchOrBacktrack(if_ge, ge, a0, Operand(comparand));
+}
+
+void RegExpMacroAssemblerLOONG64::IfRegisterLT(int reg, int comparand,
+ Label* if_lt) {
+ __ Ld_d(a0, register_location(reg));
+ BranchOrBacktrack(if_lt, lt, a0, Operand(comparand));
+}
+
+void RegExpMacroAssemblerLOONG64::IfRegisterEqPos(int reg, Label* if_eq) {
+ __ Ld_d(a0, register_location(reg));
+ BranchOrBacktrack(if_eq, eq, a0, Operand(current_input_offset()));
+}
+
+RegExpMacroAssembler::IrregexpImplementation
+RegExpMacroAssemblerLOONG64::Implementation() {
+ return kLOONG64Implementation;
+}
+
+void RegExpMacroAssemblerLOONG64::PopCurrentPosition() {
+ Pop(current_input_offset());
+}
+
+void RegExpMacroAssemblerLOONG64::PopRegister(int register_index) {
+ Pop(a0);
+ __ St_d(a0, register_location(register_index));
+}
+
+void RegExpMacroAssemblerLOONG64::PushBacktrack(Label* label) {
+ if (label->is_bound()) {
+ int target = label->pos();
+ __ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
+ } else {
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ Label after_constant;
+ __ Branch(&after_constant);
+ int offset = masm_->pc_offset();
+ int cp_offset = offset + Code::kHeaderSize - kHeapObjectTag;
+ //__ emit(0);
+ __ nop();
+ masm_->label_at_put(label, offset);
+ __ bind(&after_constant);
+ if (is_int12(cp_offset)) {
+ __ Ld_wu(a0, MemOperand(code_pointer(), cp_offset));
+ } else {
+ __ Add_d(a0, code_pointer(), cp_offset);
+ __ Ld_wu(a0, MemOperand(a0, 0));
+ }
+ }
+ Push(a0);
+ CheckStackLimit();
+}
+
+void RegExpMacroAssemblerLOONG64::PushCurrentPosition() {
+ Push(current_input_offset());
+}
+
+void RegExpMacroAssemblerLOONG64::PushRegister(
+ int register_index, StackCheckFlag check_stack_limit) {
+ __ Ld_d(a0, register_location(register_index));
+ Push(a0);
+ if (check_stack_limit) CheckStackLimit();
+}
+
+void RegExpMacroAssemblerLOONG64::ReadCurrentPositionFromRegister(int reg) {
+ __ Ld_d(current_input_offset(), register_location(reg));
+}
+
+void RegExpMacroAssemblerLOONG64::ReadStackPointerFromRegister(int reg) {
+ __ Ld_d(backtrack_stackpointer(), register_location(reg));
+ __ Ld_d(a0, MemOperand(frame_pointer(), kStackHighEnd));
+ __ Add_d(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0));
+}
+
+void RegExpMacroAssemblerLOONG64::SetCurrentPositionFromEnd(int by) {
+ Label after_position;
+ __ Branch(&after_position, ge, current_input_offset(),
+ Operand(-by * char_size()));
+ __ li(current_input_offset(), -by * char_size());
+ // On RegExp code entry (where this operation is used), the character before
+ // the current position is expected to be already loaded.
+ // We have advanced the position, so it's safe to read backwards.
+ LoadCurrentCharacterUnchecked(-1, 1);
+ __ bind(&after_position);
+}
+
+void RegExpMacroAssemblerLOONG64::SetRegister(int register_index, int to) {
+ DCHECK(register_index >= num_saved_registers_); // Reserved for positions!
+ __ li(a0, Operand(to));
+ __ St_d(a0, register_location(register_index));
+}
+
+bool RegExpMacroAssemblerLOONG64::Succeed() {
+ __ jmp(&success_label_);
+ return global();
+}
+
+void RegExpMacroAssemblerLOONG64::WriteCurrentPositionToRegister(
+ int reg, int cp_offset) {
+ if (cp_offset == 0) {
+ __ St_d(current_input_offset(), register_location(reg));
+ } else {
+ __ Add_d(a0, current_input_offset(), Operand(cp_offset * char_size()));
+ __ St_d(a0, register_location(reg));
+ }
+}
+
+void RegExpMacroAssemblerLOONG64::ClearRegisters(int reg_from, int reg_to) {
+ DCHECK(reg_from <= reg_to);
+ __ Ld_d(a0, MemOperand(frame_pointer(), kStringStartMinusOne));
+ for (int reg = reg_from; reg <= reg_to; reg++) {
+ __ St_d(a0, register_location(reg));
+ }
+}
+
+void RegExpMacroAssemblerLOONG64::WriteStackPointerToRegister(int reg) {
+ __ Ld_d(a1, MemOperand(frame_pointer(), kStackHighEnd));
+ __ Sub_d(a0, backtrack_stackpointer(), a1);
+ __ St_d(a0, register_location(reg));
+}
+
+// Private methods:
+
+void RegExpMacroAssemblerLOONG64::CallCheckStackGuardState(Register scratch) {
+ DCHECK(!isolate()->IsGeneratingEmbeddedBuiltins());
+ DCHECK(!masm_->options().isolate_independent_code);
+
+ int stack_alignment = base::OS::ActivationFrameAlignment();
+
+ // Align the stack pointer and save the original sp value on the stack.
+ __ mov(scratch, sp);
+ __ Sub_d(sp, sp, Operand(kPointerSize));
+ DCHECK(base::bits::IsPowerOfTwo(stack_alignment));
+ __ And(sp, sp, Operand(-stack_alignment));
+ __ St_d(scratch, MemOperand(sp, 0));
+
+ __ mov(a2, frame_pointer());
+ // Code of self.
+ __ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE);
+
+ // We need to make room for the return address on the stack.
+ DCHECK(IsAligned(stack_alignment, kPointerSize));
+ __ Sub_d(sp, sp, Operand(stack_alignment));
+
+ // The stack pointer now points to cell where the return address will be
+ // written. Arguments are in registers, meaning we treat the return address as
+ // argument 5. Since DirectCEntry will handle allocating space for the C
+ // argument slots, we don't need to care about that here. This is how the
+ // stack will look (sp meaning the value of sp at this moment):
+ // [sp + 3] - empty slot if needed for alignment.
+ // [sp + 2] - saved sp.
+ // [sp + 1] - second word reserved for return value.
+ // [sp + 0] - first word reserved for return value.
+
+ // a0 will point to the return address, placed by DirectCEntry.
+ __ mov(a0, sp);
+
+ ExternalReference stack_guard_check =
+ ExternalReference::re_check_stack_guard_state(masm_->isolate());
+ __ li(t7, Operand(stack_guard_check));
+
+ EmbeddedData d = EmbeddedData::FromBlob();
+ CHECK(Builtins::IsIsolateIndependent(Builtin::kDirectCEntry));
+ Address entry = d.InstructionStartOfBuiltin(Builtin::kDirectCEntry);
+ __ li(kScratchReg, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
+ __ Call(kScratchReg);
+
+ // DirectCEntry allocated space for the C argument slots so we have to
+  // drop them, together with the return address, by loading the saved sp.
+  // At this point the stack must look like this:
+ // [sp + 7] - empty slot if needed for alignment.
+ // [sp + 6] - saved sp.
+ // [sp + 5] - second word reserved for return value.
+ // [sp + 4] - first word reserved for return value.
+ // [sp + 3] - C argument slot.
+ // [sp + 2] - C argument slot.
+ // [sp + 1] - C argument slot.
+ // [sp + 0] - C argument slot.
+ __ Ld_d(sp, MemOperand(sp, stack_alignment));
+
+ __ li(code_pointer(), Operand(masm_->CodeObject()));
+}
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+ return reinterpret_cast<T&>(Memory<int32_t>(re_frame + frame_offset));
+}
+
+template <typename T>
+static T* frame_entry_address(Address re_frame, int frame_offset) {
+ return reinterpret_cast<T*>(re_frame + frame_offset);
+}
+
+int64_t RegExpMacroAssemblerLOONG64::CheckStackGuardState(
+ Address* return_address, Address raw_code, Address re_frame) {
+ Code re_code = Code::cast(Object(raw_code));
+ return NativeRegExpMacroAssembler::CheckStackGuardState(
+ frame_entry<Isolate*>(re_frame, kIsolate),
+ static_cast<int>(frame_entry<int64_t>(re_frame, kStartIndex)),
+ static_cast<RegExp::CallOrigin>(
+ frame_entry<int64_t>(re_frame, kDirectCall)),
+ return_address, re_code,
+ frame_entry_address<Address>(re_frame, kInputString),
+ frame_entry_address<const byte*>(re_frame, kInputStart),
+ frame_entry_address<const byte*>(re_frame, kInputEnd));
+}
+
+MemOperand RegExpMacroAssemblerLOONG64::register_location(int register_index) {
+ DCHECK(register_index < (1 << 30));
+ if (num_registers_ <= register_index) {
+ num_registers_ = register_index + 1;
+ }
+ return MemOperand(frame_pointer(),
+ kRegisterZero - register_index * kPointerSize);
+}
+
+void RegExpMacroAssemblerLOONG64::CheckPosition(int cp_offset,
+ Label* on_outside_input) {
+ if (cp_offset >= 0) {
+ BranchOrBacktrack(on_outside_input, ge, current_input_offset(),
+ Operand(-cp_offset * char_size()));
+ } else {
+ __ Ld_d(a1, MemOperand(frame_pointer(), kStringStartMinusOne));
+ __ Add_d(a0, current_input_offset(), Operand(cp_offset * char_size()));
+ BranchOrBacktrack(on_outside_input, le, a0, Operand(a1));
+ }
+}
+
+void RegExpMacroAssemblerLOONG64::BranchOrBacktrack(Label* to,
+ Condition condition,
+ Register rs,
+ const Operand& rt) {
+ if (condition == al) { // Unconditional.
+ if (to == nullptr) {
+ Backtrack();
+ return;
+ }
+ __ jmp(to);
+ return;
+ }
+ if (to == nullptr) {
+ __ Branch(&backtrack_label_, condition, rs, rt);
+ return;
+ }
+ __ Branch(to, condition, rs, rt);
+}
+
+void RegExpMacroAssemblerLOONG64::SafeCall(Label* to, Condition cond,
+ Register rs, const Operand& rt) {
+ __ Branch(to, cond, rs, rt, true);
+}
+
+void RegExpMacroAssemblerLOONG64::SafeReturn() {
+ __ Pop(ra);
+ __ Add_d(t1, ra, Operand(masm_->CodeObject()));
+ __ Jump(t1);
+}
+
+void RegExpMacroAssemblerLOONG64::SafeCallTarget(Label* name) {
+ __ bind(name);
+ __ Sub_d(ra, ra, Operand(masm_->CodeObject()));
+ __ Push(ra);
+}
+
+void RegExpMacroAssemblerLOONG64::Push(Register source) {
+ DCHECK(source != backtrack_stackpointer());
+ __ Add_d(backtrack_stackpointer(), backtrack_stackpointer(),
+ Operand(-kIntSize));
+ __ St_w(source, MemOperand(backtrack_stackpointer(), 0));
+}
+
+void RegExpMacroAssemblerLOONG64::Pop(Register target) {
+ DCHECK(target != backtrack_stackpointer());
+ __ Ld_w(target, MemOperand(backtrack_stackpointer(), 0));
+ __ Add_d(backtrack_stackpointer(), backtrack_stackpointer(), kIntSize);
+}
+
+void RegExpMacroAssemblerLOONG64::CheckPreemption() {
+ // Check for preemption.
+ ExternalReference stack_limit =
+ ExternalReference::address_of_jslimit(masm_->isolate());
+ __ li(a0, Operand(stack_limit));
+ __ Ld_d(a0, MemOperand(a0, 0));
+ SafeCall(&check_preempt_label_, ls, sp, Operand(a0));
+}
+
+void RegExpMacroAssemblerLOONG64::CheckStackLimit() {
+ ExternalReference stack_limit =
+ ExternalReference::address_of_regexp_stack_limit_address(
+ masm_->isolate());
+
+ __ li(a0, Operand(stack_limit));
+ __ Ld_d(a0, MemOperand(a0, 0));
+ SafeCall(&stack_overflow_label_, ls, backtrack_stackpointer(), Operand(a0));
+}
+
+void RegExpMacroAssemblerLOONG64::LoadCurrentCharacterUnchecked(
+ int cp_offset, int characters) {
+ Register offset = current_input_offset();
+
+ // If unaligned load/stores are not supported then this function must only
+ // be used to load a single character at a time.
+ if (!CanReadUnaligned()) {
+ DCHECK_EQ(1, characters);
+ }
+
+ if (cp_offset != 0) {
+ // t3 is not being used to store the capture start index at this point.
+ __ Add_d(t3, current_input_offset(), Operand(cp_offset * char_size()));
+ offset = t3;
+ }
+
+ if (mode_ == LATIN1) {
+ if (characters == 4) {
+ __ Ld_wu(current_character(), MemOperand(end_of_input_address(), offset));
+ } else if (characters == 2) {
+ __ Ld_hu(current_character(), MemOperand(end_of_input_address(), offset));
+ } else {
+ DCHECK_EQ(1, characters);
+ __ Ld_bu(current_character(), MemOperand(end_of_input_address(), offset));
+ }
+ } else {
+ DCHECK(mode_ == UC16);
+ if (characters == 2) {
+ __ Ld_wu(current_character(), MemOperand(end_of_input_address(), offset));
+ } else {
+ DCHECK_EQ(1, characters);
+ __ Ld_hu(current_character(), MemOperand(end_of_input_address(), offset));
+ }
+ }
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_TARGET_ARCH_LOONG64
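
Two idioms in the file above deserve a portable restatement. The comment at the top of CheckSpecialCharacterClass notes that range checks `c in min..max` are compiled to one unsigned comparison, `(c - min) <= (max - min)`; and the Latin-1 branch of CheckNotBackReferenceIgnoreCase folds case by OR-ing both bytes with 0x20, then accepts only genuine letters: ASCII 'a'..'z', or Latin-1 224..254 excluding 247 (the division sign, which the fold would otherwise conflate with 215, the multiplication sign). A minimal sketch of the same logic on plain bytes, not tied to any V8 type:

#include <cstdint>

// Unsigned range-check idiom: c in [min, max] in a single compare.
// Values below `min` wrap around to huge unsigned numbers and fail.
inline bool InRange(uint32_t c, uint32_t min, uint32_t max) {
  return (c - min) <= (max - min);
}

// Case-insensitive equality for Latin-1 bytes, mirroring the generated code.
bool Latin1CaseEqual(uint8_t a, uint8_t b) {
  if (a == b) return true;
  uint8_t la = a | 0x20;  // Fold to lower case by setting bit 5.
  uint8_t lb = b | 0x20;
  if (la != lb) return false;
  if (InRange(la, 'a', 'z')) return true;  // ASCII letters.
  // Latin-1 letters 224..254; 247 is the division sign, not a letter.
  return InRange(la, 224, 254) && la != 247;
}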
diff --git a/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h
new file mode 100644
index 0000000000..ea567543db
--- /dev/null
+++ b/deps/v8/src/regexp/loong64/regexp-macro-assembler-loong64.h
@@ -0,0 +1,214 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_LOONG64_REGEXP_MACRO_ASSEMBLER_LOONG64_H_
+#define V8_REGEXP_LOONG64_REGEXP_MACRO_ASSEMBLER_LOONG64_H_
+
+#include "src/base/strings.h"
+#include "src/codegen/loong64/assembler-loong64.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/regexp/regexp-macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class V8_EXPORT_PRIVATE RegExpMacroAssemblerLOONG64
+ : public NativeRegExpMacroAssembler {
+ public:
+ RegExpMacroAssemblerLOONG64(Isolate* isolate, Zone* zone, Mode mode,
+ int registers_to_save);
+ virtual ~RegExpMacroAssemblerLOONG64();
+ virtual int stack_limit_slack();
+ virtual void AdvanceCurrentPosition(int by);
+ virtual void AdvanceRegister(int reg, int by);
+ virtual void Backtrack();
+ virtual void Bind(Label* label);
+ virtual void CheckAtStart(int cp_offset, Label* on_at_start);
+ virtual void CheckCharacter(uint32_t c, Label* on_equal);
+ virtual void CheckCharacterAfterAnd(uint32_t c, uint32_t mask,
+ Label* on_equal);
+ virtual void CheckCharacterGT(base::uc16 limit, Label* on_greater);
+ virtual void CheckCharacterLT(base::uc16 limit, Label* on_less);
+ // A "greedy loop" is a loop that is both greedy and with a simple
+ // body. It has a particularly simple implementation.
+ virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+ virtual void CheckNotAtStart(int cp_offset, Label* on_not_at_start);
+ virtual void CheckNotBackReference(int start_reg, bool read_backward,
+ Label* on_no_match);
+ virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+ bool read_backward, bool unicode,
+ Label* on_no_match);
+ virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
+ virtual void CheckNotCharacterAfterAnd(uint32_t c, uint32_t mask,
+ Label* on_not_equal);
+ virtual void CheckNotCharacterAfterMinusAnd(base::uc16 c, base::uc16 minus,
+ base::uc16 mask,
+ Label* on_not_equal);
+ virtual void CheckCharacterInRange(base::uc16 from, base::uc16 to,
+ Label* on_in_range);
+ virtual void CheckCharacterNotInRange(base::uc16 from, base::uc16 to,
+ Label* on_not_in_range);
+ virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+
+ // Checks whether the given offset from the current position is before
+ // the end of the string.
+ virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+ virtual bool CheckSpecialCharacterClass(base::uc16 type, Label* on_no_match);
+ virtual void Fail();
+ virtual Handle<HeapObject> GetCode(Handle<String> source);
+ virtual void GoTo(Label* label);
+ virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+ virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+ virtual void IfRegisterEqPos(int reg, Label* if_eq);
+ virtual IrregexpImplementation Implementation();
+ virtual void LoadCurrentCharacterUnchecked(int cp_offset,
+ int character_count);
+ virtual void PopCurrentPosition();
+ virtual void PopRegister(int register_index);
+ virtual void PushBacktrack(Label* label);
+ virtual void PushCurrentPosition();
+ virtual void PushRegister(int register_index,
+ StackCheckFlag check_stack_limit);
+ virtual void ReadCurrentPositionFromRegister(int reg);
+ virtual void ReadStackPointerFromRegister(int reg);
+ virtual void SetCurrentPositionFromEnd(int by);
+ virtual void SetRegister(int register_index, int to);
+ virtual bool Succeed();
+ virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+ virtual void ClearRegisters(int reg_from, int reg_to);
+ virtual void WriteStackPointerToRegister(int reg);
+
+ // Called from RegExp if the stack-guard is triggered.
+ // If the code object is relocated, the return address is fixed before
+ // returning.
+ // {raw_code} is an Address because this is called via ExternalReference.
+ static int64_t CheckStackGuardState(Address* return_address, Address raw_code,
+ Address re_frame);
+
+ void print_regexp_frame_constants();
+
+ private:
+ // Offsets from frame_pointer() of function parameters and stored registers.
+ static const int kFramePointer = 0;
+
+ // Above the frame pointer - Stored registers and stack passed parameters.
+ // Registers s0 to s7, fp, and ra.
+ static const int kStoredRegisters = kFramePointer;
+ // Return address (stored from link register, read into pc on return).
+
+  // TODO(plind): This 9 is 8 s-regs (s0..s7) plus fp.
+
+ static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
+ // Stack frame header.
+ static const int kStackFrameHeader = kReturnAddress;
+ // Stack parameters placed by caller.
+ static const int kIsolate = kStackFrameHeader + kPointerSize;
+
+ // Below the frame pointer.
+ // Register parameters stored by setup code.
+ static const int kDirectCall = kFramePointer - kPointerSize;
+ static const int kStackHighEnd = kDirectCall - kPointerSize;
+ static const int kNumOutputRegisters = kStackHighEnd - kPointerSize;
+ static const int kRegisterOutput = kNumOutputRegisters - kPointerSize;
+ static const int kInputEnd = kRegisterOutput - kPointerSize;
+ static const int kInputStart = kInputEnd - kPointerSize;
+ static const int kStartIndex = kInputStart - kPointerSize;
+ static const int kInputString = kStartIndex - kPointerSize;
+ // When adding local variables remember to push space for them in
+ // the frame in GetCode.
+ static const int kSuccessfulCaptures = kInputString - kPointerSize;
+ static const int kStringStartMinusOne = kSuccessfulCaptures - kPointerSize;
+ static const int kBacktrackCount = kStringStartMinusOne - kSystemPointerSize;
+ // First register address. Following registers are below it on the stack.
+ static const int kRegisterZero = kBacktrackCount - kSystemPointerSize;
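+  // Worked example (editor's sketch, assuming 8-byte pointers): with
+  // kStoredRegisters at fp + 0, kReturnAddress lands at fp + 72
+  // (9 registers * 8 bytes), kIsolate at fp + 80, and kDirectCall at
+  // fp - 8; regexp register N is then addressed at
+  // kRegisterZero - N * kPointerSize (see register_location() below).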
+
+ // Initial size of code buffer.
+ static const int kRegExpCodeSize = 1024;
+
+ // Check whether preemption has been requested.
+ void CheckPreemption();
+
+ // Check whether we are exceeding the stack limit on the backtrack stack.
+ void CheckStackLimit();
+
+ // Generate a call to CheckStackGuardState.
+ void CallCheckStackGuardState(Register scratch);
+
+  // The fp-relative location of a regexp register.
+ MemOperand register_location(int register_index);
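+  // For instance (editor's sketch), register_location(3) would resolve to
+  //   MemOperand(frame_pointer(), kRegisterZero - 3 * kPointerSize);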
+
+ // Register holding the current input position as negative offset from
+ // the end of the string.
+ inline Register current_input_offset() { return a6; }
+
+ // The register containing the current character after LoadCurrentCharacter.
+ inline Register current_character() { return a7; }
+
+ // Register holding address of the end of the input string.
+ inline Register end_of_input_address() { return t2; }
+
+ // Register holding the frame address. Local variables, parameters and
+ // regexp registers are addressed relative to this.
+ inline Register frame_pointer() { return fp; }
+
+ // The register containing the backtrack stack top. Provides a meaningful
+ // name to the register.
+ inline Register backtrack_stackpointer() { return t0; }
+
+ // Register holding pointer to the current code object.
+ inline Register code_pointer() { return a5; }
+
+ // Byte size of chars in the string to match (decided by the Mode argument).
+ inline int char_size() { return static_cast<int>(mode_); }
+
+ // Equivalent to a conditional branch to the label, unless the label
+ // is nullptr, in which case it is a conditional Backtrack.
+ void BranchOrBacktrack(Label* to, Condition condition, Register rs,
+ const Operand& rt);
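+  // e.g. (editor's sketch):
+  //   BranchOrBacktrack(on_equal, eq, current_character(), Operand('x'));
+  // branches to on_equal when the current character is 'x', and emits a
+  // backtrack instead when on_equal is nullptr.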
+
+  // Call and return internally in the generated code in a way that
+  // is GC-safe (i.e., doesn't leave absolute code addresses on the stack).
+ inline void SafeCall(Label* to, Condition cond, Register rs,
+ const Operand& rt);
+ inline void SafeReturn();
+ inline void SafeCallTarget(Label* name);
+
+ // Pushes the value of a register on the backtrack stack. Decrements the
+ // stack pointer by a word size and stores the register's value there.
+ inline void Push(Register source);
+
+ // Pops a value from the backtrack stack. Reads the word at the stack pointer
+ // and increments it by a word size.
+ inline void Pop(Register target);
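+  // Typical pairing (editor's sketch): a choice point saves state with
+  //   Push(current_input_offset());
+  // and the matching backtrack path restores it with
+  //   Pop(current_input_offset());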
+
+ Isolate* isolate() const { return masm_->isolate(); }
+
+ MacroAssembler* masm_;
+
+ // Which mode to generate code for (Latin1 or UC16).
+ Mode mode_;
+
+  // One greater than the maximal register index actually used.
+ int num_registers_;
+
+ // Number of registers to output at the end (the saved registers
+ // are always 0..num_saved_registers_-1).
+ int num_saved_registers_;
+
+ // Labels used internally.
+ Label entry_label_;
+ Label start_label_;
+ Label success_label_;
+ Label backtrack_label_;
+ Label exit_label_;
+ Label check_preempt_label_;
+ Label stack_overflow_label_;
+ Label internal_failure_label_;
+ Label fallback_label_;
+};
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_REGEXP_LOONG64_REGEXP_MACRO_ASSEMBLER_LOONG64_H_
diff --git a/deps/v8/src/regexp/regexp-ast.h b/deps/v8/src/regexp/regexp-ast.h
index 2b9f767c24..3f771976fd 100644
--- a/deps/v8/src/regexp/regexp-ast.h
+++ b/deps/v8/src/regexp/regexp-ast.h
@@ -6,10 +6,7 @@
#define V8_REGEXP_REGEXP_AST_H_
#include "src/base/strings.h"
-#include "src/objects/js-regexp.h"
-#include "src/objects/objects.h"
-#include "src/objects/string.h"
-#include "src/utils/utils.h"
+#include "src/regexp/regexp-flags.h"
#include "src/zone/zone-containers.h"
#include "src/zone/zone-list.h"
#include "src/zone/zone.h"
@@ -96,13 +93,14 @@ class CharacterRange {
static inline CharacterRange Singleton(base::uc32 value) {
return CharacterRange(value, value);
}
+ static constexpr int kMaxCodePoint = 0x10ffff;
static inline CharacterRange Range(base::uc32 from, base::uc32 to) {
- DCHECK(0 <= from && to <= String::kMaxCodePoint);
+ DCHECK(0 <= from && to <= kMaxCodePoint);
DCHECK(static_cast<uint32_t>(from) <= static_cast<uint32_t>(to));
return CharacterRange(from, to);
}
static inline CharacterRange Everything() {
- return CharacterRange(0, String::kMaxCodePoint);
+ return CharacterRange(0, kMaxCodePoint);
}
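+  // Examples (editor's note): Singleton('a') is the range ['a', 'a'], and
+  // Everything() spans [0, kMaxCodePoint], i.e. all Unicode code points
+  // including the supplementary planes.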
static inline ZoneList<CharacterRange>* List(Zone* zone,
CharacterRange range) {
@@ -566,9 +564,9 @@ class RegExpLookaround final : public RegExpTree {
class RegExpBackReference final : public RegExpTree {
public:
- explicit RegExpBackReference(JSRegExp::Flags flags)
+ explicit RegExpBackReference(RegExpFlags flags)
: capture_(nullptr), name_(nullptr), flags_(flags) {}
- RegExpBackReference(RegExpCapture* capture, JSRegExp::Flags flags)
+ RegExpBackReference(RegExpCapture* capture, RegExpFlags flags)
: capture_(capture), name_(nullptr), flags_(flags) {}
void* Accept(RegExpVisitor* visitor, void* data) override;
RegExpNode* ToNode(RegExpCompiler* compiler, RegExpNode* on_success) override;
@@ -587,7 +585,7 @@ class RegExpBackReference final : public RegExpTree {
private:
RegExpCapture* capture_;
const ZoneVector<base::uc16>* name_;
- const JSRegExp::Flags flags_;
+ const RegExpFlags flags_;
};
diff --git a/deps/v8/src/regexp/regexp-bytecode-generator-inl.h b/deps/v8/src/regexp/regexp-bytecode-generator-inl.h
index 2a6ffec929..bfdd9df93c 100644
--- a/deps/v8/src/regexp/regexp-bytecode-generator-inl.h
+++ b/deps/v8/src/regexp/regexp-bytecode-generator-inl.h
@@ -7,7 +7,6 @@
#include "src/regexp/regexp-bytecode-generator.h"
-#include "src/ast/ast.h"
#include "src/regexp/regexp-bytecodes.h"
namespace v8 {
diff --git a/deps/v8/src/regexp/regexp-bytecode-generator.cc b/deps/v8/src/regexp/regexp-bytecode-generator.cc
index 397f4ba87a..c5ad2bfba5 100644
--- a/deps/v8/src/regexp/regexp-bytecode-generator.cc
+++ b/deps/v8/src/regexp/regexp-bytecode-generator.cc
@@ -5,7 +5,7 @@
#include "src/regexp/regexp-bytecode-generator.h"
#include "src/ast/ast.h"
-#include "src/objects/objects-inl.h"
+#include "src/objects/fixed-array-inl.h"
#include "src/regexp/regexp-bytecode-generator-inl.h"
#include "src/regexp/regexp-bytecode-peephole.h"
#include "src/regexp/regexp-bytecodes.h"
diff --git a/deps/v8/src/regexp/regexp-bytecode-generator.h b/deps/v8/src/regexp/regexp-bytecode-generator.h
index 466b535c7e..310ab32cec 100644
--- a/deps/v8/src/regexp/regexp-bytecode-generator.h
+++ b/deps/v8/src/regexp/regexp-bytecode-generator.h
@@ -6,6 +6,7 @@
#define V8_REGEXP_REGEXP_BYTECODE_GENERATOR_H_
#include "src/base/strings.h"
+#include "src/codegen/label.h"
#include "src/regexp/regexp-macro-assembler.h"
namespace v8 {
diff --git a/deps/v8/src/regexp/regexp-bytecode-peephole.cc b/deps/v8/src/regexp/regexp-bytecode-peephole.cc
index fc64db9013..20de4565d2 100644
--- a/deps/v8/src/regexp/regexp-bytecode-peephole.cc
+++ b/deps/v8/src/regexp/regexp-bytecode-peephole.cc
@@ -4,10 +4,8 @@
#include "src/regexp/regexp-bytecode-peephole.h"
-#include "src/execution/isolate.h"
#include "src/flags/flags.h"
-#include "src/objects/fixed-array.h"
-#include "src/objects/objects-inl.h"
+#include "src/objects/fixed-array-inl.h"
#include "src/regexp/regexp-bytecodes.h"
#include "src/utils/memcopy.h"
#include "src/utils/utils.h"
diff --git a/deps/v8/src/regexp/regexp-compiler-tonode.cc b/deps/v8/src/regexp/regexp-compiler-tonode.cc
index f668aa6d84..b80eefae6d 100644
--- a/deps/v8/src/regexp/regexp-compiler-tonode.cc
+++ b/deps/v8/src/regexp/regexp-compiler-tonode.cc
@@ -6,14 +6,12 @@
#include "src/execution/isolate.h"
#include "src/regexp/regexp.h"
-#ifdef V8_INTL_SUPPORT
-#include "src/regexp/special-case.h"
-#endif // V8_INTL_SUPPORT
#include "src/strings/unicode-inl.h"
#include "src/zone/zone-list-inl.h"
#ifdef V8_INTL_SUPPORT
#include "src/base/strings.h"
+#include "src/regexp/special-case.h"
#include "unicode/locid.h"
#include "unicode/uniset.h"
#include "unicode/utypes.h"
@@ -24,6 +22,11 @@ namespace internal {
using namespace regexp_compiler_constants; // NOLINT(build/namespaces)
+constexpr base::uc32 kMaxCodePoint = 0x10ffff;
+constexpr int kMaxUtf16CodeUnit = 0xffff;
+constexpr uint32_t kMaxUtf16CodeUnitU = 0xffff;
+constexpr int32_t kMaxOneByteCharCode = unibrow::Latin1::kMaxChar;
+
// -------------------------------------------------------------------
// Tree to graph conversion
@@ -65,7 +68,7 @@ static bool CompareInverseRanges(ZoneList<CharacterRange>* ranges,
return false;
}
}
- if (range.to() != String::kMaxCodePoint) {
+ if (range.to() != kMaxCodePoint) {
return false;
}
return true;
@@ -359,8 +362,8 @@ RegExpNode* UnanchoredAdvance(RegExpCompiler* compiler,
// we advanced into the middle of a surrogate pair, it will work out, as
// nothing will match from there. We will have to advance again, consuming
// the associated trail surrogate.
- ZoneList<CharacterRange>* range = CharacterRange::List(
- zone, CharacterRange::Range(0, String::kMaxUtf16CodeUnit));
+ ZoneList<CharacterRange>* range =
+ CharacterRange::List(zone, CharacterRange::Range(0, kMaxUtf16CodeUnit));
return TextNode::CreateForCharacterRanges(zone, range, false, on_success);
}
@@ -518,7 +521,7 @@ bool RegExpDisjunction::SortConsecutiveAtoms(RegExpCompiler* compiler) {
DCHECK_LT(first_atom, alternatives->length());
DCHECK_LE(i, alternatives->length());
DCHECK_LE(first_atom, i);
- if (IgnoreCase(compiler->flags())) {
+ if (IsIgnoreCase(compiler->flags())) {
#ifdef V8_INTL_SUPPORT
alternatives->StableSort(CompareFirstCharCaseInsensitve, first_atom,
i - first_atom);
@@ -570,14 +573,14 @@ void RegExpDisjunction::RationalizeConsecutiveAtoms(RegExpCompiler* compiler) {
#ifdef V8_INTL_SUPPORT
icu::UnicodeString new_prefix(atom->data().at(0));
if (new_prefix != common_prefix) {
- if (!IgnoreCase(compiler->flags())) break;
+ if (!IsIgnoreCase(compiler->flags())) break;
if (common_prefix.caseCompare(new_prefix, U_FOLD_CASE_DEFAULT) != 0)
break;
}
#else
unibrow::uchar new_prefix = atom->data().at(0);
if (new_prefix != common_prefix) {
- if (!IgnoreCase(compiler->flags())) break;
+ if (!IsIgnoreCase(compiler->flags())) break;
unibrow::Mapping<unibrow::Ecma262Canonicalize>* canonicalize =
compiler->isolate()->regexp_macro_assembler_canonicalize();
new_prefix = Canonical(canonicalize, new_prefix);
@@ -658,7 +661,7 @@ void RegExpDisjunction::FixSingleCharacterDisjunctions(
i++;
continue;
}
- const JSRegExp::Flags flags = compiler->flags();
+ const RegExpFlags flags = compiler->flags();
DCHECK_IMPLIES(IsUnicode(flags),
!unibrow::Utf16::IsLeadSurrogate(atom->data().at(0)));
bool contains_trail_surrogate =
@@ -740,7 +743,7 @@ namespace {
RegExpNode* BoundaryAssertionAsLookaround(RegExpCompiler* compiler,
RegExpNode* on_success,
RegExpAssertion::AssertionType type,
- JSRegExp::Flags flags) {
+ RegExpFlags flags) {
CHECK(NeedsUnicodeCaseEquivalents(flags));
Zone* zone = compiler->zone();
ZoneList<CharacterRange>* word_range =
@@ -1038,7 +1041,7 @@ static void AddClassNegated(const int* elmv, int elmc,
elmc--;
DCHECK_EQ(kRangeEndMarker, elmv[elmc]);
DCHECK_NE(0x0000, elmv[0]);
- DCHECK_NE(String::kMaxCodePoint, elmv[elmc - 1]);
+ DCHECK_NE(kMaxCodePoint, elmv[elmc - 1]);
base::uc16 last = 0x0000;
for (int i = 0; i < elmc; i += 2) {
DCHECK(last <= elmv[i] - 1);
@@ -1046,7 +1049,7 @@ static void AddClassNegated(const int* elmv, int elmc,
ranges->Add(CharacterRange::Range(last, elmv[i] - 1), zone);
last = elmv[i + 1];
}
- ranges->Add(CharacterRange::Range(last, String::kMaxCodePoint), zone);
+ ranges->Add(CharacterRange::Range(last, kMaxCodePoint), zone);
}
void CharacterRange::AddClassEscape(char type, ZoneList<CharacterRange>* ranges,
@@ -1128,13 +1131,13 @@ void CharacterRange::AddCaseEquivalents(Isolate* isolate, Zone* zone,
for (int i = 0; i < range_count; i++) {
CharacterRange range = ranges->at(i);
base::uc32 from = range.from();
- if (from > String::kMaxUtf16CodeUnit) continue;
- base::uc32 to = std::min({range.to(), String::kMaxUtf16CodeUnitU});
+ if (from > kMaxUtf16CodeUnit) continue;
+ base::uc32 to = std::min({range.to(), kMaxUtf16CodeUnitU});
// Nothing to be done for surrogates.
if (from >= kLeadSurrogateStart && to <= kTrailSurrogateEnd) continue;
if (is_one_byte && !RangeContainsLatin1Equivalents(range)) {
- if (from > String::kMaxOneByteCharCode) continue;
- if (to > String::kMaxOneByteCharCode) to = String::kMaxOneByteCharCode;
+ if (from > kMaxOneByteCharCode) continue;
+ if (to > kMaxOneByteCharCode) to = kMaxOneByteCharCode;
}
others.add(from, to);
}
@@ -1171,13 +1174,13 @@ void CharacterRange::AddCaseEquivalents(Isolate* isolate, Zone* zone,
for (int i = 0; i < range_count; i++) {
CharacterRange range = ranges->at(i);
base::uc32 bottom = range.from();
- if (bottom > String::kMaxUtf16CodeUnit) continue;
- base::uc32 top = std::min({range.to(), String::kMaxUtf16CodeUnitU});
+ if (bottom > kMaxUtf16CodeUnit) continue;
+ base::uc32 top = std::min({range.to(), kMaxUtf16CodeUnitU});
// Nothing to be done for surrogates.
if (bottom >= kLeadSurrogateStart && top <= kTrailSurrogateEnd) continue;
if (is_one_byte && !RangeContainsLatin1Equivalents(range)) {
- if (bottom > String::kMaxOneByteCharCode) continue;
- if (top > String::kMaxOneByteCharCode) top = String::kMaxOneByteCharCode;
+ if (bottom > kMaxOneByteCharCode) continue;
+ if (top > kMaxOneByteCharCode) top = kMaxOneByteCharCode;
}
unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
if (top == bottom) {
@@ -1389,9 +1392,8 @@ void CharacterRange::Negate(ZoneList<CharacterRange>* ranges,
from = range.to() + 1;
i++;
}
- if (from < String::kMaxCodePoint) {
- negated_ranges->Add(CharacterRange::Range(from, String::kMaxCodePoint),
- zone);
+ if (from < kMaxCodePoint) {
+ negated_ranges->Add(CharacterRange::Range(from, kMaxCodePoint), zone);
}
}
diff --git a/deps/v8/src/regexp/regexp-compiler.cc b/deps/v8/src/regexp/regexp-compiler.cc
index 38a3d4447f..5123cd138c 100644
--- a/deps/v8/src/regexp/regexp-compiler.cc
+++ b/deps/v8/src/regexp/regexp-compiler.cc
@@ -6,15 +6,13 @@
#include "src/base/safe_conversions.h"
#include "src/execution/isolate.h"
-#include "src/objects/objects-inl.h"
+#include "src/objects/fixed-array-inl.h"
#include "src/regexp/regexp-macro-assembler-arch.h"
-#ifdef V8_INTL_SUPPORT
-#include "src/regexp/special-case.h"
-#endif // V8_INTL_SUPPORT
#include "src/strings/unicode-inl.h"
#include "src/zone/zone-list-inl.h"
#ifdef V8_INTL_SUPPORT
+#include "src/regexp/special-case.h"
#include "unicode/locid.h"
#include "unicode/uniset.h"
#include "unicode/utypes.h"
@@ -240,7 +238,7 @@ class RecursionCheck {
// Attempts to compile the regexp using an Irregexp code generator. Returns
// a fixed array or a null handle depending on whether it succeeded.
RegExpCompiler::RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
- JSRegExp::Flags flags, bool one_byte)
+ RegExpFlags flags, bool one_byte)
: next_register_(JSRegExp::RegistersForCaptureCount(capture_count)),
unicode_lookaround_stack_register_(kNoRegister),
unicode_lookaround_position_register_(kNoRegister),
@@ -1589,7 +1587,7 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
QuickCheckDetails::Position* pos =
details->positions(characters_filled_in);
base::uc16 c = quarks[i];
- if (IgnoreCase(compiler->flags())) {
+ if (IsIgnoreCase(compiler->flags())) {
unibrow::uchar chars[4];
int length = GetCaseIndependentLetters(
isolate, c, compiler->one_byte(), chars, 4);
@@ -1819,7 +1817,7 @@ class IterationDecrementer {
LoopChoiceNode* node_;
};
-RegExpNode* SeqRegExpNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
+RegExpNode* SeqRegExpNode::FilterOneByte(int depth, RegExpFlags flags) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
DCHECK(!info()->visited);
@@ -1827,7 +1825,7 @@ RegExpNode* SeqRegExpNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
return FilterSuccessor(depth - 1, flags);
}
-RegExpNode* SeqRegExpNode::FilterSuccessor(int depth, JSRegExp::Flags flags) {
+RegExpNode* SeqRegExpNode::FilterSuccessor(int depth, RegExpFlags flags) {
RegExpNode* next = on_success_->FilterOneByte(depth - 1, flags);
if (next == nullptr) return set_replacement(nullptr);
on_success_ = next;
@@ -1849,7 +1847,7 @@ static bool RangesContainLatin1Equivalents(ZoneList<CharacterRange>* ranges) {
return false;
}
-RegExpNode* TextNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
+RegExpNode* TextNode::FilterOneByte(int depth, RegExpFlags flags) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
DCHECK(!info()->visited);
@@ -1861,7 +1859,7 @@ RegExpNode* TextNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
base::Vector<const base::uc16> quarks = elm.atom()->data();
for (int j = 0; j < quarks.length(); j++) {
base::uc16 c = quarks[j];
- if (IgnoreCase(flags)) {
+ if (IsIgnoreCase(flags)) {
c = unibrow::Latin1::TryConvertToLatin1(c);
}
if (c > unibrow::Latin1::kMaxChar) return set_replacement(nullptr);
@@ -1880,7 +1878,7 @@ RegExpNode* TextNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
if (range_count != 0 && ranges->at(0).from() == 0 &&
ranges->at(0).to() >= String::kMaxOneByteCharCode) {
// This will be handled in a later filter.
- if (IgnoreCase(flags) && RangesContainLatin1Equivalents(ranges)) {
+ if (IsIgnoreCase(flags) && RangesContainLatin1Equivalents(ranges)) {
continue;
}
return set_replacement(nullptr);
@@ -1889,7 +1887,7 @@ RegExpNode* TextNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
if (range_count == 0 ||
ranges->at(0).from() > String::kMaxOneByteCharCode) {
// This will be handled in a later filter.
- if (IgnoreCase(flags) && RangesContainLatin1Equivalents(ranges)) {
+ if (IsIgnoreCase(flags) && RangesContainLatin1Equivalents(ranges)) {
continue;
}
return set_replacement(nullptr);
@@ -1900,7 +1898,7 @@ RegExpNode* TextNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
return FilterSuccessor(depth - 1, flags);
}
-RegExpNode* LoopChoiceNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
+RegExpNode* LoopChoiceNode::FilterOneByte(int depth, RegExpFlags flags) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
@@ -1917,7 +1915,7 @@ RegExpNode* LoopChoiceNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
return ChoiceNode::FilterOneByte(depth - 1, flags);
}
-RegExpNode* ChoiceNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
+RegExpNode* ChoiceNode::FilterOneByte(int depth, RegExpFlags flags) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
@@ -1969,7 +1967,7 @@ RegExpNode* ChoiceNode::FilterOneByte(int depth, JSRegExp::Flags flags) {
}
RegExpNode* NegativeLookaroundChoiceNode::FilterOneByte(int depth,
- JSRegExp::Flags flags) {
+ RegExpFlags flags) {
if (info()->replacement_calculated) return replacement();
if (depth < 0) return this;
if (info()->visited) return this;
@@ -2321,13 +2319,13 @@ void TextNode::TextEmitPass(RegExpCompiler* compiler, TextEmitPassType pass,
TextElement elm = elements()->at(i);
int cp_offset = trace->cp_offset() + elm.cp_offset() + backward_offset;
if (elm.text_type() == TextElement::ATOM) {
- if (SkipPass(pass, IgnoreCase(compiler->flags()))) continue;
+ if (SkipPass(pass, IsIgnoreCase(compiler->flags()))) continue;
base::Vector<const base::uc16> quarks = elm.atom()->data();
for (int j = preloaded ? 0 : quarks.length() - 1; j >= 0; j--) {
if (first_element_checked && i == 0 && j == 0) continue;
if (DeterminedAlready(quick_check, elm.cp_offset() + j)) continue;
base::uc16 quark = quarks[j];
- if (IgnoreCase(compiler->flags())) {
+ if (IsIgnoreCase(compiler->flags())) {
// Everywhere else we assume that a non-Latin-1 character cannot match
// a Latin-1 character. Avoid the cases where this assumption is
// invalid by using the Latin1 equivalent instead.
@@ -2491,8 +2489,8 @@ void Trace::AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler) {
}
void TextNode::MakeCaseIndependent(Isolate* isolate, bool is_one_byte,
- JSRegExp::Flags flags) {
- if (!IgnoreCase(flags)) return;
+ RegExpFlags flags) {
+ if (!IsIgnoreCase(flags)) return;
#ifdef V8_INTL_SUPPORT
if (NeedsUnicodeCaseEquivalents(flags)) return;
#endif
@@ -3444,7 +3442,7 @@ void BackReferenceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
RecursionCheck rc(compiler);
DCHECK_EQ(start_reg_ + 1, end_reg_);
- if (IgnoreCase(flags_)) {
+ if (IsIgnoreCase(flags_)) {
bool unicode = IsUnicode(flags_);
assembler->CheckNotBackReferenceIgnoreCase(start_reg_, read_backward(),
unicode, trace->backtrack());
@@ -3634,7 +3632,7 @@ class EatsAtLeastPropagator : public AllStatic {
template <typename... Propagators>
class Analysis : public NodeVisitor {
public:
- Analysis(Isolate* isolate, bool is_one_byte, JSRegExp::Flags flags)
+ Analysis(Isolate* isolate, bool is_one_byte, RegExpFlags flags)
: isolate_(isolate),
is_one_byte_(is_one_byte),
flags_(flags),
@@ -3746,14 +3744,14 @@ class Analysis : public NodeVisitor {
private:
Isolate* isolate_;
const bool is_one_byte_;
- const JSRegExp::Flags flags_;
+ const RegExpFlags flags_;
RegExpError error_;
DISALLOW_IMPLICIT_CONSTRUCTORS(Analysis);
};
-RegExpError AnalyzeRegExp(Isolate* isolate, bool is_one_byte,
- JSRegExp::Flags flags, RegExpNode* node) {
+RegExpError AnalyzeRegExp(Isolate* isolate, bool is_one_byte, RegExpFlags flags,
+ RegExpNode* node) {
Analysis<AssertionPropagator, EatsAtLeastPropagator> analysis(
isolate, is_one_byte, flags);
DCHECK_EQ(node->info()->been_analyzed, false);
@@ -3809,7 +3807,7 @@ void TextNode::FillInBMInfo(Isolate* isolate, int initial_offset, int budget,
return;
}
base::uc16 character = atom->data()[j];
- if (IgnoreCase(bm->compiler()->flags())) {
+ if (IsIgnoreCase(bm->compiler()->flags())) {
unibrow::uchar chars[4];
int length = GetCaseIndependentLetters(
isolate, character, bm->max_char() == String::kMaxOneByteCharCode,
@@ -3874,7 +3872,7 @@ RegExpNode* RegExpCompiler::OptionallyStepBackToLeadSurrogate(
}
RegExpNode* RegExpCompiler::PreprocessRegExp(RegExpCompileData* data,
- JSRegExp::Flags flags,
+ RegExpFlags flags,
bool is_one_byte) {
// Wrap the body of the regexp in capture #0.
RegExpNode* captured_body =
diff --git a/deps/v8/src/regexp/regexp-compiler.h b/deps/v8/src/regexp/regexp-compiler.h
index 2be7a48e9a..832a966217 100644
--- a/deps/v8/src/regexp/regexp-compiler.h
+++ b/deps/v8/src/regexp/regexp-compiler.h
@@ -9,6 +9,7 @@
#include "src/base/small-vector.h"
#include "src/base/strings.h"
+#include "src/regexp/regexp-flags.h"
#include "src/regexp/regexp-nodes.h"
namespace v8 {
@@ -49,34 +50,10 @@ constexpr int kPatternTooShortForBoyerMoore = 2;
} // namespace regexp_compiler_constants
-inline bool IgnoreCase(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kIgnoreCase) != 0;
-}
-
-inline bool IsUnicode(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kUnicode) != 0;
-}
-
-inline bool IsSticky(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kSticky) != 0;
-}
-
-inline bool IsGlobal(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kGlobal) != 0;
-}
-
-inline bool DotAll(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kDotAll) != 0;
-}
-
-inline bool Multiline(JSRegExp::Flags flags) {
- return (flags & JSRegExp::kMultiline) != 0;
-}
-
-inline bool NeedsUnicodeCaseEquivalents(JSRegExp::Flags flags) {
+inline bool NeedsUnicodeCaseEquivalents(RegExpFlags flags) {
// Both unicode and ignore_case flags are set. We need to use ICU to find
// the closure over case equivalents.
- return IsUnicode(flags) && IgnoreCase(flags);
+ return IsUnicode(flags) && IsIgnoreCase(flags);
}
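+// For example (editor's note), /foo/iu needs the ICU case closure computed
+// here, while /foo/i or /foo/u alone do not.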
// Details of a quick mask-compare check that can look ahead in the
@@ -424,8 +401,8 @@ struct PreloadState {
// Analysis performs assertion propagation and computes eats_at_least_ values.
// See the comments on AssertionPropagator and EatsAtLeastPropagator for more
// details.
-RegExpError AnalyzeRegExp(Isolate* isolate, bool is_one_byte,
- JSRegExp::Flags flags, RegExpNode* node);
+RegExpError AnalyzeRegExp(Isolate* isolate, bool is_one_byte, RegExpFlags flags,
+ RegExpNode* node);
class FrequencyCollator {
public:
@@ -475,7 +452,7 @@ class FrequencyCollator {
class RegExpCompiler {
public:
RegExpCompiler(Isolate* isolate, Zone* zone, int capture_count,
- JSRegExp::Flags flags, bool is_one_byte);
+ RegExpFlags flags, bool is_one_byte);
int AllocateRegister() {
if (next_register_ >= RegExpMacroAssembler::kMaxRegister) {
@@ -527,7 +504,7 @@ class RegExpCompiler {
// - Inserting the implicit .* before/after the regexp if necessary.
// - If the input is a one-byte string, filtering out nodes that can't match.
// - Fixing up regexp matches that start within a surrogate pair.
- RegExpNode* PreprocessRegExp(RegExpCompileData* data, JSRegExp::Flags flags,
+ RegExpNode* PreprocessRegExp(RegExpCompileData* data, RegExpFlags flags,
bool is_one_byte);
// If the regexp matching starts within a surrogate pair, step back to the
@@ -553,7 +530,7 @@ class RegExpCompiler {
inline void IncrementRecursionDepth() { recursion_depth_++; }
inline void DecrementRecursionDepth() { recursion_depth_--; }
- JSRegExp::Flags flags() const { return flags_; }
+ RegExpFlags flags() const { return flags_; }
void SetRegExpTooBig() { reg_exp_too_big_ = true; }
@@ -585,7 +562,7 @@ class RegExpCompiler {
int unicode_lookaround_position_register_;
ZoneVector<RegExpNode*>* work_list_;
int recursion_depth_;
- const JSRegExp::Flags flags_;
+ const RegExpFlags flags_;
RegExpMacroAssembler* macro_assembler_;
bool one_byte_;
bool reg_exp_too_big_;
diff --git a/deps/v8/src/regexp/regexp-error.h b/deps/v8/src/regexp/regexp-error.h
index 628f93638e..6485e74bb6 100644
--- a/deps/v8/src/regexp/regexp-error.h
+++ b/deps/v8/src/regexp/regexp-error.h
@@ -53,6 +53,11 @@ enum class RegExpError : uint32_t {
V8_EXPORT_PRIVATE const char* RegExpErrorString(RegExpError error);
+inline constexpr bool RegExpErrorIsStackOverflow(RegExpError error) {
+ return error == RegExpError::kStackOverflow ||
+ error == RegExpError::kAnalysisStackOverflow;
+}
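+// e.g. (editor's sketch) a caller might collapse both overflow variants
+// into one failure path:
+//   if (RegExpErrorIsStackOverflow(error)) return ThrowStackOverflow();
+// where ThrowStackOverflow is a hypothetical handler, not part of this
+// patch.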
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/regexp/regexp-flags.h b/deps/v8/src/regexp/regexp-flags.h
new file mode 100644
index 0000000000..b35cd7892b
--- /dev/null
+++ b/deps/v8/src/regexp/regexp-flags.h
@@ -0,0 +1,71 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGEXP_REGEXP_FLAGS_H_
+#define V8_REGEXP_REGEXP_FLAGS_H_
+
+#include "src/base/flags.h"
+#include "src/base/optional.h"
+
+namespace v8 {
+namespace internal {
+
+// TODO(jgruber,pthier): Decouple more parts of the codebase from
+// JSRegExp::Flags. Consider removing JSRegExp::Flags.
+
+// Order is important! Sorted in alphabetic order by the flag char. Note this
+// means that flag bits are shuffled. Take care to keep them contiguous when
+// adding/removing flags.
+#define REGEXP_FLAG_LIST(V) \
+ V(has_indices, HasIndices, hasIndices, 'd', 7) \
+ V(global, Global, global, 'g', 0) \
+ V(ignore_case, IgnoreCase, ignoreCase, 'i', 1) \
+ V(linear, Linear, linear, 'l', 6) \
+ V(multiline, Multiline, multiline, 'm', 2) \
+ V(dot_all, DotAll, dotAll, 's', 5) \
+ V(unicode, Unicode, unicode, 'u', 4) \
+ V(sticky, Sticky, sticky, 'y', 3)
+
+#define V(Lower, Camel, LowerCamel, Char, Bit) k##Camel = 1 << Bit,
+enum class RegExpFlag { REGEXP_FLAG_LIST(V) };
+#undef V
+
+#define V(...) +1
+constexpr int kRegExpFlagCount = REGEXP_FLAG_LIST(V);
+#undef V
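+// For instance (editor's note), the 'i' row of REGEXP_FLAG_LIST expands to
+// kIgnoreCase = 1 << 1 above, and kRegExpFlagCount evaluates to 8.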
+
+// Assert alpha-sorted chars.
+#define V(Lower, Camel, LowerCamel, Char, Bit) < Char) && (Char
+static_assert((('a' - 1) REGEXP_FLAG_LIST(V) <= 'z'), "alpha-sort chars");
+#undef V
+
+// Assert contiguous indices.
+#define V(Lower, Camel, LowerCamel, Char, Bit) | (1 << Bit)
+static_assert(((1 << kRegExpFlagCount) - 1) == (0 REGEXP_FLAG_LIST(V)),
+ "contiguous bits");
+#undef V
+
+using RegExpFlags = base::Flags<RegExpFlag>;
+DEFINE_OPERATORS_FOR_FLAGS(RegExpFlags)
+
+#define V(Lower, Camel, ...) \
+ constexpr bool Is##Camel(RegExpFlags f) { \
+ return (f & RegExpFlag::k##Camel) != 0; \
+ }
+REGEXP_FLAG_LIST(V)
+#undef V
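+// Usage sketch (editor's illustration):
+//   RegExpFlags flags = RegExpFlag::kGlobal | RegExpFlag::kIgnoreCase;
+//   IsGlobal(flags);     // true
+//   IsMultiline(flags);  // false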
+
+// clang-format off
+#define V(Lower, Camel, LowerCamel, Char, Bit) \
+ c == Char ? RegExpFlag::k##Camel :
+constexpr base::Optional<RegExpFlag> TryRegExpFlagFromChar(char c) {
+ return REGEXP_FLAG_LIST(V) base::Optional<RegExpFlag>{};
+}
+#undef V
+// clang-format on
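+// e.g. (editor's note) TryRegExpFlagFromChar('i') yields
+// RegExpFlag::kIgnoreCase, while TryRegExpFlagFromChar('x') yields an empty
+// Optional, since 'x' is not a known flag char.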
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_REGEXP_REGEXP_FLAGS_H_
diff --git a/deps/v8/src/regexp/regexp-interpreter.cc b/deps/v8/src/regexp/regexp-interpreter.cc
index 02fc334920..f9a959d258 100644
--- a/deps/v8/src/regexp/regexp-interpreter.cc
+++ b/deps/v8/src/regexp/regexp-interpreter.cc
@@ -6,17 +6,18 @@
#include "src/regexp/regexp-interpreter.h"
-#include "src/ast/ast.h"
#include "src/base/small-vector.h"
#include "src/base/strings.h"
+#include "src/execution/isolate.h"
#include "src/logging/counters.h"
#include "src/objects/js-regexp-inl.h"
-#include "src/objects/objects-inl.h"
+#include "src/objects/string-inl.h"
#include "src/regexp/regexp-bytecodes.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp-stack.h" // For kMaximumStackSize.
#include "src/regexp/regexp.h"
#include "src/strings/unicode.h"
+#include "src/utils/memcopy.h"
#include "src/utils/utils.h"
#ifdef V8_INTL_SUPPORT
diff --git a/deps/v8/src/regexp/regexp-interpreter.h b/deps/v8/src/regexp/regexp-interpreter.h
index 9b4a8c6c30..a4d79184b0 100644
--- a/deps/v8/src/regexp/regexp-interpreter.h
+++ b/deps/v8/src/regexp/regexp-interpreter.h
@@ -12,6 +12,8 @@
namespace v8 {
namespace internal {
+class ByteArray;
+
class V8_EXPORT_PRIVATE IrregexpInterpreter : public AllStatic {
public:
enum Result {
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-arch.h b/deps/v8/src/regexp/regexp-macro-assembler-arch.h
index 5d5e3e6a44..5d4663e397 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-arch.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler-arch.h
@@ -21,6 +21,8 @@
#include "src/regexp/mips/regexp-macro-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/regexp/mips64/regexp-macro-assembler-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/regexp/loong64/regexp-macro-assembler-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/regexp/s390/regexp-macro-assembler-s390.h"
#elif V8_TARGET_ARCH_RISCV64
diff --git a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
index af148eb47a..ca6abb4e48 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler-tracer.cc
@@ -4,8 +4,8 @@
#include "src/regexp/regexp-macro-assembler-tracer.h"
-#include "src/ast/ast.h"
-#include "src/objects/objects-inl.h"
+#include "src/objects/fixed-array-inl.h"
+#include "src/objects/string.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.cc b/deps/v8/src/regexp/regexp-macro-assembler.cc
index 5457398f39..891079b357 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp/regexp-macro-assembler.cc
@@ -5,6 +5,7 @@
#include "src/regexp/regexp-macro-assembler.h"
#include "src/codegen/assembler.h"
+#include "src/codegen/label.h"
#include "src/execution/isolate-inl.h"
#include "src/execution/pointer-authentication.h"
#include "src/execution/simulator.h"
@@ -22,12 +23,17 @@ namespace internal {
RegExpMacroAssembler::RegExpMacroAssembler(Isolate* isolate, Zone* zone)
: slow_safe_compiler_(false),
+ backtrack_limit_(JSRegExp::kNoBacktrackLimit),
global_mode_(NOT_GLOBAL),
isolate_(isolate),
zone_(zone) {}
RegExpMacroAssembler::~RegExpMacroAssembler() = default;
+bool RegExpMacroAssembler::has_backtrack_limit() const {
+ return backtrack_limit_ != JSRegExp::kNoBacktrackLimit;
+}
+
int RegExpMacroAssembler::CaseInsensitiveCompareNonUnicode(Address byte_offset1,
Address byte_offset2,
size_t byte_length,
diff --git a/deps/v8/src/regexp/regexp-macro-assembler.h b/deps/v8/src/regexp/regexp-macro-assembler.h
index 31e8b1a370..9bd9ba615e 100644
--- a/deps/v8/src/regexp/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp/regexp-macro-assembler.h
@@ -6,13 +6,15 @@
#define V8_REGEXP_REGEXP_MACRO_ASSEMBLER_H_
#include "src/base/strings.h"
-#include "src/codegen/label.h"
#include "src/regexp/regexp-ast.h"
#include "src/regexp/regexp.h"
namespace v8 {
namespace internal {
+class ByteArray;
+class Label;
+
static const base::uc32 kLeadSurrogateStart = 0xd800;
static const base::uc32 kLeadSurrogateEnd = 0xdbff;
static const base::uc32 kTrailSurrogateStart = 0xdc00;
@@ -45,6 +47,7 @@ class RegExpMacroAssembler {
V(ARM) \
V(ARM64) \
V(MIPS) \
+ V(LOONG64) \
V(RISCV) \
V(S390) \
V(PPC) \
@@ -230,20 +233,18 @@ class RegExpMacroAssembler {
Zone* zone() const { return zone_; }
protected:
- bool has_backtrack_limit() const {
- return backtrack_limit_ != JSRegExp::kNoBacktrackLimit;
- }
+ bool has_backtrack_limit() const;
uint32_t backtrack_limit() const { return backtrack_limit_; }
bool can_fallback() const { return can_fallback_; }
private:
bool slow_safe_compiler_;
- uint32_t backtrack_limit_ = JSRegExp::kNoBacktrackLimit;
+ uint32_t backtrack_limit_;
bool can_fallback_ = false;
GlobalMode global_mode_;
- Isolate* isolate_;
- Zone* zone_;
+ Isolate* const isolate_;
+ Zone* const zone_;
};
class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
diff --git a/deps/v8/src/regexp/regexp-nodes.h b/deps/v8/src/regexp/regexp-nodes.h
index 537cf96201..46b6f5ce21 100644
--- a/deps/v8/src/regexp/regexp-nodes.h
+++ b/deps/v8/src/regexp/regexp-nodes.h
@@ -5,6 +5,7 @@
#ifndef V8_REGEXP_REGEXP_NODES_H_
#define V8_REGEXP_REGEXP_NODES_H_
+#include "src/codegen/label.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/zone/zone.h"
@@ -14,7 +15,6 @@ namespace internal {
class AlternativeGenerationList;
class BoyerMooreLookahead;
class GreedyLoopState;
-class Label;
class NodeVisitor;
class QuickCheckDetails;
class RegExpCompiler;
@@ -205,7 +205,7 @@ class RegExpNode : public ZoneObject {
// If we know that the input is one-byte then there are some nodes that can
// never match. This method returns a node that can be substituted for
// itself, or nullptr if the node can never match.
- virtual RegExpNode* FilterOneByte(int depth, JSRegExp::Flags flags) {
+ virtual RegExpNode* FilterOneByte(int depth, RegExpFlags flags) {
return this;
}
// Helper for FilterOneByte.
@@ -296,7 +296,7 @@ class SeqRegExpNode : public RegExpNode {
: RegExpNode(on_success->zone()), on_success_(on_success) {}
RegExpNode* on_success() { return on_success_; }
void set_on_success(RegExpNode* node) { on_success_ = node; }
- RegExpNode* FilterOneByte(int depth, JSRegExp::Flags flags) override;
+ RegExpNode* FilterOneByte(int depth, RegExpFlags flags) override;
void FillInBMInfo(Isolate* isolate, int offset, int budget,
BoyerMooreLookahead* bm, bool not_at_start) override {
on_success_->FillInBMInfo(isolate, offset, budget - 1, bm, not_at_start);
@@ -304,7 +304,7 @@ class SeqRegExpNode : public RegExpNode {
}
protected:
- RegExpNode* FilterSuccessor(int depth, JSRegExp::Flags flags);
+ RegExpNode* FilterSuccessor(int depth, RegExpFlags flags);
private:
RegExpNode* on_success_;
@@ -423,14 +423,14 @@ class TextNode : public SeqRegExpNode {
ZoneList<TextElement>* elements() { return elms_; }
bool read_backward() { return read_backward_; }
void MakeCaseIndependent(Isolate* isolate, bool is_one_byte,
- JSRegExp::Flags flags);
+ RegExpFlags flags);
int GreedyLoopTextLength() override;
RegExpNode* GetSuccessorOfOmnivorousTextNode(
RegExpCompiler* compiler) override;
void FillInBMInfo(Isolate* isolate, int offset, int budget,
BoyerMooreLookahead* bm, bool not_at_start) override;
void CalculateOffsets();
- RegExpNode* FilterOneByte(int depth, JSRegExp::Flags flags) override;
+ RegExpNode* FilterOneByte(int depth, RegExpFlags flags) override;
int Length();
private:
@@ -498,7 +498,7 @@ class AssertionNode : public SeqRegExpNode {
class BackReferenceNode : public SeqRegExpNode {
public:
- BackReferenceNode(int start_reg, int end_reg, JSRegExp::Flags flags,
+ BackReferenceNode(int start_reg, int end_reg, RegExpFlags flags,
bool read_backward, RegExpNode* on_success)
: SeqRegExpNode(on_success),
start_reg_(start_reg),
@@ -521,7 +521,7 @@ class BackReferenceNode : public SeqRegExpNode {
private:
int start_reg_;
int end_reg_;
- JSRegExp::Flags flags_;
+ RegExpFlags flags_;
bool read_backward_;
};
@@ -623,7 +623,7 @@ class ChoiceNode : public RegExpNode {
virtual bool try_to_emit_quick_check_for_alternative(bool is_first) {
return true;
}
- RegExpNode* FilterOneByte(int depth, JSRegExp::Flags flags) override;
+ RegExpNode* FilterOneByte(int depth, RegExpFlags flags) override;
virtual bool read_backward() { return false; }
protected:
@@ -695,7 +695,7 @@ class NegativeLookaroundChoiceNode : public ChoiceNode {
return !is_first;
}
void Accept(NodeVisitor* visitor) override;
- RegExpNode* FilterOneByte(int depth, JSRegExp::Flags flags) override;
+ RegExpNode* FilterOneByte(int depth, RegExpFlags flags) override;
};
class LoopChoiceNode : public ChoiceNode {
@@ -728,7 +728,7 @@ class LoopChoiceNode : public ChoiceNode {
int min_loop_iterations() const { return min_loop_iterations_; }
bool read_backward() override { return read_backward_; }
void Accept(NodeVisitor* visitor) override;
- RegExpNode* FilterOneByte(int depth, JSRegExp::Flags flags) override;
+ RegExpNode* FilterOneByte(int depth, RegExpFlags flags) override;
private:
// AddAlternative is made private for loop nodes because alternatives
diff --git a/deps/v8/src/regexp/regexp-parser.cc b/deps/v8/src/regexp/regexp-parser.cc
index 1201e555ad..1d9f24b792 100644
--- a/deps/v8/src/regexp/regexp-parser.cc
+++ b/deps/v8/src/regexp/regexp-parser.cc
@@ -4,12 +4,9 @@
#include "src/regexp/regexp-parser.h"
-#include <vector>
-
#include "src/execution/isolate.h"
-#include "src/heap/factory.h"
-#include "src/objects/objects-inl.h"
#include "src/regexp/property-sequences.h"
+#include "src/regexp/regexp-ast.h"
#include "src/regexp/regexp-macro-assembler.h"
#include "src/regexp/regexp.h"
#include "src/strings/char-predicates-inl.h"
@@ -24,14 +21,386 @@
namespace v8 {
namespace internal {
-RegExpParser::RegExpParser(FlatStringReader* in, JSRegExp::Flags flags,
- Isolate* isolate, Zone* zone)
- : isolate_(isolate),
- zone_(zone),
+namespace {
+
+// A BufferedZoneList is an automatically growing list, just like (and backed
+// by) a ZoneList, that is optimized for the case of adding and removing
+// a single element. The last element added is stored outside the backing list,
+// and if no more than one element is ever added, the ZoneList isn't even
+// allocated.
+// Elements must not be nullptr.
+template <typename T, int initial_size>
+class BufferedZoneList {
+ public:
+ BufferedZoneList() : list_(nullptr), last_(nullptr) {}
+
+  // Adds an element at the end of the list. The element is buffered and can
+  // be read using last() or removed using RemoveLast() until the next call
+  // to Add(), RemoveLast(), or GetList().
+ void Add(T* value, Zone* zone) {
+ if (last_ != nullptr) {
+ if (list_ == nullptr) {
+ list_ = zone->New<ZoneList<T*>>(initial_size, zone);
+ }
+ list_->Add(last_, zone);
+ }
+ last_ = value;
+ }
+
+ T* last() {
+ DCHECK(last_ != nullptr);
+ return last_;
+ }
+
+ T* RemoveLast() {
+ DCHECK(last_ != nullptr);
+ T* result = last_;
+ if ((list_ != nullptr) && (list_->length() > 0))
+ last_ = list_->RemoveLast();
+ else
+ last_ = nullptr;
+ return result;
+ }
+
+ T* Get(int i) {
+ DCHECK((0 <= i) && (i < length()));
+ if (list_ == nullptr) {
+ DCHECK_EQ(0, i);
+ return last_;
+ } else {
+ if (i == list_->length()) {
+ DCHECK(last_ != nullptr);
+ return last_;
+ } else {
+ return list_->at(i);
+ }
+ }
+ }
+
+ void Clear() {
+ list_ = nullptr;
+ last_ = nullptr;
+ }
+
+ int length() {
+ int length = (list_ == nullptr) ? 0 : list_->length();
+ return length + ((last_ == nullptr) ? 0 : 1);
+ }
+
+ ZoneList<T*>* GetList(Zone* zone) {
+ if (list_ == nullptr) {
+ list_ = zone->New<ZoneList<T*>>(initial_size, zone);
+ }
+ if (last_ != nullptr) {
+ list_->Add(last_, zone);
+ last_ = nullptr;
+ }
+ return list_;
+ }
+
+ private:
+ ZoneList<T*>* list_;
+ T* last_;
+};
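+// Usage sketch (editor's illustration; atom and assertion stand for
+// hypothetical RegExpTree* values):
+//   BufferedZoneList<RegExpTree, 2> terms;
+//   terms.Add(atom, zone);       // buffered in last_, no ZoneList yet
+//   terms.Add(assertion, zone);  // atom spills into the backing ZoneList
+//   RegExpTree* t = terms.RemoveLast();  // returns assertion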
+
+// Accumulates RegExp atoms and assertions into lists of terms and
+// alternatives.
+class RegExpBuilder : public ZoneObject {
+ public:
+ RegExpBuilder(Zone* zone, RegExpFlags flags);
+ void AddCharacter(base::uc16 character);
+ void AddUnicodeCharacter(base::uc32 character);
+ void AddEscapedUnicodeCharacter(base::uc32 character);
+  // "Adds" an empty expression. Does nothing except consume a following
+  // quantifier.
+ void AddEmpty();
+ void AddCharacterClass(RegExpCharacterClass* cc);
+ void AddCharacterClassForDesugaring(base::uc32 c);
+ void AddAtom(RegExpTree* tree);
+ void AddTerm(RegExpTree* tree);
+ void AddAssertion(RegExpTree* tree);
+ void NewAlternative(); // '|'
+ bool AddQuantifierToAtom(int min, int max,
+ RegExpQuantifier::QuantifierType type);
+ void FlushText();
+ RegExpTree* ToRegExp();
+ RegExpFlags flags() const { return flags_; }
+
+ bool ignore_case() const { return IsIgnoreCase(flags_); }
+ bool multiline() const { return IsMultiline(flags_); }
+ bool dotall() const { return IsDotAll(flags_); }
+
+ private:
+ static const base::uc16 kNoPendingSurrogate = 0;
+ void AddLeadSurrogate(base::uc16 lead_surrogate);
+ void AddTrailSurrogate(base::uc16 trail_surrogate);
+ void FlushPendingSurrogate();
+ void FlushCharacters();
+ void FlushTerms();
+ bool NeedsDesugaringForUnicode(RegExpCharacterClass* cc);
+ bool NeedsDesugaringForIgnoreCase(base::uc32 c);
+ Zone* zone() const { return zone_; }
+ bool unicode() const { return IsUnicode(flags_); }
+
+ Zone* const zone_;
+ bool pending_empty_;
+ const RegExpFlags flags_;
+ ZoneList<base::uc16>* characters_;
+ base::uc16 pending_surrogate_;
+ BufferedZoneList<RegExpTree, 2> terms_;
+ BufferedZoneList<RegExpTree, 2> text_;
+ BufferedZoneList<RegExpTree, 2> alternatives_;
+#ifdef DEBUG
+  enum { ADD_NONE, ADD_CHAR, ADD_TERM, ADD_ASSERT, ADD_ATOM } last_added_;
+#define LAST(x) last_added_ = x;
+#else
+#define LAST(x)
+#endif
+};
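+// Building /ab|c/ (editor's sketch):
+//   RegExpBuilder builder(zone, flags);
+//   builder.AddCharacter('a');
+//   builder.AddCharacter('b');
+//   builder.NewAlternative();
+//   builder.AddCharacter('c');
+//   RegExpTree* tree = builder.ToRegExp();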
+
+enum SubexpressionType {
+ INITIAL,
+ CAPTURE, // All positive values represent captures.
+ POSITIVE_LOOKAROUND,
+ NEGATIVE_LOOKAROUND,
+ GROUPING
+};
+
+class RegExpParserState : public ZoneObject {
+ public:
+ // Push a state on the stack.
+ RegExpParserState(RegExpParserState* previous_state,
+ SubexpressionType group_type,
+ RegExpLookaround::Type lookaround_type,
+ int disjunction_capture_index,
+ const ZoneVector<base::uc16>* capture_name,
+ RegExpFlags flags, Zone* zone)
+ : previous_state_(previous_state),
+ builder_(zone->New<RegExpBuilder>(zone, flags)),
+ group_type_(group_type),
+ lookaround_type_(lookaround_type),
+ disjunction_capture_index_(disjunction_capture_index),
+ capture_name_(capture_name) {}
+ // Parser state of containing expression, if any.
+ RegExpParserState* previous_state() const { return previous_state_; }
+ bool IsSubexpression() { return previous_state_ != nullptr; }
+ // RegExpBuilder building this regexp's AST.
+ RegExpBuilder* builder() const { return builder_; }
+ // Type of regexp being parsed (parenthesized group or entire regexp).
+ SubexpressionType group_type() const { return group_type_; }
+ // Lookahead or Lookbehind.
+ RegExpLookaround::Type lookaround_type() const { return lookaround_type_; }
+ // Index in captures array of first capture in this sub-expression, if any.
+ // Also the capture index of this sub-expression itself, if group_type
+ // is CAPTURE.
+ int capture_index() const { return disjunction_capture_index_; }
+ // The name of the current sub-expression, if group_type is CAPTURE. Only
+ // used for named captures.
+ const ZoneVector<base::uc16>* capture_name() const { return capture_name_; }
+
+ bool IsNamedCapture() const { return capture_name_ != nullptr; }
+
+ // Check whether the parser is inside a capture group with the given index.
+ bool IsInsideCaptureGroup(int index) const {
+ for (const RegExpParserState* s = this; s != nullptr;
+ s = s->previous_state()) {
+ if (s->group_type() != CAPTURE) continue;
+ // Return true if we found the matching capture index.
+ if (index == s->capture_index()) return true;
+ // Abort if index is larger than what has been parsed up till this state.
+ if (index > s->capture_index()) return false;
+ }
+ return false;
+ }
+
+ // Check whether the parser is inside a capture group with the given name.
+ bool IsInsideCaptureGroup(const ZoneVector<base::uc16>* name) const {
+ DCHECK_NOT_NULL(name);
+ for (const RegExpParserState* s = this; s != nullptr;
+ s = s->previous_state()) {
+ if (s->capture_name() == nullptr) continue;
+ if (*s->capture_name() == *name) return true;
+ }
+ return false;
+ }
+
+ private:
+ // Linked list implementation of stack of states.
+ RegExpParserState* const previous_state_;
+ // Builder for the stored disjunction.
+ RegExpBuilder* const builder_;
+ // Stored disjunction type (capture, look-ahead or grouping), if any.
+ const SubexpressionType group_type_;
+ // Stored read direction.
+ const RegExpLookaround::Type lookaround_type_;
+ // Stored disjunction's capture index (if any).
+ const int disjunction_capture_index_;
+ // Stored capture name (if any).
+ const ZoneVector<base::uc16>* const capture_name_;
+};
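+// Example (editor's note): while parsing the inner group of /(a(?<x>b))/,
+// the state stack is CAPTURE #2 (named "x") -> CAPTURE #1 -> INITIAL, so
+// IsInsideCaptureGroup(1) and IsInsideCaptureGroup(2) both return true.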
+
+template <class CharT>
+class RegExpParserImpl final {
+ private:
+ RegExpParserImpl(const CharT* input, int input_length, RegExpFlags flags,
+ uintptr_t stack_limit, Zone* zone,
+ const DisallowGarbageCollection& no_gc);
+
+ bool Parse(RegExpCompileData* result);
+
+ RegExpTree* ParsePattern();
+ RegExpTree* ParseDisjunction();
+ RegExpTree* ParseGroup();
+
+ // Parses a {...,...} quantifier and stores the range in the given
+ // out parameters.
+ bool ParseIntervalQuantifier(int* min_out, int* max_out);
+
+  // Parses and returns a single escaped character. The character
+  // must not be 'b' or 'B' since they are usually handled specially.
+ base::uc32 ParseClassCharacterEscape();
+
+  // Checks whether the following is a hexadecimal number of the given
+  // length, and sets the value if it is.
+ bool ParseHexEscape(int length, base::uc32* value);
+ bool ParseUnicodeEscape(base::uc32* value);
+ bool ParseUnlimitedLengthHexNumber(int max_value, base::uc32* value);
+
+ bool ParsePropertyClassName(ZoneVector<char>* name_1,
+ ZoneVector<char>* name_2);
+ bool AddPropertyClassRange(ZoneList<CharacterRange>* add_to, bool negate,
+ const ZoneVector<char>& name_1,
+ const ZoneVector<char>& name_2);
+
+ RegExpTree* GetPropertySequence(const ZoneVector<char>& name_1);
+ RegExpTree* ParseCharacterClass(const RegExpBuilder* state);
+
+ base::uc32 ParseOctalLiteral();
+
+ // Tries to parse the input as a back reference. If successful it
+ // stores the result in the output parameter and returns true. If
+ // it fails it will push back the characters read so the same characters
+ // can be reparsed.
+ bool ParseBackReferenceIndex(int* index_out);
+
+  // Parse inside a class. Either adds the escaped class to the ranges, or
+  // sets *is_class_escape to false and passes the parsed single character
+  // through |char_out|.
+ void ParseClassEscape(ZoneList<CharacterRange>* ranges, Zone* zone,
+ bool add_unicode_case_equivalents, base::uc32* char_out,
+ bool* is_class_escape);
+
+ char ParseClassEscape();
+
+ RegExpTree* ReportError(RegExpError error);
+ void Advance();
+ void Advance(int dist);
+ void Reset(int pos);
+
+ // Reports whether the pattern might be used as a literal search string.
+ // Only use if the result of the parse is a single atom node.
+ bool simple();
+ bool contains_anchor() { return contains_anchor_; }
+ void set_contains_anchor() { contains_anchor_ = true; }
+ int captures_started() { return captures_started_; }
+ int position() { return next_pos_ - 1; }
+ bool failed() { return failed_; }
+ bool unicode() const { return IsUnicode(top_level_flags_); }
+
+ static bool IsSyntaxCharacterOrSlash(base::uc32 c);
+
+ static const base::uc32 kEndMarker = (1 << 21);
+
+ private:
+ // Return the 1-indexed RegExpCapture object, allocate if necessary.
+ RegExpCapture* GetCapture(int index);
+
+ // Creates a new named capture at the specified index. Must be called exactly
+ // once for each named capture. Fails if a capture with the same name is
+ // encountered.
+ bool CreateNamedCaptureAtIndex(const ZoneVector<base::uc16>* name, int index);
+
+ // Parses the name of a capture group (?<name>pattern). The name must adhere
+ // to IdentifierName in the ECMAScript standard.
+ const ZoneVector<base::uc16>* ParseCaptureGroupName();
+
+ bool ParseNamedBackReference(RegExpBuilder* builder,
+ RegExpParserState* state);
+ RegExpParserState* ParseOpenParenthesis(RegExpParserState* state);
+
+ // After the initial parsing pass, patch corresponding RegExpCapture objects
+ // into all RegExpBackReferences. This is done after initial parsing in order
+  // to avoid complicating cases in which references come before the capture.
+ void PatchNamedBackReferences();
+
+ ZoneVector<RegExpCapture*>* GetNamedCaptures() const;
+
+ // Returns true iff the pattern contains named captures. May call
+ // ScanForCaptures to look ahead at the remaining pattern.
+ bool HasNamedCaptures();
+
+ Zone* zone() const { return zone_; }
+
+ base::uc32 current() { return current_; }
+ bool has_more() { return has_more_; }
+ bool has_next() { return next_pos_ < input_length(); }
+ base::uc32 Next();
+ template <bool update_position>
+ base::uc32 ReadNext();
+ CharT InputAt(int index) const {
+ DCHECK(0 <= index && index < input_length());
+ return input_[index];
+ }
+ int input_length() const { return input_length_; }
+ void ScanForCaptures();
+
+ struct RegExpCaptureNameLess {
+ bool operator()(const RegExpCapture* lhs, const RegExpCapture* rhs) const {
+ DCHECK_NOT_NULL(lhs);
+ DCHECK_NOT_NULL(rhs);
+ return *lhs->name() < *rhs->name();
+ }
+ };
+
+ const DisallowGarbageCollection no_gc_;
+ Zone* const zone_;
+ RegExpError error_ = RegExpError::kNone;
+ int error_pos_ = 0;
+ ZoneList<RegExpCapture*>* captures_;
+ ZoneSet<RegExpCapture*, RegExpCaptureNameLess>* named_captures_;
+ ZoneList<RegExpBackReference*>* named_back_references_;
+ const CharT* const input_;
+ const int input_length_;
+ base::uc32 current_;
+ const RegExpFlags top_level_flags_;
+ int next_pos_;
+ int captures_started_;
+ int capture_count_; // Only valid after we have scanned for captures.
+ bool has_more_;
+ bool simple_;
+ bool contains_anchor_;
+ bool is_scanned_for_captures_;
+ bool has_named_captures_; // Only valid after we have scanned for captures.
+ bool failed_;
+ const uintptr_t stack_limit_;
+
+ friend bool RegExpParser::ParseRegExpFromHeapString(Isolate*, Zone*,
+ Handle<String>,
+ RegExpFlags,
+ RegExpCompileData*);
+ friend bool RegExpParser::VerifyRegExpSyntax<CharT>(
+ Zone*, uintptr_t, const CharT*, int, RegExpFlags, RegExpCompileData*,
+ const DisallowGarbageCollection&);
+};
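+// (Editor's sketch) The parser is instantiated once per string encoding by
+// its friends in RegExpParser, roughly:
+//   RegExpParserImpl<uint8_t> parser(chars, length, flags, stack_limit,
+//                                    zone, no_gc);
+//   parser.Parse(&compile_data);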
+
+template <class CharT>
+RegExpParserImpl<CharT>::RegExpParserImpl(
+ const CharT* input, int input_length, RegExpFlags flags,
+ uintptr_t stack_limit, Zone* zone, const DisallowGarbageCollection& no_gc)
+ : zone_(zone),
captures_(nullptr),
named_captures_(nullptr),
named_back_references_(nullptr),
- in_(in),
+ input_(input),
+ input_length_(input_length),
current_(kEndMarker),
top_level_flags_(flags),
next_pos_(0),
@@ -42,30 +411,44 @@ RegExpParser::RegExpParser(FlatStringReader* in, JSRegExp::Flags flags,
contains_anchor_(false),
is_scanned_for_captures_(false),
has_named_captures_(false),
- failed_(false) {
+ failed_(false),
+ stack_limit_(stack_limit) {
Advance();
}
+template <>
template <bool update_position>
-inline base::uc32 RegExpParser::ReadNext() {
+inline base::uc32 RegExpParserImpl<uint8_t>::ReadNext() {
int position = next_pos_;
- base::uc32 c0 = in()->Get(position);
+ base::uc16 c0 = InputAt(position);
+ position++;
+ DCHECK(!unibrow::Utf16::IsLeadSurrogate(c0));
+ if (update_position) next_pos_ = position;
+ return c0;
+}
+
+template <>
+template <bool update_position>
+inline base::uc32 RegExpParserImpl<base::uc16>::ReadNext() {
+ int position = next_pos_;
+ base::uc16 c0 = InputAt(position);
+ base::uc32 result = c0;
position++;
// Read the whole surrogate pair in case of unicode flag, if possible.
- if (unicode() && position < in()->length() &&
- unibrow::Utf16::IsLeadSurrogate(static_cast<base::uc16>(c0))) {
- base::uc16 c1 = in()->Get(position);
+ if (unicode() && position < input_length() &&
+ unibrow::Utf16::IsLeadSurrogate(c0)) {
+ base::uc16 c1 = InputAt(position);
if (unibrow::Utf16::IsTrailSurrogate(c1)) {
- c0 =
- unibrow::Utf16::CombineSurrogatePair(static_cast<base::uc16>(c0), c1);
+ result = unibrow::Utf16::CombineSurrogatePair(c0, c1);
position++;
}
}
if (update_position) next_pos_ = position;
- return c0;
+ return result;
}
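+// Example (editor's note): with the 'u' flag, the surrogate pair
+// 0xd83d 0xde00 is combined and returned as the single code point U+1F600;
+// without 'u' the two code units are returned one at a time.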
-base::uc32 RegExpParser::Next() {
+template <class CharT>
+base::uc32 RegExpParserImpl<CharT>::Next() {
if (has_next()) {
return ReadNext<false>();
} else {
@@ -73,10 +456,10 @@ base::uc32 RegExpParser::Next() {
}
}
-void RegExpParser::Advance() {
+template <class CharT>
+void RegExpParserImpl<CharT>::Advance() {
if (has_next()) {
- StackLimitCheck check(isolate());
- if (check.HasOverflowed()) {
+ if (GetCurrentStackPosition() < stack_limit_) {
if (FLAG_correctness_fuzzer_suppressions) {
FATAL("Aborting on stack overflow");
}
@@ -93,27 +476,31 @@ void RegExpParser::Advance() {
current_ = kEndMarker;
// Advance so that position() points to 1-after-the-last-character. This is
// important so that Reset() to this position works correctly.
- next_pos_ = in()->length() + 1;
+ next_pos_ = input_length() + 1;
has_more_ = false;
}
}
-
-void RegExpParser::Reset(int pos) {
+template <class CharT>
+void RegExpParserImpl<CharT>::Reset(int pos) {
next_pos_ = pos;
- has_more_ = (pos < in()->length());
+ has_more_ = (pos < input_length());
Advance();
}
-void RegExpParser::Advance(int dist) {
+template <class CharT>
+void RegExpParserImpl<CharT>::Advance(int dist) {
next_pos_ += dist - 1;
Advance();
}
+template <class CharT>
+bool RegExpParserImpl<CharT>::simple() {
+ return simple_;
+}
-bool RegExpParser::simple() { return simple_; }
-
-bool RegExpParser::IsSyntaxCharacterOrSlash(base::uc32 c) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::IsSyntaxCharacterOrSlash(base::uc32 c) {
switch (c) {
case '^':
case '$':
@@ -137,14 +524,15 @@ bool RegExpParser::IsSyntaxCharacterOrSlash(base::uc32 c) {
return false;
}
-RegExpTree* RegExpParser::ReportError(RegExpError error) {
+template <class CharT>
+RegExpTree* RegExpParserImpl<CharT>::ReportError(RegExpError error) {
if (failed_) return nullptr; // Do not overwrite any existing error.
failed_ = true;
error_ = error;
error_pos_ = position();
// Zip to the end to make sure no more input is read.
current_ = kEndMarker;
- next_pos_ = in()->length();
+ next_pos_ = input_length();
return nullptr;
}
@@ -154,19 +542,19 @@ RegExpTree* RegExpParser::ReportError(RegExpError error) {
// Pattern ::
// Disjunction
-RegExpTree* RegExpParser::ParsePattern() {
+template <class CharT>
+RegExpTree* RegExpParserImpl<CharT>::ParsePattern() {
RegExpTree* result = ParseDisjunction(CHECK_FAILED);
PatchNamedBackReferences(CHECK_FAILED);
DCHECK(!has_more());
// If the result of parsing is a literal string atom, and it has the
// same length as the input, then the atom is identical to the input.
- if (result->IsAtom() && result->AsAtom()->length() == in()->length()) {
+ if (result->IsAtom() && result->AsAtom()->length() == input_length()) {
simple_ = true;
}
return result;
}
-
// Disjunction ::
// Alternative
// Alternative | Disjunction
@@ -177,7 +565,8 @@ RegExpTree* RegExpParser::ParsePattern() {
// Assertion
// Atom
// Atom Quantifier
-RegExpTree* RegExpParser::ParseDisjunction() {
+template <class CharT>
+RegExpTree* RegExpParserImpl<CharT>::ParseDisjunction() {
// Used to store current state while parsing subexpressions.
RegExpParserState initial_state(nullptr, INITIAL, RegExpLookaround::LOOKAHEAD,
0, nullptr, top_level_flags_, zone());
@@ -220,12 +609,12 @@ RegExpTree* RegExpParser::ParseDisjunction() {
capture->set_body(body);
body = capture;
} else if (group_type == GROUPING) {
- body = zone()->New<RegExpGroup>(body);
+ body = zone()->template New<RegExpGroup>(body);
} else {
DCHECK(group_type == POSITIVE_LOOKAROUND ||
group_type == NEGATIVE_LOOKAROUND);
bool is_positive = (group_type == POSITIVE_LOOKAROUND);
- body = zone()->New<RegExpLookaround>(
+ body = zone()->template New<RegExpLookaround>(
body, is_positive, end_capture_index - capture_index,
capture_index, state->lookaround_type());
}
@@ -250,7 +639,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
return ReportError(RegExpError::kNothingToRepeat);
case '^': {
Advance();
- builder->AddAssertion(zone()->New<RegExpAssertion>(
+ builder->AddAssertion(zone()->template New<RegExpAssertion>(
builder->multiline() ? RegExpAssertion::START_OF_LINE
: RegExpAssertion::START_OF_INPUT));
set_contains_anchor();
@@ -261,13 +650,14 @@ RegExpTree* RegExpParser::ParseDisjunction() {
RegExpAssertion::AssertionType assertion_type =
builder->multiline() ? RegExpAssertion::END_OF_LINE
: RegExpAssertion::END_OF_INPUT;
- builder->AddAssertion(zone()->New<RegExpAssertion>(assertion_type));
+ builder->AddAssertion(
+ zone()->template New<RegExpAssertion>(assertion_type));
continue;
}
case '.': {
Advance();
ZoneList<CharacterRange>* ranges =
- zone()->New<ZoneList<CharacterRange>>(2, zone());
+ zone()->template New<ZoneList<CharacterRange>>(2, zone());
if (builder->dotall()) {
// Everything.
@@ -278,7 +668,7 @@ RegExpTree* RegExpParser::ParseDisjunction() {
}
RegExpCharacterClass* cc =
- zone()->New<RegExpCharacterClass>(zone(), ranges);
+ zone()->template New<RegExpCharacterClass>(zone(), ranges);
builder->AddCharacterClass(cc);
break;
}
@@ -300,13 +690,13 @@ RegExpTree* RegExpParser::ParseDisjunction() {
return ReportError(RegExpError::kEscapeAtEndOfPattern);
case 'b':
Advance(2);
- builder->AddAssertion(
- zone()->New<RegExpAssertion>(RegExpAssertion::BOUNDARY));
+ builder->AddAssertion(zone()->template New<RegExpAssertion>(
+ RegExpAssertion::BOUNDARY));
continue;
case 'B':
Advance(2);
- builder->AddAssertion(
- zone()->New<RegExpAssertion>(RegExpAssertion::NON_BOUNDARY));
+ builder->AddAssertion(zone()->template New<RegExpAssertion>(
+ RegExpAssertion::NON_BOUNDARY));
continue;
// AtomEscape ::
// CharacterClassEscape
@@ -322,11 +712,11 @@ RegExpTree* RegExpParser::ParseDisjunction() {
base::uc32 c = Next();
Advance(2);
ZoneList<CharacterRange>* ranges =
- zone()->New<ZoneList<CharacterRange>>(2, zone());
+ zone()->template New<ZoneList<CharacterRange>>(2, zone());
CharacterRange::AddClassEscape(
c, ranges, unicode() && builder->ignore_case(), zone());
RegExpCharacterClass* cc =
- zone()->New<RegExpCharacterClass>(zone(), ranges);
+ zone()->template New<RegExpCharacterClass>(zone(), ranges);
builder->AddCharacterClass(cc);
break;
}
@@ -336,13 +726,14 @@ RegExpTree* RegExpParser::ParseDisjunction() {
Advance(2);
if (unicode()) {
ZoneList<CharacterRange>* ranges =
- zone()->New<ZoneList<CharacterRange>>(2, zone());
+ zone()->template New<ZoneList<CharacterRange>>(2, zone());
ZoneVector<char> name_1(zone());
ZoneVector<char> name_2(zone());
if (ParsePropertyClassName(&name_1, &name_2)) {
if (AddPropertyClassRange(ranges, p == 'P', name_1, name_2)) {
RegExpCharacterClass* cc =
- zone()->New<RegExpCharacterClass>(zone(), ranges);
+ zone()->template New<RegExpCharacterClass>(zone(),
+ ranges);
builder->AddCharacterClass(cc);
break;
}
@@ -381,8 +772,8 @@ RegExpTree* RegExpParser::ParseDisjunction() {
builder->AddEmpty();
} else {
RegExpCapture* capture = GetCapture(index);
- RegExpTree* atom =
- zone()->New<RegExpBackReference>(capture, builder->flags());
+ RegExpTree* atom = zone()->template New<RegExpBackReference>(
+ capture, builder->flags());
builder->AddAtom(atom);
}
break;
@@ -575,12 +966,11 @@ RegExpTree* RegExpParser::ParseDisjunction() {
}
}
-RegExpParser::RegExpParserState* RegExpParser::ParseOpenParenthesis(
+template <class CharT>
+RegExpParserState* RegExpParserImpl<CharT>::ParseOpenParenthesis(
RegExpParserState* state) {
RegExpLookaround::Type lookaround_type = state->lookaround_type();
bool is_named_capture = false;
- JSRegExp::Flags switch_on = JSRegExp::kNone;
- JSRegExp::Flags switch_off = JSRegExp::kNone;
const ZoneVector<base::uc16>* capture_name = nullptr;
SubexpressionType subexpr_type = CAPTURE;
Advance();
@@ -623,7 +1013,7 @@ RegExpParser::RegExpParserState* RegExpParser::ParseOpenParenthesis(
}
}
if (subexpr_type == CAPTURE) {
- if (captures_started_ >= JSRegExp::kMaxCaptures) {
+ if (captures_started_ >= RegExpMacroAssembler::kMaxRegisterCount) {
ReportError(RegExpError::kTooManyCaptures);
return nullptr;
}
@@ -633,11 +1023,10 @@ RegExpParser::RegExpParserState* RegExpParser::ParseOpenParenthesis(
capture_name = ParseCaptureGroupName(CHECK_FAILED);
}
}
- JSRegExp::Flags flags = (state->builder()->flags() | switch_on) & ~switch_off;
// Store current state and begin new disjunction parsing.
- return zone()->New<RegExpParserState>(state, subexpr_type, lookaround_type,
- captures_started_, capture_name, flags,
- zone());
+ return zone()->template New<RegExpParserState>(
+ state, subexpr_type, lookaround_type, captures_started_, capture_name,
+ state->builder()->flags(), zone());
}
#ifdef DEBUG
@@ -657,14 +1046,14 @@ static bool IsSpecialClassEscape(base::uc32 c) {
}
#endif
-
// In order to know whether an escape is a backreference or not, we have to scan
// the entire regexp and find the number of capturing parentheses. However, we
// don't want to scan the regexp twice unless it is necessary. This mini-parser
// is called when needed. It can see the difference between capturing and
// noncapturing parentheses and can skip character classes and backslash-escaped
// characters.
-void RegExpParser::ScanForCaptures() {
+template <class CharT>
+void RegExpParserImpl<CharT>::ScanForCaptures() {
DCHECK(!is_scanned_for_captures_);
const int saved_position = position();
// Start with captures started previous to current position
@@ -718,8 +1107,8 @@ void RegExpParser::ScanForCaptures() {
Reset(saved_position);
}
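// Illustrative sketch (not the scanner above, which also recognizes named
// groups via '(?<'): counting capturing parentheses in a single pass while
// skipping escaped characters and character classes. '\2' in /\2(a)(b)/ is a
// backreference only because this count reaches two.
static int CountCapturesSimplified(const char* pattern) {
  int captures = 0;
  bool in_class = false;
  for (const char* p = pattern; *p != '\0'; p++) {
    if (*p == '\\' && p[1] != '\0') {
      p++;  // An escaped character can never open a group or a class.
    } else if (in_class) {
      if (*p == ']') in_class = false;
    } else if (*p == '[') {
      in_class = true;
    } else if (*p == '(') {
      // Simplification: every '(?' is treated as non-capturing here; the
      // real scanner still counts named groups of the form '(?<name>'.
      if (p[1] != '?') captures++;
    }
  }
  return captures;
}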
-
-bool RegExpParser::ParseBackReferenceIndex(int* index_out) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::ParseBackReferenceIndex(int* index_out) {
DCHECK_EQ('\\', current());
DCHECK('1' <= Next() && Next() <= '9');
// Try to parse a decimal literal that is no greater than the total number
@@ -731,7 +1120,7 @@ bool RegExpParser::ParseBackReferenceIndex(int* index_out) {
base::uc32 c = current();
if (IsDecimalDigit(c)) {
value = 10 * value + (c - '0');
- if (value > JSRegExp::kMaxCaptures) {
+ if (value > RegExpMacroAssembler::kMaxRegisterCount) {
Reset(start);
return false;
}
@@ -751,7 +1140,9 @@ bool RegExpParser::ParseBackReferenceIndex(int* index_out) {
return true;
}
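// A minimal sketch of the overflow guard above, assuming the same contract:
// accumulate decimal digits, but give up once the value passes the register
// cap so the caller can rewind and re-parse the digits as a character escape.
static bool ScanDecimalIndex(const char* p, int cap, int* index_out) {
  if (*p < '1' || *p > '9') return false;  // Mirrors the DCHECK above.
  int value = 0;
  while (*p >= '0' && *p <= '9') {
    value = 10 * value + (*p - '0');
    if (value > cap) return false;  // Caller Reset()s to the saved position.
    p++;
  }
  *index_out = value;
  return true;
}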
-static void push_code_unit(ZoneVector<base::uc16>* v, uint32_t code_unit) {
+namespace {
+
+void push_code_unit(ZoneVector<base::uc16>* v, uint32_t code_unit) {
if (code_unit <= unibrow::Utf16::kMaxNonSurrogateCharCode) {
v->push_back(code_unit);
} else {
@@ -760,8 +1151,12 @@ static void push_code_unit(ZoneVector<base::uc16>* v, uint32_t code_unit) {
}
}
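// For reference, a sketch of the surrogate split performed in the elided
// branch (via unibrow::Utf16), assuming a valid code point above U+FFFF;
// e.g. U+1F600 becomes the pair 0xD83D, 0xDE00.
static void SplitSurrogatePair(uint32_t cp, uint16_t* lead, uint16_t* trail) {
  uint32_t v = cp - 0x10000;
  *lead = static_cast<uint16_t>(0xD800 + (v >> 10));
  *trail = static_cast<uint16_t>(0xDC00 + (v & 0x3FF));
}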
-const ZoneVector<base::uc16>* RegExpParser::ParseCaptureGroupName() {
- ZoneVector<base::uc16>* name = zone()->New<ZoneVector<base::uc16>>(zone());
+} // namespace
+
+template <class CharT>
+const ZoneVector<base::uc16>* RegExpParserImpl<CharT>::ParseCaptureGroupName() {
+ ZoneVector<base::uc16>* name =
+ zone()->template New<ZoneVector<base::uc16>>(zone());
bool at_start = true;
while (true) {
@@ -805,8 +1200,9 @@ const ZoneVector<base::uc16>* RegExpParser::ParseCaptureGroupName() {
return name;
}
-bool RegExpParser::CreateNamedCaptureAtIndex(const ZoneVector<base::uc16>* name,
- int index) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::CreateNamedCaptureAtIndex(
+ const ZoneVector<base::uc16>* name, int index) {
DCHECK(0 < index && index <= captures_started_);
DCHECK_NOT_NULL(name);
@@ -817,7 +1213,8 @@ bool RegExpParser::CreateNamedCaptureAtIndex(const ZoneVector<base::uc16>* name,
if (named_captures_ == nullptr) {
named_captures_ =
- zone_->New<ZoneSet<RegExpCapture*, RegExpCaptureNameLess>>(zone());
+ zone_->template New<ZoneSet<RegExpCapture*, RegExpCaptureNameLess>>(
+ zone());
} else {
// Check for duplicates and bail if we find any.
@@ -833,8 +1230,9 @@ bool RegExpParser::CreateNamedCaptureAtIndex(const ZoneVector<base::uc16>* name,
return true;
}
-bool RegExpParser::ParseNamedBackReference(RegExpBuilder* builder,
- RegExpParserState* state) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::ParseNamedBackReference(
+ RegExpBuilder* builder, RegExpParserState* state) {
// The parser is assumed to be on the '<' in \k<name>.
if (current() != '<') {
ReportError(RegExpError::kInvalidNamedReference);
@@ -851,14 +1249,14 @@ bool RegExpParser::ParseNamedBackReference(RegExpBuilder* builder,
builder->AddEmpty();
} else {
RegExpBackReference* atom =
- zone()->New<RegExpBackReference>(builder->flags());
+ zone()->template New<RegExpBackReference>(builder->flags());
atom->set_name(name);
builder->AddAtom(atom);
if (named_back_references_ == nullptr) {
named_back_references_ =
- zone()->New<ZoneList<RegExpBackReference*>>(1, zone());
+ zone()->template New<ZoneList<RegExpBackReference*>>(1, zone());
}
named_back_references_->Add(atom, zone());
}
@@ -866,7 +1264,8 @@ bool RegExpParser::ParseNamedBackReference(RegExpBuilder* builder,
return true;
}
-void RegExpParser::PatchNamedBackReferences() {
+template <class CharT>
+void RegExpParserImpl<CharT>::PatchNamedBackReferences() {
if (named_back_references_ == nullptr) return;
if (named_captures_ == nullptr) {
@@ -882,7 +1281,8 @@ void RegExpParser::PatchNamedBackReferences() {
    // Capture used to search the named_captures_ by name; the index of the
    // capture is never used.
static const int kInvalidIndex = 0;
- RegExpCapture* search_capture = zone()->New<RegExpCapture>(kInvalidIndex);
+ RegExpCapture* search_capture =
+ zone()->template New<RegExpCapture>(kInvalidIndex);
DCHECK_NULL(search_capture->name());
search_capture->set_name(ref->name());
@@ -899,70 +1299,36 @@ void RegExpParser::PatchNamedBackReferences() {
}
}
-RegExpCapture* RegExpParser::GetCapture(int index) {
+template <class CharT>
+RegExpCapture* RegExpParserImpl<CharT>::GetCapture(int index) {
  // The index for a capture group is one-based. Its index in the list is
// zero-based.
int know_captures =
is_scanned_for_captures_ ? capture_count_ : captures_started_;
DCHECK(index <= know_captures);
if (captures_ == nullptr) {
- captures_ = zone()->New<ZoneList<RegExpCapture*>>(know_captures, zone());
+ captures_ =
+ zone()->template New<ZoneList<RegExpCapture*>>(know_captures, zone());
}
while (captures_->length() < know_captures) {
- captures_->Add(zone()->New<RegExpCapture>(captures_->length() + 1), zone());
+ captures_->Add(zone()->template New<RegExpCapture>(captures_->length() + 1),
+ zone());
}
return captures_->at(index - 1);
}
-namespace {
-
-struct RegExpCaptureIndexLess {
- bool operator()(const RegExpCapture* lhs, const RegExpCapture* rhs) const {
- DCHECK_NOT_NULL(lhs);
- DCHECK_NOT_NULL(rhs);
- return lhs->index() < rhs->index();
- }
-};
-
-} // namespace
-
-Handle<FixedArray> RegExpParser::CreateCaptureNameMap() {
+template <class CharT>
+ZoneVector<RegExpCapture*>* RegExpParserImpl<CharT>::GetNamedCaptures() const {
if (named_captures_ == nullptr || named_captures_->empty()) {
- return Handle<FixedArray>();
+ return nullptr;
}
- // Named captures are sorted by name (because the set is used to ensure
-  // name uniqueness). But the capture name map must be sorted by index.
-
- ZoneVector<RegExpCapture*> sorted_named_captures(
+ return zone()->template New<ZoneVector<RegExpCapture*>>(
named_captures_->begin(), named_captures_->end(), zone());
- std::sort(sorted_named_captures.begin(), sorted_named_captures.end(),
- RegExpCaptureIndexLess{});
- DCHECK_EQ(sorted_named_captures.size(), named_captures_->size());
-
- Factory* factory = isolate()->factory();
-
- int len = static_cast<int>(sorted_named_captures.size()) * 2;
- Handle<FixedArray> array = factory->NewFixedArray(len);
-
- int i = 0;
- for (const auto& capture : sorted_named_captures) {
- base::Vector<const base::uc16> capture_name(capture->name()->data(),
- capture->name()->size());
- // CSA code in ConstructNewResultFromMatchInfo requires these strings to be
- // internalized so they can be used as property names in the 'exec' results.
- Handle<String> name = factory->InternalizeString(capture_name);
- array->set(i * 2, *name);
- array->set(i * 2 + 1, Smi::FromInt(capture->index()));
-
- i++;
- }
- DCHECK_EQ(i * 2, len);
-
- return array;
}
-bool RegExpParser::HasNamedCaptures() {
+template <class CharT>
+bool RegExpParserImpl<CharT>::HasNamedCaptures() {
if (has_named_captures_ || is_scanned_for_captures_) {
return has_named_captures_;
}
@@ -972,27 +1338,6 @@ bool RegExpParser::HasNamedCaptures() {
return has_named_captures_;
}
-bool RegExpParser::RegExpParserState::IsInsideCaptureGroup(int index) {
- for (RegExpParserState* s = this; s != nullptr; s = s->previous_state()) {
- if (s->group_type() != CAPTURE) continue;
- // Return true if we found the matching capture index.
- if (index == s->capture_index()) return true;
- // Abort if index is larger than what has been parsed up till this state.
- if (index > s->capture_index()) return false;
- }
- return false;
-}
-
-bool RegExpParser::RegExpParserState::IsInsideCaptureGroup(
- const ZoneVector<base::uc16>* name) {
- DCHECK_NOT_NULL(name);
- for (RegExpParserState* s = this; s != nullptr; s = s->previous_state()) {
- if (s->capture_name() == nullptr) continue;
- if (*s->capture_name() == *name) return true;
- }
- return false;
-}
-
// QuantifierPrefix ::
// { DecimalDigits }
// { DecimalDigits , }
@@ -1000,7 +1345,9 @@ bool RegExpParser::RegExpParserState::IsInsideCaptureGroup(
//
// Returns true if parsing succeeds, and sets the min_out and max_out
// values. Values are truncated to RegExpTree::kInfinity if they overflow.
-bool RegExpParser::ParseIntervalQuantifier(int* min_out, int* max_out) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::ParseIntervalQuantifier(int* min_out,
+ int* max_out) {
DCHECK_EQ(current(), '{');
int start = position();
Advance();
@@ -1059,7 +1406,8 @@ bool RegExpParser::ParseIntervalQuantifier(int* min_out, int* max_out) {
return true;
}
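// Worked examples of the contract above: "{2,5}" yields min=2, max=5; "{3,}"
// yields min=3, max=RegExpTree::kInfinity; "{4}" yields min=max=4. On a
// malformed prefix such as "{2,x" the parser rewinds to the saved start
// position and returns false, so the '{' is retried as a literal character.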
-base::uc32 RegExpParser::ParseOctalLiteral() {
+template <class CharT>
+base::uc32 RegExpParserImpl<CharT>::ParseOctalLiteral() {
DCHECK(('0' <= current() && current() <= '7') || current() == kEndMarker);
// For compatibility with some other browsers (not all), we parse
// up to three octal digits with a value below 256.
@@ -1077,7 +1425,8 @@ base::uc32 RegExpParser::ParseOctalLiteral() {
return value;
}
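// Worked examples under the value-below-256 rule: "\101" consumes all three
// digits and yields 0x41 ('A'), while "\477" stops after "\47" (value 39),
// since 39 * 8 + 7 = 319 would exceed 255; the trailing '7' is then parsed
// as a literal character.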
-bool RegExpParser::ParseHexEscape(int length, base::uc32* value) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::ParseHexEscape(int length, base::uc32* value) {
int start = position();
base::uc32 val = 0;
for (int i = 0; i < length; ++i) {
@@ -1095,7 +1444,8 @@ bool RegExpParser::ParseHexEscape(int length, base::uc32* value) {
}
// This parses RegExpUnicodeEscapeSequence as described in ECMA262.
-bool RegExpParser::ParseUnicodeEscape(base::uc32* value) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::ParseUnicodeEscape(base::uc32* value) {
// Accept both \uxxxx and \u{xxxxxx} (if harmony unicode escapes are
// allowed). In the latter case, the number of hex digits between { } is
// arbitrary. \ and u have already been read.
@@ -1308,10 +1658,11 @@ bool IsUnicodePropertyValueCharacter(char c) {
return (c == '_');
}
-} // anonymous namespace
+} // namespace
-bool RegExpParser::ParsePropertyClassName(ZoneVector<char>* name_1,
- ZoneVector<char>* name_2) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::ParsePropertyClassName(ZoneVector<char>* name_1,
+ ZoneVector<char>* name_2) {
DCHECK(name_1->empty());
DCHECK(name_2->empty());
// Parse the property class as follows:
@@ -1348,10 +1699,10 @@ bool RegExpParser::ParsePropertyClassName(ZoneVector<char>* name_1,
return true;
}
-bool RegExpParser::AddPropertyClassRange(ZoneList<CharacterRange>* add_to,
- bool negate,
- const ZoneVector<char>& name_1,
- const ZoneVector<char>& name_2) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::AddPropertyClassRange(
+ ZoneList<CharacterRange>* add_to, bool negate,
+ const ZoneVector<char>& name_1, const ZoneVector<char>& name_2) {
if (name_2.empty()) {
// First attempt to interpret as general category property value name.
const char* name = name_1.data();
@@ -1388,11 +1739,13 @@ bool RegExpParser::AddPropertyClassRange(ZoneList<CharacterRange>* add_to,
}
}
-RegExpTree* RegExpParser::GetPropertySequence(const ZoneVector<char>& name_1) {
+template <class CharT>
+RegExpTree* RegExpParserImpl<CharT>::GetPropertySequence(
+ const ZoneVector<char>& name_1) {
if (!FLAG_harmony_regexp_sequence) return nullptr;
const char* name = name_1.data();
const base::uc32* sequence_list = nullptr;
- JSRegExp::Flags flags = JSRegExp::kUnicode;
+ RegExpFlags flags = RegExpFlag::kUnicode;
if (NameEquals(name, "Emoji_Flag_Sequence")) {
sequence_list = UnicodePropertySequences::kEmojiFlagSequences;
} else if (NameEquals(name, "Emoji_Tag_Sequence")) {
@@ -1421,12 +1774,12 @@ RegExpTree* RegExpParser::GetPropertySequence(const ZoneVector<char>& name_1) {
// emoji_keycap_sequence := [0-9#*] \x{FE0F 20E3}
RegExpBuilder builder(zone(), flags);
ZoneList<CharacterRange>* prefix_ranges =
- zone()->New<ZoneList<CharacterRange>>(2, zone());
+ zone()->template New<ZoneList<CharacterRange>>(2, zone());
prefix_ranges->Add(CharacterRange::Range('0', '9'), zone());
prefix_ranges->Add(CharacterRange::Singleton('#'), zone());
prefix_ranges->Add(CharacterRange::Singleton('*'), zone());
builder.AddCharacterClass(
- zone()->New<RegExpCharacterClass>(zone(), prefix_ranges));
+ zone()->template New<RegExpCharacterClass>(zone(), prefix_ranges));
builder.AddCharacter(0xFE0F);
builder.AddCharacter(0x20E3);
return builder.ToRegExp();
@@ -1435,17 +1788,17 @@ RegExpTree* RegExpParser::GetPropertySequence(const ZoneVector<char>& name_1) {
// emoji_modifier_sequence := emoji_modifier_base emoji_modifier
RegExpBuilder builder(zone(), flags);
ZoneList<CharacterRange>* modifier_base_ranges =
- zone()->New<ZoneList<CharacterRange>>(2, zone());
+ zone()->template New<ZoneList<CharacterRange>>(2, zone());
LookupPropertyValueName(UCHAR_EMOJI_MODIFIER_BASE, "Y", false,
modifier_base_ranges, zone());
- builder.AddCharacterClass(
- zone()->New<RegExpCharacterClass>(zone(), modifier_base_ranges));
+ builder.AddCharacterClass(zone()->template New<RegExpCharacterClass>(
+ zone(), modifier_base_ranges));
ZoneList<CharacterRange>* modifier_ranges =
- zone()->New<ZoneList<CharacterRange>>(2, zone());
+ zone()->template New<ZoneList<CharacterRange>>(2, zone());
LookupPropertyValueName(UCHAR_EMOJI_MODIFIER, "Y", false, modifier_ranges,
zone());
builder.AddCharacterClass(
- zone()->New<RegExpCharacterClass>(zone(), modifier_ranges));
+ zone()->template New<RegExpCharacterClass>(zone(), modifier_ranges));
return builder.ToRegExp();
}
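// Concretely, the keycap builder above corresponds to /[0-9#*]\uFE0F\u20E3/,
// matching e.g. "1\uFE0F\u20E3" (keycap digit one), and the modifier-sequence
// builder matches an emoji modifier base followed by a skin-tone modifier,
// e.g. "\u{1F44D}\u{1F3FD}" (thumbs up, medium skin tone).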
@@ -1454,26 +1807,30 @@ RegExpTree* RegExpParser::GetPropertySequence(const ZoneVector<char>& name_1) {
#else // V8_INTL_SUPPORT
-bool RegExpParser::ParsePropertyClassName(ZoneVector<char>* name_1,
- ZoneVector<char>* name_2) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::ParsePropertyClassName(ZoneVector<char>* name_1,
+ ZoneVector<char>* name_2) {
return false;
}
-bool RegExpParser::AddPropertyClassRange(ZoneList<CharacterRange>* add_to,
- bool negate,
- const ZoneVector<char>& name_1,
- const ZoneVector<char>& name_2) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::AddPropertyClassRange(
+ ZoneList<CharacterRange>* add_to, bool negate,
+ const ZoneVector<char>& name_1, const ZoneVector<char>& name_2) {
return false;
}
-RegExpTree* RegExpParser::GetPropertySequence(const ZoneVector<char>& name) {
+template <class CharT>
+RegExpTree* RegExpParserImpl<CharT>::GetPropertySequence(
+ const ZoneVector<char>& name) {
return nullptr;
}
#endif // V8_INTL_SUPPORT
-bool RegExpParser::ParseUnlimitedLengthHexNumber(int max_value,
- base::uc32* value) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::ParseUnlimitedLengthHexNumber(int max_value,
+ base::uc32* value) {
base::uc32 x = 0;
int d = base::HexValue(current());
if (d < 0) {
@@ -1491,7 +1848,8 @@ bool RegExpParser::ParseUnlimitedLengthHexNumber(int max_value,
return true;
}
-base::uc32 RegExpParser::ParseClassCharacterEscape() {
+template <class CharT>
+base::uc32 RegExpParserImpl<CharT>::ParseClassCharacterEscape() {
DCHECK_EQ('\\', current());
DCHECK(has_next() && !IsSpecialClassEscape(Next()));
Advance();
@@ -1608,11 +1966,11 @@ base::uc32 RegExpParser::ParseClassCharacterEscape() {
UNREACHABLE();
}
-void RegExpParser::ParseClassEscape(ZoneList<CharacterRange>* ranges,
- Zone* zone,
- bool add_unicode_case_equivalents,
- base::uc32* char_out,
- bool* is_class_escape) {
+template <class CharT>
+void RegExpParserImpl<CharT>::ParseClassEscape(
+ ZoneList<CharacterRange>* ranges, Zone* zone,
+ bool add_unicode_case_equivalents, base::uc32* char_out,
+ bool* is_class_escape) {
base::uc32 current_char = current();
if (current_char == '\\') {
switch (Next()) {
@@ -1658,7 +2016,9 @@ void RegExpParser::ParseClassEscape(ZoneList<CharacterRange>* ranges,
}
}
-RegExpTree* RegExpParser::ParseCharacterClass(const RegExpBuilder* builder) {
+template <class CharT>
+RegExpTree* RegExpParserImpl<CharT>::ParseCharacterClass(
+ const RegExpBuilder* builder) {
DCHECK_EQ(current(), '[');
Advance();
bool is_negated = false;
@@ -1667,7 +2027,7 @@ RegExpTree* RegExpParser::ParseCharacterClass(const RegExpBuilder* builder) {
Advance();
}
ZoneList<CharacterRange>* ranges =
- zone()->New<ZoneList<CharacterRange>>(2, zone());
+ zone()->template New<ZoneList<CharacterRange>>(2, zone());
bool add_unicode_case_equivalents = unicode() && builder->ignore_case();
while (has_more() && current() != ']') {
base::uc32 char_1, char_2;
@@ -1713,15 +2073,14 @@ RegExpTree* RegExpParser::ParseCharacterClass(const RegExpBuilder* builder) {
Advance();
RegExpCharacterClass::CharacterClassFlags character_class_flags;
if (is_negated) character_class_flags = RegExpCharacterClass::NEGATED;
- return zone()->New<RegExpCharacterClass>(zone(), ranges,
- character_class_flags);
+ return zone()->template New<RegExpCharacterClass>(zone(), ranges,
+ character_class_flags);
}
-
#undef CHECK_FAILED
-bool RegExpParser::Parse(RegExpCompileData* result,
- const DisallowGarbageCollection&) {
+template <class CharT>
+bool RegExpParserImpl<CharT>::Parse(RegExpCompileData* result) {
DCHECK(result != nullptr);
RegExpTree* tree = ParsePattern();
if (failed()) {
@@ -1742,35 +2101,12 @@ bool RegExpParser::Parse(RegExpCompileData* result,
result->simple = tree->IsAtom() && simple() && capture_count == 0;
result->contains_anchor = contains_anchor();
result->capture_count = capture_count;
+ result->named_captures = GetNamedCaptures();
}
return !failed();
}
-bool RegExpParser::ParseRegExp(Isolate* isolate, Zone* zone,
- FlatStringReader* input, JSRegExp::Flags flags,
- RegExpCompileData* result) {
- RegExpParser parser(input, flags, isolate, zone);
- bool success;
- {
- DisallowGarbageCollection no_gc;
- success = parser.Parse(result, no_gc);
- }
- if (success) {
- result->capture_name_map = parser.CreateCaptureNameMap();
- }
- return success;
-}
-
-bool RegExpParser::VerifyRegExpSyntax(Isolate* isolate, Zone* zone,
- FlatStringReader* input,
- JSRegExp::Flags flags,
- RegExpCompileData* result,
- const DisallowGarbageCollection& no_gc) {
- RegExpParser parser(input, flags, isolate, zone);
- return parser.Parse(result, no_gc);
-}
-
-RegExpBuilder::RegExpBuilder(Zone* zone, JSRegExp::Flags flags)
+RegExpBuilder::RegExpBuilder(Zone* zone, RegExpFlags flags)
: zone_(zone),
pending_empty_(false),
flags_(flags),
@@ -2054,5 +2390,58 @@ bool RegExpBuilder::AddQuantifierToAtom(
return true;
}
+template class RegExpParserImpl<uint8_t>;
+template class RegExpParserImpl<base::uc16>;
+
+} // namespace
+
+// static
+bool RegExpParser::ParseRegExpFromHeapString(Isolate* isolate, Zone* zone,
+ Handle<String> input,
+ RegExpFlags flags,
+ RegExpCompileData* result) {
+ DisallowGarbageCollection no_gc;
+ uintptr_t stack_limit = isolate->stack_guard()->real_climit();
+ String::FlatContent content = input->GetFlatContent(no_gc);
+ if (content.IsOneByte()) {
+ base::Vector<const uint8_t> v = content.ToOneByteVector();
+ return RegExpParserImpl<uint8_t>{v.begin(), v.length(), flags,
+ stack_limit, zone, no_gc}
+ .Parse(result);
+ } else {
+ base::Vector<const base::uc16> v = content.ToUC16Vector();
+ return RegExpParserImpl<base::uc16>{v.begin(), v.length(), flags,
+ stack_limit, zone, no_gc}
+ .Parse(result);
+ }
+}
+
+// static
+template <class CharT>
+bool RegExpParser::VerifyRegExpSyntax(Zone* zone, uintptr_t stack_limit,
+ const CharT* input, int input_length,
+ RegExpFlags flags,
+ RegExpCompileData* result,
+ const DisallowGarbageCollection& no_gc) {
+ return RegExpParserImpl<CharT>{input, input_length, flags,
+ stack_limit, zone, no_gc}
+ .Parse(result);
+}
+
+template bool RegExpParser::VerifyRegExpSyntax<uint8_t>(
+ Zone*, uintptr_t, const uint8_t*, int, RegExpFlags, RegExpCompileData*,
+ const DisallowGarbageCollection&);
+template bool RegExpParser::VerifyRegExpSyntax<base::uc16>(
+ Zone*, uintptr_t, const base::uc16*, int, RegExpFlags, RegExpCompileData*,
+ const DisallowGarbageCollection&);
+
+// static
+bool RegExpParser::VerifyRegExpSyntax(Isolate* isolate, Zone* zone,
+ Handle<String> input, RegExpFlags flags,
+ RegExpCompileData* result,
+ const DisallowGarbageCollection&) {
+ return ParseRegExpFromHeapString(isolate, zone, input, flags, result);
+}
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/regexp/regexp-parser.h b/deps/v8/src/regexp/regexp-parser.h
index 3766d43fb5..4fc6400297 100644
--- a/deps/v8/src/regexp/regexp-parser.h
+++ b/deps/v8/src/regexp/regexp-parser.h
@@ -5,367 +5,35 @@
#ifndef V8_REGEXP_REGEXP_PARSER_H_
#define V8_REGEXP_REGEXP_PARSER_H_
-#include "src/base/strings.h"
-#include "src/objects/js-regexp.h"
-#include "src/objects/objects.h"
-#include "src/regexp/regexp-ast.h"
-#include "src/regexp/regexp-error.h"
-#include "src/zone/zone.h"
+#include "src/common/assert-scope.h"
+#include "src/handles/handles.h"
+#include "src/regexp/regexp-flags.h"
namespace v8 {
namespace internal {
-struct RegExpCompileData;
-
-// A BufferedZoneList is an automatically growing list, just like (and backed
-// by) a ZoneList, that is optimized for the case of adding and removing
-// a single element. The last element added is stored outside the backing list,
-// and if no more than one element is ever added, the ZoneList isn't even
-// allocated.
-// Elements must not be nullptr pointers.
-template <typename T, int initial_size>
-class BufferedZoneList {
- public:
- BufferedZoneList() : list_(nullptr), last_(nullptr) {}
-
- // Adds element at end of list. This element is buffered and can
- // be read using last() or removed using RemoveLast until a new Add or until
- // RemoveLast or GetList has been called.
- void Add(T* value, Zone* zone) {
- if (last_ != nullptr) {
- if (list_ == nullptr) {
- list_ = zone->New<ZoneList<T*>>(initial_size, zone);
- }
- list_->Add(last_, zone);
- }
- last_ = value;
- }
-
- T* last() {
- DCHECK(last_ != nullptr);
- return last_;
- }
-
- T* RemoveLast() {
- DCHECK(last_ != nullptr);
- T* result = last_;
- if ((list_ != nullptr) && (list_->length() > 0))
- last_ = list_->RemoveLast();
- else
- last_ = nullptr;
- return result;
- }
-
- T* Get(int i) {
- DCHECK((0 <= i) && (i < length()));
- if (list_ == nullptr) {
- DCHECK_EQ(0, i);
- return last_;
- } else {
- if (i == list_->length()) {
- DCHECK(last_ != nullptr);
- return last_;
- } else {
- return list_->at(i);
- }
- }
- }
-
- void Clear() {
- list_ = nullptr;
- last_ = nullptr;
- }
-
- int length() {
- int length = (list_ == nullptr) ? 0 : list_->length();
- return length + ((last_ == nullptr) ? 0 : 1);
- }
-
- ZoneList<T*>* GetList(Zone* zone) {
- if (list_ == nullptr) {
- list_ = zone->New<ZoneList<T*>>(initial_size, zone);
- }
- if (last_ != nullptr) {
- list_->Add(last_, zone);
- last_ = nullptr;
- }
- return list_;
- }
-
- private:
- ZoneList<T*>* list_;
- T* last_;
-};
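// Usage sketch of the buffered behavior being removed from this header: the
// most recently added element lives outside the backing ZoneList, so a single
// Add() never allocates.
//
//   BufferedZoneList<RegExpTree, 2> terms;
//   terms.Add(a, zone);     // buffered in last_, list_ stays nullptr
//   terms.Add(b, zone);     // allocates list_, pushes a; b becomes last_
//   RegExpTree* t = terms.RemoveLast();  // returns b; a is last_ again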
-
+class String;
+class Zone;
-// Accumulates RegExp atoms and assertions into lists of terms and alternatives.
-class RegExpBuilder : public ZoneObject {
- public:
- RegExpBuilder(Zone* zone, JSRegExp::Flags flags);
- void AddCharacter(base::uc16 character);
- void AddUnicodeCharacter(base::uc32 character);
- void AddEscapedUnicodeCharacter(base::uc32 character);
- // "Adds" an empty expression. Does nothing except consume a
-  // following quantifier.
- void AddEmpty();
- void AddCharacterClass(RegExpCharacterClass* cc);
- void AddCharacterClassForDesugaring(base::uc32 c);
- void AddAtom(RegExpTree* tree);
- void AddTerm(RegExpTree* tree);
- void AddAssertion(RegExpTree* tree);
- void NewAlternative(); // '|'
- bool AddQuantifierToAtom(int min, int max,
- RegExpQuantifier::QuantifierType type);
- void FlushText();
- RegExpTree* ToRegExp();
- JSRegExp::Flags flags() const { return flags_; }
- void set_flags(JSRegExp::Flags flags) { flags_ = flags; }
-
- bool ignore_case() const { return (flags_ & JSRegExp::kIgnoreCase) != 0; }
- bool multiline() const { return (flags_ & JSRegExp::kMultiline) != 0; }
- bool dotall() const { return (flags_ & JSRegExp::kDotAll) != 0; }
-
- private:
- static const base::uc16 kNoPendingSurrogate = 0;
- void AddLeadSurrogate(base::uc16 lead_surrogate);
- void AddTrailSurrogate(base::uc16 trail_surrogate);
- void FlushPendingSurrogate();
- void FlushCharacters();
- void FlushTerms();
- bool NeedsDesugaringForUnicode(RegExpCharacterClass* cc);
- bool NeedsDesugaringForIgnoreCase(base::uc32 c);
- Zone* zone() const { return zone_; }
- bool unicode() const { return (flags_ & JSRegExp::kUnicode) != 0; }
-
- Zone* zone_;
- bool pending_empty_;
- JSRegExp::Flags flags_;
- ZoneList<base::uc16>* characters_;
- base::uc16 pending_surrogate_;
- BufferedZoneList<RegExpTree, 2> terms_;
- BufferedZoneList<RegExpTree, 2> text_;
- BufferedZoneList<RegExpTree, 2> alternatives_;
-#ifdef DEBUG
- enum { ADD_NONE, ADD_CHAR, ADD_TERM, ADD_ASSERT, ADD_ATOM } last_added_;
-#define LAST(x) last_added_ = x;
-#else
-#define LAST(x)
-#endif
-};
+struct RegExpCompileData;
-class V8_EXPORT_PRIVATE RegExpParser {
+class V8_EXPORT_PRIVATE RegExpParser : public AllStatic {
public:
- RegExpParser(FlatStringReader* in, JSRegExp::Flags flags, Isolate* isolate,
- Zone* zone);
+ static bool ParseRegExpFromHeapString(Isolate* isolate, Zone* zone,
+ Handle<String> input, RegExpFlags flags,
+ RegExpCompileData* result);
- static bool ParseRegExp(Isolate* isolate, Zone* zone, FlatStringReader* input,
- JSRegExp::Flags flags, RegExpCompileData* result);
+ template <class CharT>
+ static bool VerifyRegExpSyntax(Zone* zone, uintptr_t stack_limit,
+ const CharT* input, int input_length,
+ RegExpFlags flags, RegExpCompileData* result,
+ const DisallowGarbageCollection& no_gc);
// Used by the SpiderMonkey embedding of irregexp.
static bool VerifyRegExpSyntax(Isolate* isolate, Zone* zone,
- FlatStringReader* input, JSRegExp::Flags flags,
+ Handle<String> input, RegExpFlags flags,
RegExpCompileData* result,
- const DisallowGarbageCollection& nogc);
-
- private:
- bool Parse(RegExpCompileData* result, const DisallowGarbageCollection&);
-
- RegExpTree* ParsePattern();
- RegExpTree* ParseDisjunction();
- RegExpTree* ParseGroup();
-
- // Parses a {...,...} quantifier and stores the range in the given
- // out parameters.
- bool ParseIntervalQuantifier(int* min_out, int* max_out);
-
- // Parses and returns a single escaped character. The character
-  // must not be 'b' or 'B' since they are usually handled specially.
- base::uc32 ParseClassCharacterEscape();
-
- // Checks whether the following is a length-digit hexadecimal number,
- // and sets the value if it is.
- bool ParseHexEscape(int length, base::uc32* value);
- bool ParseUnicodeEscape(base::uc32* value);
- bool ParseUnlimitedLengthHexNumber(int max_value, base::uc32* value);
-
- bool ParsePropertyClassName(ZoneVector<char>* name_1,
- ZoneVector<char>* name_2);
- bool AddPropertyClassRange(ZoneList<CharacterRange>* add_to, bool negate,
- const ZoneVector<char>& name_1,
- const ZoneVector<char>& name_2);
-
- RegExpTree* GetPropertySequence(const ZoneVector<char>& name_1);
- RegExpTree* ParseCharacterClass(const RegExpBuilder* state);
-
- base::uc32 ParseOctalLiteral();
-
- // Tries to parse the input as a back reference. If successful it
- // stores the result in the output parameter and returns true. If
- // it fails it will push back the characters read so the same characters
- // can be reparsed.
- bool ParseBackReferenceIndex(int* index_out);
-
- // Parse inside a class. Either add escaped class to the range, or return
- // false and pass parsed single character through |char_out|.
- void ParseClassEscape(ZoneList<CharacterRange>* ranges, Zone* zone,
- bool add_unicode_case_equivalents, base::uc32* char_out,
- bool* is_class_escape);
-
- char ParseClassEscape();
-
- RegExpTree* ReportError(RegExpError error);
- void Advance();
- void Advance(int dist);
- void Reset(int pos);
-
- // Reports whether the pattern might be used as a literal search string.
- // Only use if the result of the parse is a single atom node.
- bool simple();
- bool contains_anchor() { return contains_anchor_; }
- void set_contains_anchor() { contains_anchor_ = true; }
- int captures_started() { return captures_started_; }
- int position() { return next_pos_ - 1; }
- bool failed() { return failed_; }
- // The Unicode flag can't be changed using in-regexp syntax, so it's OK to
- // just read the initial flag value here.
- bool unicode() const { return (top_level_flags_ & JSRegExp::kUnicode) != 0; }
-
- static bool IsSyntaxCharacterOrSlash(base::uc32 c);
-
- static const base::uc32 kEndMarker = (1 << 21);
-
- private:
- enum SubexpressionType {
- INITIAL,
- CAPTURE, // All positive values represent captures.
- POSITIVE_LOOKAROUND,
- NEGATIVE_LOOKAROUND,
- GROUPING
- };
-
- class RegExpParserState : public ZoneObject {
- public:
- // Push a state on the stack.
- RegExpParserState(RegExpParserState* previous_state,
- SubexpressionType group_type,
- RegExpLookaround::Type lookaround_type,
- int disjunction_capture_index,
- const ZoneVector<base::uc16>* capture_name,
- JSRegExp::Flags flags, Zone* zone)
- : previous_state_(previous_state),
- builder_(zone->New<RegExpBuilder>(zone, flags)),
- group_type_(group_type),
- lookaround_type_(lookaround_type),
- disjunction_capture_index_(disjunction_capture_index),
- capture_name_(capture_name) {}
- // Parser state of containing expression, if any.
- RegExpParserState* previous_state() const { return previous_state_; }
- bool IsSubexpression() { return previous_state_ != nullptr; }
- // RegExpBuilder building this regexp's AST.
- RegExpBuilder* builder() const { return builder_; }
- // Type of regexp being parsed (parenthesized group or entire regexp).
- SubexpressionType group_type() const { return group_type_; }
- // Lookahead or Lookbehind.
- RegExpLookaround::Type lookaround_type() const { return lookaround_type_; }
- // Index in captures array of first capture in this sub-expression, if any.
- // Also the capture index of this sub-expression itself, if group_type
- // is CAPTURE.
- int capture_index() const { return disjunction_capture_index_; }
- // The name of the current sub-expression, if group_type is CAPTURE. Only
- // used for named captures.
- const ZoneVector<base::uc16>* capture_name() const { return capture_name_; }
-
- bool IsNamedCapture() const { return capture_name_ != nullptr; }
-
- // Check whether the parser is inside a capture group with the given index.
- bool IsInsideCaptureGroup(int index);
- // Check whether the parser is inside a capture group with the given name.
- bool IsInsideCaptureGroup(const ZoneVector<base::uc16>* name);
-
- private:
- // Linked list implementation of stack of states.
- RegExpParserState* const previous_state_;
- // Builder for the stored disjunction.
- RegExpBuilder* const builder_;
- // Stored disjunction type (capture, look-ahead or grouping), if any.
- const SubexpressionType group_type_;
- // Stored read direction.
- const RegExpLookaround::Type lookaround_type_;
- // Stored disjunction's capture index (if any).
- const int disjunction_capture_index_;
- // Stored capture name (if any).
- const ZoneVector<base::uc16>* const capture_name_;
- };
-
- // Return the 1-indexed RegExpCapture object, allocate if necessary.
- RegExpCapture* GetCapture(int index);
-
- // Creates a new named capture at the specified index. Must be called exactly
- // once for each named capture. Fails if a capture with the same name is
- // encountered.
- bool CreateNamedCaptureAtIndex(const ZoneVector<base::uc16>* name, int index);
-
- // Parses the name of a capture group (?<name>pattern). The name must adhere
- // to IdentifierName in the ECMAScript standard.
- const ZoneVector<base::uc16>* ParseCaptureGroupName();
-
- bool ParseNamedBackReference(RegExpBuilder* builder,
- RegExpParserState* state);
- RegExpParserState* ParseOpenParenthesis(RegExpParserState* state);
-
- // After the initial parsing pass, patch corresponding RegExpCapture objects
- // into all RegExpBackReferences. This is done after initial parsing in order
-  // to avoid complicating cases in which references come before the capture.
- void PatchNamedBackReferences();
-
- Handle<FixedArray> CreateCaptureNameMap();
-
- // Returns true iff the pattern contains named captures. May call
- // ScanForCaptures to look ahead at the remaining pattern.
- bool HasNamedCaptures();
-
- Isolate* isolate() { return isolate_; }
- Zone* zone() const { return zone_; }
-
- base::uc32 current() { return current_; }
- bool has_more() { return has_more_; }
- bool has_next() { return next_pos_ < in()->length(); }
- base::uc32 Next();
- template <bool update_position>
- base::uc32 ReadNext();
- FlatStringReader* in() { return in_; }
- void ScanForCaptures();
-
- struct RegExpCaptureNameLess {
- bool operator()(const RegExpCapture* lhs, const RegExpCapture* rhs) const {
- DCHECK_NOT_NULL(lhs);
- DCHECK_NOT_NULL(rhs);
- return *lhs->name() < *rhs->name();
- }
- };
-
- Isolate* isolate_;
- Zone* zone_;
- RegExpError error_ = RegExpError::kNone;
- int error_pos_ = 0;
- ZoneList<RegExpCapture*>* captures_;
- ZoneSet<RegExpCapture*, RegExpCaptureNameLess>* named_captures_;
- ZoneList<RegExpBackReference*>* named_back_references_;
- FlatStringReader* in_;
- base::uc32 current_;
-  // These are the flags specified outside the regexp syntax, i.e. after the
- // terminating '/' or in the second argument to the constructor. The current
- // flags are stored on the RegExpBuilder.
- JSRegExp::Flags top_level_flags_;
- int next_pos_;
- int captures_started_;
- int capture_count_; // Only valid after we have scanned for captures.
- bool has_more_;
- bool simple_;
- bool contains_anchor_;
- bool is_scanned_for_captures_;
- bool has_named_captures_; // Only valid after we have scanned for captures.
- bool failed_;
+ const DisallowGarbageCollection& no_gc);
};
} // namespace internal
diff --git a/deps/v8/src/regexp/regexp-utils.cc b/deps/v8/src/regexp/regexp-utils.cc
index 1e72a124c9..dabe5ee4a2 100644
--- a/deps/v8/src/regexp/regexp-utils.cc
+++ b/deps/v8/src/regexp/regexp-utils.cc
@@ -120,33 +120,6 @@ MaybeHandle<Object> RegExpUtils::RegExpExec(Isolate* isolate,
}
}
-Maybe<bool> RegExpUtils::IsRegExp(Isolate* isolate, Handle<Object> object) {
- if (!object->IsJSReceiver()) return Just(false);
-
- Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
-
- Handle<Object> match;
- ASSIGN_RETURN_ON_EXCEPTION_VALUE(
- isolate, match,
- JSObject::GetProperty(isolate, receiver,
- isolate->factory()->match_symbol()),
- Nothing<bool>());
-
- if (!match->IsUndefined(isolate)) {
- const bool match_as_boolean = match->BooleanValue(isolate);
-
- if (match_as_boolean && !object->IsJSRegExp()) {
- isolate->CountUsage(v8::Isolate::kRegExpMatchIsTrueishOnNonJSRegExp);
- } else if (!match_as_boolean && object->IsJSRegExp()) {
- isolate->CountUsage(v8::Isolate::kRegExpMatchIsFalseishOnJSRegExp);
- }
-
- return Just(match_as_boolean);
- }
-
- return Just(object->IsJSRegExp());
-}
-
bool RegExpUtils::IsUnmodifiedRegExp(Isolate* isolate, Handle<Object> obj) {
#ifdef V8_ENABLE_FORCE_SLOW_PATH
if (isolate->force_slow_path()) return false;
diff --git a/deps/v8/src/regexp/regexp-utils.h b/deps/v8/src/regexp/regexp-utils.h
index 19f1f24039..c0333fb170 100644
--- a/deps/v8/src/regexp/regexp-utils.h
+++ b/deps/v8/src/regexp/regexp-utils.h
@@ -5,12 +5,15 @@
#ifndef V8_REGEXP_REGEXP_UTILS_H_
#define V8_REGEXP_REGEXP_UTILS_H_
-#include "src/objects/objects.h"
+#include "src/common/globals.h"
namespace v8 {
namespace internal {
+class JSReceiver;
+class Object;
class RegExpMatchInfo;
+class String;
// Helper methods for C++ regexp builtins.
class RegExpUtils : public AllStatic {
@@ -31,10 +34,6 @@ class RegExpUtils : public AllStatic {
Isolate* isolate, Handle<JSReceiver> regexp, Handle<String> string,
Handle<Object> exec);
- // ES#sec-isregexp IsRegExp ( argument )
- // Includes checking of the match property.
- static Maybe<bool> IsRegExp(Isolate* isolate, Handle<Object> object);
-
// Checks whether the given object is an unmodified JSRegExp instance.
// Neither the object's map, nor its prototype's map, nor any relevant
// method on the prototype may be modified.
diff --git a/deps/v8/src/regexp/regexp.cc b/deps/v8/src/regexp/regexp.cc
index 9bdebe1918..742c6d9999 100644
--- a/deps/v8/src/regexp/regexp.cc
+++ b/deps/v8/src/regexp/regexp.cc
@@ -37,7 +37,7 @@ class RegExpImpl final : public AllStatic {
// Prepares a JSRegExp object with Irregexp-specific data.
static void IrregexpInitialize(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern, JSRegExp::Flags flags,
+ Handle<String> pattern, RegExpFlags flags,
int capture_count, uint32_t backtrack_limit);
// Prepare a RegExp for being executed one or more times (using
@@ -51,7 +51,7 @@ class RegExpImpl final : public AllStatic {
Handle<String> subject);
static void AtomCompile(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern, JSRegExp::Flags flags,
+ Handle<String> pattern, RegExpFlags flags,
Handle<String> match_pattern);
static int AtomExecRaw(Isolate* isolate, Handle<JSRegExp> regexp,
@@ -90,7 +90,7 @@ class RegExpImpl final : public AllStatic {
// Returns true on success, false on failure.
static bool Compile(Isolate* isolate, Zone* zone, RegExpCompileData* input,
- JSRegExp::Flags flags, Handle<String> pattern,
+ RegExpFlags flags, Handle<String> pattern,
Handle<String> sample_subject, bool is_one_byte,
uint32_t& backtrack_limit);
@@ -102,6 +102,32 @@ class RegExpImpl final : public AllStatic {
static Code IrregexpNativeCode(FixedArray re, bool is_one_byte);
};
+// static
+bool RegExp::CanGenerateBytecode() {
+ return FLAG_regexp_interpret_all || FLAG_regexp_tier_up;
+}
+
+// static
+template <class CharT>
+bool RegExp::VerifySyntax(Zone* zone, uintptr_t stack_limit, const CharT* input,
+ int input_length, RegExpFlags flags,
+ RegExpError* regexp_error_out,
+ const DisallowGarbageCollection& no_gc) {
+ RegExpCompileData data;
+ bool pattern_is_valid = RegExpParser::VerifyRegExpSyntax(
+ zone, stack_limit, input, input_length, flags, &data, no_gc);
+ *regexp_error_out = data.error;
+ return pattern_is_valid;
+}
+
+template bool RegExp::VerifySyntax<uint8_t>(Zone*, uintptr_t, const uint8_t*,
+ int, RegExpFlags,
+ RegExpError* regexp_error_out,
+ const DisallowGarbageCollection&);
+template bool RegExp::VerifySyntax<base::uc16>(
+ Zone*, uintptr_t, const base::uc16*, int, RegExpFlags,
+ RegExpError* regexp_error_out, const DisallowGarbageCollection&);
+
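// A minimal usage sketch of the new syntax-verification entry point
// (hypothetical caller; the stack limit comes from the isolate's stack guard,
// matching ParseRegExpFromHeapString above):
//
//   DisallowGarbageCollection no_gc;
//   Zone zone(isolate->allocator(), ZONE_NAME);
//   RegExpError error = RegExpError::kNone;
//   bool ok = RegExp::VerifySyntax<uint8_t>(
//       &zone, isolate->stack_guard()->real_climit(), pattern_data,
//       pattern_length, RegExpFlags(), &error, no_gc);
//   if (!ok) { /* `error` holds the parse failure */ }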
MaybeHandle<Object> RegExp::ThrowRegExpException(Isolate* isolate,
Handle<JSRegExp> re,
Handle<String> pattern,
@@ -154,8 +180,7 @@ static bool HasFewDifferentCharacters(Handle<String> pattern) {
// static
MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern,
- JSRegExp::Flags flags,
+ Handle<String> pattern, RegExpFlags flags,
uint32_t backtrack_limit) {
DCHECK(pattern->IsFlat());
@@ -169,8 +194,8 @@ MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
CompilationCache* compilation_cache = nullptr;
if (is_compilation_cache_enabled) {
compilation_cache = isolate->compilation_cache();
- MaybeHandle<FixedArray> maybe_cached =
- compilation_cache->LookupRegExp(pattern, flags);
+ MaybeHandle<FixedArray> maybe_cached = compilation_cache->LookupRegExp(
+ pattern, JSRegExp::AsJSRegExpFlags(flags));
Handle<FixedArray> cached;
if (maybe_cached.ToHandle(&cached)) {
re->set_data(*cached);
@@ -180,10 +205,9 @@ MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
PostponeInterruptsScope postpone(isolate);
RegExpCompileData parse_result;
- FlatStringReader reader(isolate, pattern);
DCHECK(!isolate->has_pending_exception());
- if (!RegExpParser::ParseRegExp(isolate, &zone, &reader, flags,
- &parse_result)) {
+ if (!RegExpParser::ParseRegExpFromHeapString(isolate, &zone, pattern, flags,
+ &parse_result)) {
// Throw an exception if we fail to parse the pattern.
return RegExp::ThrowRegExpException(isolate, re, pattern,
parse_result.error);
@@ -210,7 +234,7 @@ MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
ExperimentalRegExp::Initialize(isolate, re, pattern, flags,
parse_result.capture_count);
has_been_compiled = true;
- } else if (parse_result.simple && !IgnoreCase(flags) && !IsSticky(flags) &&
+ } else if (parse_result.simple && !IsIgnoreCase(flags) && !IsSticky(flags) &&
!HasFewDifferentCharacters(pattern)) {
// Parse-tree is a single atom that is equal to the pattern.
RegExpImpl::AtomCompile(isolate, re, pattern, flags, pattern);
@@ -225,7 +249,7 @@ MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
ASSIGN_RETURN_ON_EXCEPTION(
isolate, atom_string,
isolate->factory()->NewStringFromTwoByte(atom_pattern), Object);
- if (!IgnoreCase(flags) && !HasFewDifferentCharacters(atom_string)) {
+ if (!IsIgnoreCase(flags) && !HasFewDifferentCharacters(atom_string)) {
RegExpImpl::AtomCompile(isolate, re, pattern, flags, atom_string);
has_been_compiled = true;
}
@@ -239,7 +263,8 @@ MaybeHandle<Object> RegExp::Compile(Isolate* isolate, Handle<JSRegExp> re,
// and we can store it in the cache.
Handle<FixedArray> data(FixedArray::cast(re->data()), isolate);
if (is_compilation_cache_enabled) {
- compilation_cache->PutRegExp(pattern, flags, data);
+ compilation_cache->PutRegExp(pattern, JSRegExp::AsJSRegExpFlags(flags),
+ data);
}
return re;
@@ -301,9 +326,10 @@ MaybeHandle<Object> RegExp::Exec(Isolate* isolate, Handle<JSRegExp> regexp,
// RegExp Atom implementation: Simple string search using indexOf.
void RegExpImpl::AtomCompile(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern, JSRegExp::Flags flags,
+ Handle<String> pattern, RegExpFlags flags,
Handle<String> match_pattern) {
- isolate->factory()->SetRegExpAtomData(re, pattern, flags, match_pattern);
+ isolate->factory()->SetRegExpAtomData(
+ re, pattern, JSRegExp::AsJSRegExpFlags(flags), match_pattern);
}
static void SetAtomLastCapture(Isolate* isolate,
@@ -420,9 +446,9 @@ bool RegExpImpl::EnsureCompiledIrregexp(Isolate* isolate, Handle<JSRegExp> re,
return CompileIrregexp(isolate, re, sample_subject, is_one_byte);
}
-#ifdef DEBUG
namespace {
+#ifdef DEBUG
bool RegExpCodeIsValidForPreCompilation(Handle<JSRegExp> re, bool is_one_byte) {
Object entry = re->Code(is_one_byte);
Object bytecode = re->Bytecode(is_one_byte);
@@ -448,9 +474,50 @@ bool RegExpCodeIsValidForPreCompilation(Handle<JSRegExp> re, bool is_one_byte) {
return true;
}
+#endif
+
+struct RegExpCaptureIndexLess {
+ bool operator()(const RegExpCapture* lhs, const RegExpCapture* rhs) const {
+ DCHECK_NOT_NULL(lhs);
+ DCHECK_NOT_NULL(rhs);
+ return lhs->index() < rhs->index();
+ }
+};
} // namespace
-#endif
+
+// static
+Handle<FixedArray> RegExp::CreateCaptureNameMap(
+ Isolate* isolate, ZoneVector<RegExpCapture*>* named_captures) {
+ if (named_captures == nullptr) return Handle<FixedArray>();
+
+ DCHECK(!named_captures->empty());
+
+ // Named captures are sorted by name (because the set is used to ensure
+  // name uniqueness). But the capture name map must be sorted by index.
+
+ std::sort(named_captures->begin(), named_captures->end(),
+ RegExpCaptureIndexLess{});
+
+ int len = static_cast<int>(named_captures->size()) * 2;
+ Handle<FixedArray> array = isolate->factory()->NewFixedArray(len);
+
+ int i = 0;
+ for (const RegExpCapture* capture : *named_captures) {
+ base::Vector<const base::uc16> capture_name(capture->name()->data(),
+ capture->name()->size());
+ // CSA code in ConstructNewResultFromMatchInfo requires these strings to be
+ // internalized so they can be used as property names in the 'exec' results.
+ Handle<String> name = isolate->factory()->InternalizeString(capture_name);
+ array->set(i * 2, *name);
+ array->set(i * 2 + 1, Smi::FromInt(capture->index()));
+
+ i++;
+ }
+ DCHECK_EQ(i * 2, len);
+
+ return array;
+}
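// Resulting layout: a flat array of [name, index] pairs sorted by capture
// index. For example, /(?<year>\d{4})-(?<month>\d{2})/ produces
//   array[0] = "year",  array[1] = Smi(1),
//   array[2] = "month", array[3] = Smi(2).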
bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
Handle<String> sample_subject,
@@ -461,14 +528,13 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
DCHECK(RegExpCodeIsValidForPreCompilation(re, is_one_byte));
- JSRegExp::Flags flags = re->GetFlags();
+ RegExpFlags flags = JSRegExp::AsRegExpFlags(re->GetFlags());
Handle<String> pattern(re->Pattern(), isolate);
pattern = String::Flatten(isolate, pattern);
RegExpCompileData compile_data;
- FlatStringReader reader(isolate, pattern);
- if (!RegExpParser::ParseRegExp(isolate, &zone, &reader, flags,
- &compile_data)) {
+ if (!RegExpParser::ParseRegExpFromHeapString(isolate, &zone, pattern, flags,
+ &compile_data)) {
// Throw an exception if we fail to parse the pattern.
// THIS SHOULD NOT HAPPEN. We already pre-parsed it successfully once.
USE(RegExp::ThrowRegExpException(isolate, re, pattern, compile_data.error));
@@ -513,7 +579,9 @@ bool RegExpImpl::CompileIrregexp(Isolate* isolate, Handle<JSRegExp> re,
BUILTIN_CODE(isolate, RegExpInterpreterTrampoline);
data->set(JSRegExp::code_index(is_one_byte), ToCodeT(*trampoline));
}
- re->SetCaptureNameMap(compile_data.capture_name_map);
+ Handle<FixedArray> capture_name_map =
+ RegExp::CreateCaptureNameMap(isolate, compile_data.named_captures);
+ re->SetCaptureNameMap(capture_name_map);
int register_max = IrregexpMaxRegisterCount(*data);
if (compile_data.register_count > register_max) {
SetIrregexpMaxRegisterCount(*data, compile_data.register_count);
@@ -553,12 +621,13 @@ Code RegExpImpl::IrregexpNativeCode(FixedArray re, bool is_one_byte) {
}
void RegExpImpl::IrregexpInitialize(Isolate* isolate, Handle<JSRegExp> re,
- Handle<String> pattern,
- JSRegExp::Flags flags, int capture_count,
+ Handle<String> pattern, RegExpFlags flags,
+ int capture_count,
uint32_t backtrack_limit) {
// Initialize compiled code entries to null.
- isolate->factory()->SetRegExpIrregexpData(re, pattern, flags, capture_count,
- backtrack_limit);
+ isolate->factory()->SetRegExpIrregexpData(re, pattern,
+ JSRegExp::AsJSRegExpFlags(flags),
+ capture_count, backtrack_limit);
}
// static
@@ -783,7 +852,7 @@ bool TooMuchRegExpCode(Isolate* isolate, Handle<String> pattern) {
// static
bool RegExp::CompileForTesting(Isolate* isolate, Zone* zone,
- RegExpCompileData* data, JSRegExp::Flags flags,
+ RegExpCompileData* data, RegExpFlags flags,
Handle<String> pattern,
Handle<String> sample_subject,
bool is_one_byte) {
@@ -793,7 +862,7 @@ bool RegExp::CompileForTesting(Isolate* isolate, Zone* zone,
}
bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
- JSRegExp::Flags flags, Handle<String> pattern,
+ RegExpFlags flags, Handle<String> pattern,
Handle<String> sample_subject, bool is_one_byte,
uint32_t& backtrack_limit) {
if (JSRegExp::RegistersForCaptureCount(data->capture_count) >
@@ -868,6 +937,9 @@ bool RegExpImpl::Compile(Isolate* isolate, Zone* zone, RegExpCompileData* data,
#elif V8_TARGET_ARCH_RISCV64
macro_assembler.reset(new RegExpMacroAssemblerRISCV(isolate, zone, mode,
output_register_count));
+#elif V8_TARGET_ARCH_LOONG64
+ macro_assembler.reset(new RegExpMacroAssemblerLOONG64(
+ isolate, zone, mode, output_register_count));
#else
#error "Unsupported architecture"
#endif
@@ -970,7 +1042,7 @@ RegExpGlobalCache::RegExpGlobalCache(Handle<JSRegExp> regexp,
regexp_(regexp),
subject_(subject),
isolate_(isolate) {
- DCHECK(IsGlobal(regexp->GetFlags()));
+ DCHECK(IsGlobal(JSRegExp::AsRegExpFlags(regexp->GetFlags())));
switch (regexp_->TypeTag()) {
case JSRegExp::NOT_COMPILED:
@@ -1045,7 +1117,8 @@ RegExpGlobalCache::~RegExpGlobalCache() {
}
int RegExpGlobalCache::AdvanceZeroLength(int last_index) {
- if (IsUnicode(regexp_->GetFlags()) && last_index + 1 < subject_->length() &&
+ if (IsUnicode(JSRegExp::AsRegExpFlags(regexp_->GetFlags())) &&
+ last_index + 1 < subject_->length() &&
unibrow::Utf16::IsLeadSurrogate(subject_->Get(last_index)) &&
unibrow::Utf16::IsTrailSurrogate(subject_->Get(last_index + 1))) {
// Advance over the surrogate pair.
diff --git a/deps/v8/src/regexp/regexp.h b/deps/v8/src/regexp/regexp.h
index 40fe832fd7..60a240f259 100644
--- a/deps/v8/src/regexp/regexp.h
+++ b/deps/v8/src/regexp/regexp.h
@@ -5,12 +5,18 @@
#ifndef V8_REGEXP_REGEXP_H_
#define V8_REGEXP_REGEXP_H_
-#include "src/objects/js-regexp.h"
+#include "src/common/assert-scope.h"
+#include "src/handles/handles.h"
#include "src/regexp/regexp-error.h"
+#include "src/regexp/regexp-flags.h"
+#include "src/zone/zone-containers.h"
namespace v8 {
namespace internal {
+class JSRegExp;
+class RegExpCapture;
+class RegExpMatchInfo;
class RegExpNode;
class RegExpTree;
@@ -37,9 +43,9 @@ struct RegExpCompileData {
  // True iff the pattern is anchored at the start of the string with '^'.
bool contains_anchor = false;
- // Only use if the pattern contains named captures. If so, this contains a
- // mapping of capture names to capture indices.
- Handle<FixedArray> capture_name_map;
+ // Only set if the pattern contains named captures.
+ // Note: the lifetime equals that of the parse/compile zone.
+ ZoneVector<RegExpCapture*>* named_captures = nullptr;
// The error message. Only used if an error occurred during parsing or
// compilation.
@@ -62,9 +68,15 @@ struct RegExpCompileData {
class RegExp final : public AllStatic {
public:
// Whether the irregexp engine generates interpreter bytecode.
- static bool CanGenerateBytecode() {
- return FLAG_regexp_interpret_all || FLAG_regexp_tier_up;
- }
+ static bool CanGenerateBytecode();
+
+ // Verify the given pattern, i.e. check that parsing succeeds. If
+ // verification fails, `regexp_error_out` is set.
+ template <class CharT>
+ static bool VerifySyntax(Zone* zone, uintptr_t stack_limit,
+ const CharT* input, int input_length,
+ RegExpFlags flags, RegExpError* regexp_error_out,
+ const DisallowGarbageCollection& no_gc);
// Parses the RegExp pattern and prepares the JSRegExp object with
// generic data and choice of implementation - as well as what
@@ -72,7 +84,7 @@ class RegExp final : public AllStatic {
// Returns false if compilation fails.
V8_WARN_UNUSED_RESULT static MaybeHandle<Object> Compile(
Isolate* isolate, Handle<JSRegExp> re, Handle<String> pattern,
- JSRegExp::Flags flags, uint32_t backtrack_limit);
+ RegExpFlags flags, uint32_t backtrack_limit);
// Ensures that a regexp is fully compiled and ready to be executed on a
// subject string. Returns true on success. Return false on failure, and
@@ -131,12 +143,9 @@ class RegExp final : public AllStatic {
Isolate* isolate, Handle<RegExpMatchInfo> last_match_info,
Handle<String> subject, int capture_count, int32_t* match);
- V8_EXPORT_PRIVATE static bool CompileForTesting(Isolate* isolate, Zone* zone,
- RegExpCompileData* input,
- JSRegExp::Flags flags,
- Handle<String> pattern,
- Handle<String> sample_subject,
- bool is_one_byte);
+ V8_EXPORT_PRIVATE static bool CompileForTesting(
+ Isolate* isolate, Zone* zone, RegExpCompileData* input, RegExpFlags flags,
+ Handle<String> pattern, Handle<String> sample_subject, bool is_one_byte);
V8_EXPORT_PRIVATE static void DotPrintForTesting(const char* label,
RegExpNode* node);
@@ -152,6 +161,9 @@ class RegExp final : public AllStatic {
RegExpError error_text);
static bool IsUnmodifiedRegExp(Isolate* isolate, Handle<JSRegExp> regexp);
+
+ static Handle<FixedArray> CreateCaptureNameMap(
+ Isolate* isolate, ZoneVector<RegExpCapture*>* named_captures);
};
// Uses a special global mode of irregexp-generated code to perform a global
diff --git a/deps/v8/src/roots/DIR_METADATA b/deps/v8/src/roots/DIR_METADATA
index ff55846b31..af999da1f2 100644
--- a/deps/v8/src/roots/DIR_METADATA
+++ b/deps/v8/src/roots/DIR_METADATA
@@ -7,5 +7,5 @@
# https://source.chromium.org/chromium/infra/infra/+/master:go/src/infra/tools/dirmd/proto/dir_metadata.proto
monorail {
- component: "Blink>JavaScript>GC"
-} \ No newline at end of file
+ component: "Blink>JavaScript>GarbageCollection"
+}
diff --git a/deps/v8/src/runtime/runtime-atomics.cc b/deps/v8/src/runtime/runtime-atomics.cc
index 32a1353177..1fb80f780d 100644
--- a/deps/v8/src/runtime/runtime-atomics.cc
+++ b/deps/v8/src/runtime/runtime-atomics.cc
@@ -20,7 +20,7 @@ namespace internal {
// Other platforms have CSA support, see builtins-sharedarraybuffer-gen.h.
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X || \
- V8_TARGET_ARCH_RISCV64
+ V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_LOONG64
namespace {
@@ -606,6 +606,6 @@ RUNTIME_FUNCTION(Runtime_AtomicsXor) { UNREACHABLE(); }
#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
// || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
- // || V8_TARGET_ARCH_RISCV64
+ // || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_LOONG64
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/runtime/runtime-collections.cc b/deps/v8/src/runtime/runtime-collections.cc
index e03a9c06ff..7a67c78db1 100644
--- a/deps/v8/src/runtime/runtime-collections.cc
+++ b/deps/v8/src/runtime/runtime-collections.cc
@@ -29,7 +29,9 @@ RUNTIME_FUNCTION(Runtime_SetGrow) {
OrderedHashSet::EnsureGrowable(isolate, table);
if (!table_candidate.ToHandle(&table)) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kValueOutOfRange));
+ isolate,
+ NewRangeError(MessageTemplate::kCollectionGrowFailed,
+ isolate->factory()->NewStringFromAsciiChecked("Set")));
}
holder->set_table(*table);
return ReadOnlyRoots(isolate).undefined_value();
@@ -64,7 +66,9 @@ RUNTIME_FUNCTION(Runtime_MapGrow) {
OrderedHashMap::EnsureGrowable(isolate, table);
if (!table_candidate.ToHandle(&table)) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewRangeError(MessageTemplate::kValueOutOfRange));
+ isolate,
+ NewRangeError(MessageTemplate::kCollectionGrowFailed,
+ isolate->factory()->NewStringFromAsciiChecked("Map")));
}
holder->set_table(*table);
return ReadOnlyRoots(isolate).undefined_value();
diff --git a/deps/v8/src/runtime/runtime-compiler.cc b/deps/v8/src/runtime/runtime-compiler.cc
index 7088e4074e..54924e0f7b 100644
--- a/deps/v8/src/runtime/runtime-compiler.cc
+++ b/deps/v8/src/runtime/runtime-compiler.cc
@@ -83,13 +83,13 @@ RUNTIME_FUNCTION(Runtime_InstallBaselineCode) {
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
Handle<SharedFunctionInfo> sfi(function->shared(), isolate);
- DCHECK(sfi->HasBaselineData());
+ DCHECK(sfi->HasBaselineCode());
IsCompiledScope is_compiled_scope(*sfi, isolate);
DCHECK(!function->HasAvailableOptimizedCode());
DCHECK(!function->HasOptimizationMarker());
DCHECK(!function->has_feedback_vector());
JSFunction::EnsureFeedbackVector(function, &is_compiled_scope);
- Code baseline_code = sfi->baseline_data().baseline_code();
+ Code baseline_code = sfi->baseline_code(kAcquireLoad);
function->set_code(baseline_code);
return baseline_code;
}
diff --git a/deps/v8/src/runtime/runtime-generator.cc b/deps/v8/src/runtime/runtime-generator.cc
index a67a6f09c6..f9e60c64b3 100644
--- a/deps/v8/src/runtime/runtime-generator.cc
+++ b/deps/v8/src/runtime/runtime-generator.cc
@@ -54,8 +54,9 @@ RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
// Underlying function needs to have bytecode available.
DCHECK(function->shared().HasBytecodeArray());
- int size = function->shared().internal_formal_parameter_count() +
- function->shared().GetBytecodeArray(isolate).register_count();
+ int size =
+ function->shared().internal_formal_parameter_count_without_receiver() +
+ function->shared().GetBytecodeArray(isolate).register_count();
Handle<FixedArray> parameters_and_registers =
isolate->factory()->NewFixedArray(size);
diff --git a/deps/v8/src/runtime/runtime-internal.cc b/deps/v8/src/runtime/runtime-internal.cc
index f9dce4d271..d86fc23622 100644
--- a/deps/v8/src/runtime/runtime-internal.cc
+++ b/deps/v8/src/runtime/runtime-internal.cc
@@ -31,6 +31,12 @@
#include "src/strings/string-builder-inl.h"
#include "src/utils/ostreams.h"
+#if V8_ENABLE_WEBASSEMBLY
+// TODO(jkummerow): Drop this when the "SaveAndClearThreadInWasmFlag"
+// short-term mitigation is no longer needed.
+#include "src/trap-handler/trap-handler.h"
+#endif // V8_ENABLE_WEBASSEMBLY
+
namespace v8 {
namespace internal {
@@ -418,6 +424,34 @@ RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterruptFromCode) {
return ReadOnlyRoots(isolate).undefined_value();
}
+namespace {
+
+#if V8_ENABLE_WEBASSEMBLY
+class SaveAndClearThreadInWasmFlag {
+ public:
+ SaveAndClearThreadInWasmFlag() {
+ if (trap_handler::IsTrapHandlerEnabled()) {
+ if (trap_handler::IsThreadInWasm()) {
+ thread_was_in_wasm_ = true;
+ trap_handler::ClearThreadInWasm();
+ }
+ }
+ }
+ ~SaveAndClearThreadInWasmFlag() {
+ if (thread_was_in_wasm_) {
+ trap_handler::SetThreadInWasm();
+ }
+ }
+
+ private:
+ bool thread_was_in_wasm_{false};
+};
+#else
+class SaveAndClearThreadInWasmFlag {};
+#endif // V8_ENABLE_WEBASSEMBLY
+
+} // namespace
+
RUNTIME_FUNCTION(Runtime_AllocateInYoungGeneration) {
HandleScope scope(isolate);
DCHECK_EQ(2, args.length());
@@ -434,6 +468,14 @@ RUNTIME_FUNCTION(Runtime_AllocateInYoungGeneration) {
CHECK(size <= kMaxRegularHeapObjectSize);
}
+#if V8_ENABLE_WEBASSEMBLY
+ // Short-term mitigation for crbug.com/1236668. When this is called from
+ // WasmGC code, clear the "thread in wasm" flag, which is important in case
+ // any GC needs to happen.
+ // TODO(jkummerow): Find a better fix, likely by replacing the global flag.
+ SaveAndClearThreadInWasmFlag clear_wasm_flag;
+#endif // V8_ENABLE_WEBASSEMBLY
+
// TODO(v8:9472): Until double-aligned allocation is fixed for new-space
// allocations, don't request it.
double_align = false;
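SaveAndClearThreadInWasmFlag is a scope guard: the constructor records and clears the flag, the destructor restores it, so every early return from the runtime function is covered. A standalone sketch of the same shape; flag() and set_flag() are hypothetical stand-ins for the trap-handler accessors:

    bool flag();          // assumed: reads the thread-local flag
    void set_flag(bool);  // assumed: writes the thread-local flag

    class ClearFlagScope {
     public:
      ClearFlagScope() : was_set_(flag()) {
        if (was_set_) set_flag(false);  // cleared for the whole scope
      }
      ~ClearFlagScope() {
        if (was_set_) set_flag(true);   // restored on every exit path
      }
      ClearFlagScope(const ClearFlagScope&) = delete;
      ClearFlagScope& operator=(const ClearFlagScope&) = delete;

     private:
      const bool was_set_;
    };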
diff --git a/deps/v8/src/runtime/runtime-module.cc b/deps/v8/src/runtime/runtime-module.cc
index 52fadb8c8c..9adde80fd9 100644
--- a/deps/v8/src/runtime/runtime-module.cc
+++ b/deps/v8/src/runtime/runtime-module.cc
@@ -12,6 +12,18 @@
namespace v8 {
namespace internal {
+namespace {
+Handle<Script> GetEvalOrigin(Isolate* isolate, Script origin_script) {
+ DisallowGarbageCollection no_gc;
+ while (origin_script.has_eval_from_shared()) {
+ HeapObject maybe_script = origin_script.eval_from_shared().script();
+ CHECK(maybe_script.IsScript());
+ origin_script = Script::cast(maybe_script);
+ }
+ return handle(origin_script, isolate);
+}
+} // namespace
+
RUNTIME_FUNCTION(Runtime_DynamicImportCall) {
HandleScope scope(isolate);
DCHECK_LE(2, args.length());
@@ -25,17 +37,11 @@ RUNTIME_FUNCTION(Runtime_DynamicImportCall) {
import_assertions = args.at<Object>(2);
}
- Handle<Script> script(Script::cast(function->shared().script()), isolate);
-
- while (script->has_eval_from_shared()) {
- Object maybe_script = script->eval_from_shared().script();
- CHECK(maybe_script.IsScript());
- script = handle(Script::cast(maybe_script), isolate);
- }
-
+ Handle<Script> referrer_script =
+ GetEvalOrigin(isolate, Script::cast(function->shared().script()));
RETURN_RESULT_OR_FAILURE(isolate,
isolate->RunHostImportModuleDynamicallyCallback(
- script, specifier, import_assertions));
+ referrer_script, specifier, import_assertions));
}
RUNTIME_FUNCTION(Runtime_GetModuleNamespace) {
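Factoring the loop into GetEvalOrigin also makes the GC discipline explicit: the chain is walked on raw Script objects under DisallowGarbageCollection, and a Handle is created exactly once, for the final result. The same shape in a hedged standalone form, with Node and its parent link as hypothetical simplifications of Script:

    struct Node {
      Node* parent = nullptr;
    };

    Node* FindRoot(Node* node) {
      while (node->parent != nullptr) {
        node = node->parent;  // raw traversal; must not trigger GC
      }
      return node;  // the caller wraps this single result in a handle
    }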
diff --git a/deps/v8/src/runtime/runtime-object.cc b/deps/v8/src/runtime/runtime-object.cc
index 42bbb10d92..bec54bd8d4 100644
--- a/deps/v8/src/runtime/runtime-object.cc
+++ b/deps/v8/src/runtime/runtime-object.cc
@@ -49,22 +49,10 @@ MaybeHandle<Object> Runtime::GetObjectProperty(
if (!it.IsFound() && key->IsSymbol() &&
Symbol::cast(*key).is_private_name()) {
- Handle<Symbol> sym = Handle<Symbol>::cast(key);
- Handle<Object> name(sym->description(), isolate);
- DCHECK(name->IsString());
- Handle<String> name_string = Handle<String>::cast(name);
- if (sym->IsPrivateBrand()) {
- Handle<String> class_name = (name_string->length() == 0)
- ? isolate->factory()->anonymous_string()
- : name_string;
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kInvalidPrivateBrand,
- class_name, lookup_start_object),
- Object);
- }
- THROW_NEW_ERROR(isolate,
- NewTypeError(MessageTemplate::kInvalidPrivateMemberRead,
- name_string, lookup_start_object),
+ MessageTemplate message = Symbol::cast(*key).IsPrivateBrand()
+ ? MessageTemplate::kInvalidPrivateBrand
+ : MessageTemplate::kInvalidPrivateMemberRead;
+ THROW_NEW_ERROR(isolate, NewTypeError(message, key, lookup_start_object),
Object);
}
return result;
@@ -1424,7 +1412,9 @@ RUNTIME_FUNCTION(Runtime_AddPrivateBrand) {
if (it.IsFound()) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kVarRedeclaration, brand));
+ isolate,
+ NewTypeError(MessageTemplate::kInvalidPrivateBrandReinitialization,
+ brand));
}
PropertyAttributes attributes =
@@ -1447,7 +1437,8 @@ RUNTIME_FUNCTION(Runtime_AddPrivateField) {
if (it.IsFound()) {
THROW_NEW_ERROR_RETURN_FAILURE(
- isolate, NewTypeError(MessageTemplate::kVarRedeclaration, key));
+ isolate,
+ NewTypeError(MessageTemplate::kInvalidPrivateFieldReitialization, key));
}
CHECK(Object::AddDataProperty(&it, value, NONE, Just(kDontThrow),
diff --git a/deps/v8/src/runtime/runtime-scopes.cc b/deps/v8/src/runtime/runtime-scopes.cc
index f49689c292..8b65ffb7cc 100644
--- a/deps/v8/src/runtime/runtime-scopes.cc
+++ b/deps/v8/src/runtime/runtime-scopes.cc
@@ -401,7 +401,8 @@ Handle<JSObject> NewSloppyArguments(Isolate* isolate, Handle<JSFunction> callee,
isolate->factory()->NewArgumentsObject(callee, argument_count);
// Allocate the elements if needed.
- int parameter_count = callee->shared().internal_formal_parameter_count();
+ int parameter_count =
+ callee->shared().internal_formal_parameter_count_without_receiver();
if (argument_count > 0) {
if (parameter_count > 0) {
int mapped_count = std::min(argument_count, parameter_count);
@@ -526,7 +527,8 @@ RUNTIME_FUNCTION(Runtime_NewRestParameter) {
HandleScope scope(isolate);
DCHECK_EQ(1, args.length());
CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0)
- int start_index = callee->shared().internal_formal_parameter_count();
+ int start_index =
+ callee->shared().internal_formal_parameter_count_without_receiver();
// This generic runtime function can also be used when the caller has been
// inlined, we use the slow but accurate {GetCallerArguments}.
int argument_count = 0;
diff --git a/deps/v8/src/runtime/runtime-test-wasm.cc b/deps/v8/src/runtime/runtime-test-wasm.cc
index 8425b1fa18..b33cbeae39 100644
--- a/deps/v8/src/runtime/runtime-test-wasm.cc
+++ b/deps/v8/src/runtime/runtime-test-wasm.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "include/v8-wasm.h"
#include "src/base/memory.h"
#include "src/base/platform/mutex.h"
#include "src/execution/arguments-inl.h"
diff --git a/deps/v8/src/runtime/runtime-test.cc b/deps/v8/src/runtime/runtime-test.cc
index 69b0f6241b..3b49e8a891 100644
--- a/deps/v8/src/runtime/runtime-test.cc
+++ b/deps/v8/src/runtime/runtime-test.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/base/numbers/double.h"
#include "src/base/platform/mutex.h"
@@ -70,6 +71,18 @@ V8_WARN_UNUSED_RESULT Object ReturnFuzzSafe(Object value, Isolate* isolate) {
if (!args[index].IsBoolean()) return CrashUnlessFuzzing(isolate); \
bool name = args[index].IsTrue(isolate);
+bool IsAsmWasmFunction(Isolate* isolate, JSFunction function) {
+ DisallowGarbageCollection no_gc;
+#if V8_ENABLE_WEBASSEMBLY
+ // For simplicity we include invalid asm.js functions whose code hasn't yet
+ // been updated to CompileLazy but is still the InstantiateAsmJs builtin.
+ return function.shared().HasAsmWasmData() ||
+ function.code().builtin_id() == Builtin::kInstantiateAsmJs;
+#else
+ return false;
+#endif // V8_ENABLE_WEBASSEMBLY
+}
+
} // namespace
RUNTIME_FUNCTION(Runtime_ClearMegamorphicStubCache) {
@@ -242,11 +255,9 @@ bool CanOptimizeFunction(Handle<JSFunction> function, Isolate* isolate,
return CrashUnlessFuzzingReturnFalse(isolate);
}
-#if V8_ENABLE_WEBASSEMBLY
- if (function->shared().HasAsmWasmData()) {
+ if (IsAsmWasmFunction(isolate, *function)) {
return CrashUnlessFuzzingReturnFalse(isolate);
}
-#endif // V8_ENABLE_WEBASSEMBLY
if (FLAG_testing_d8_test_runner) {
PendingOptimizationTable::MarkedForOptimization(isolate, function);
@@ -362,12 +373,12 @@ RUNTIME_FUNCTION(Runtime_CompileBaseline) {
// First compile the bytecode, if we have to.
if (!is_compiled_scope.is_compiled() &&
- !Compiler::Compile(isolate, function, Compiler::KEEP_EXCEPTION,
+ !Compiler::Compile(isolate, function, Compiler::CLEAR_EXCEPTION,
&is_compiled_scope)) {
return CrashUnlessFuzzing(isolate);
}
- if (!Compiler::CompileBaseline(isolate, function, Compiler::KEEP_EXCEPTION,
+ if (!Compiler::CompileBaseline(isolate, function, Compiler::CLEAR_EXCEPTION,
&is_compiled_scope)) {
return CrashUnlessFuzzing(isolate);
}
@@ -424,9 +435,7 @@ RUNTIME_FUNCTION(Runtime_PrepareFunctionForOptimization) {
return CrashUnlessFuzzing(isolate);
}
-#if V8_ENABLE_WEBASSEMBLY
- if (function->shared().HasAsmWasmData()) return CrashUnlessFuzzing(isolate);
-#endif // V8_ENABLE_WEBASSEMBLY
+ if (IsAsmWasmFunction(isolate, *function)) return CrashUnlessFuzzing(isolate);
// Hold onto the bytecode array between marking and optimization to ensure
// it's not flushed.
@@ -569,7 +578,7 @@ RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
HandleScope scope(isolate);
- DCHECK(args.length() == 1 || args.length() == 2);
+ DCHECK_EQ(args.length(), 1);
int status = 0;
if (FLAG_lite_mode || FLAG_jitless) {
// Both jitless and lite modes cannot optimize. Unit tests should handle
@@ -590,32 +599,8 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
if (function_object->IsUndefined()) return Smi::FromInt(status);
if (!function_object->IsJSFunction()) return CrashUnlessFuzzing(isolate);
Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
-
status |= static_cast<int>(OptimizationStatus::kIsFunction);
- bool sync_with_compiler_thread = true;
- if (args.length() == 2) {
- CONVERT_ARG_HANDLE_CHECKED(Object, sync_object, 1);
- if (!sync_object->IsString()) return CrashUnlessFuzzing(isolate);
- Handle<String> sync = Handle<String>::cast(sync_object);
- if (sync->IsOneByteEqualTo(base::StaticCharVector("no sync"))) {
- sync_with_compiler_thread = false;
- } else if (sync->IsOneByteEqualTo(base::StaticCharVector("sync")) ||
- sync->length() == 0) {
- DCHECK(sync_with_compiler_thread);
- } else {
- return CrashUnlessFuzzing(isolate);
- }
- }
-
- if (isolate->concurrent_recompilation_enabled() &&
- sync_with_compiler_thread) {
- while (function->IsInOptimizationQueue()) {
- isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
- base::OS::Sleep(base::TimeDelta::FromMilliseconds(50));
- }
- }
-
if (function->IsMarkedForOptimization()) {
status |= static_cast<int>(OptimizationStatus::kMarkedForOptimization);
} else if (function->IsMarkedForConcurrentOptimization()) {
@@ -670,39 +655,32 @@ RUNTIME_FUNCTION(Runtime_GetOptimizationStatus) {
return Smi::FromInt(status);
}
-RUNTIME_FUNCTION(Runtime_UnblockConcurrentRecompilation) {
- DCHECK_EQ(0, args.length());
- CHECK(FLAG_block_concurrent_recompilation);
- CHECK(isolate->concurrent_recompilation_enabled());
- isolate->optimizing_compile_dispatcher()->Unblock();
- return ReadOnlyRoots(isolate).undefined_value();
-}
-
RUNTIME_FUNCTION(Runtime_DisableOptimizationFinalization) {
DCHECK_EQ(0, args.length());
- DCHECK(!FLAG_block_concurrent_recompilation);
- CHECK(isolate->concurrent_recompilation_enabled());
- isolate->optimizing_compile_dispatcher()->AwaitCompileTasks();
- isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
- isolate->optimizing_compile_dispatcher()->set_finalize(false);
+ if (isolate->concurrent_recompilation_enabled()) {
+ isolate->optimizing_compile_dispatcher()->AwaitCompileTasks();
+ isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
+ isolate->stack_guard()->ClearInstallCode();
+ isolate->optimizing_compile_dispatcher()->set_finalize(false);
+ }
return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_WaitForBackgroundOptimization) {
DCHECK_EQ(0, args.length());
- DCHECK(!FLAG_block_concurrent_recompilation);
- CHECK(isolate->concurrent_recompilation_enabled());
- isolate->optimizing_compile_dispatcher()->AwaitCompileTasks();
+ if (isolate->concurrent_recompilation_enabled()) {
+ isolate->optimizing_compile_dispatcher()->AwaitCompileTasks();
+ }
return ReadOnlyRoots(isolate).undefined_value();
}
RUNTIME_FUNCTION(Runtime_FinalizeOptimization) {
DCHECK_EQ(0, args.length());
- DCHECK(!FLAG_block_concurrent_recompilation);
- CHECK(isolate->concurrent_recompilation_enabled());
- isolate->optimizing_compile_dispatcher()->AwaitCompileTasks();
- isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
- isolate->optimizing_compile_dispatcher()->set_finalize(true);
+ if (isolate->concurrent_recompilation_enabled()) {
+ isolate->optimizing_compile_dispatcher()->AwaitCompileTasks();
+ isolate->optimizing_compile_dispatcher()->InstallOptimizedFunctions();
+ isolate->optimizing_compile_dispatcher()->set_finalize(true);
+ }
return ReadOnlyRoots(isolate).undefined_value();
}
@@ -1117,6 +1095,11 @@ RUNTIME_FUNCTION(Runtime_PretenureAllocationSite) {
JSObject object = JSObject::cast(arg);
Heap* heap = object.GetHeap();
+ if (!heap->InYoungGeneration(object)) {
+ // Object is not in new space, thus there is no memento and nothing to do.
+ return ReturnFuzzSafe(ReadOnlyRoots(isolate).false_value(), isolate);
+ }
+
AllocationMemento memento =
heap->FindAllocationMemento<Heap::kForRuntime>(object.map(), object);
if (memento.is_null())
@@ -1422,10 +1405,8 @@ RUNTIME_FUNCTION(Runtime_NewRegExpWithBacktrackLimit) {
CONVERT_ARG_HANDLE_CHECKED(String, flags_string, 1);
CONVERT_UINT32_ARG_CHECKED(backtrack_limit, 2);
- bool success = false;
JSRegExp::Flags flags =
- JSRegExp::FlagsFromString(isolate, flags_string, &success);
- CHECK(success);
+ JSRegExp::FlagsFromString(isolate, flags_string).value();
RETURN_RESULT_OR_FAILURE(
isolate, JSRegExp::New(isolate, pattern, flags, backtrack_limit));
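FlagsFromString now returns an optional instead of filling a bool out-parameter, so the CHECK(success) above collapses into .value(). A hedged sketch of that API shape, using std::optional as a stand-in for base::Optional; ParseFlags itself is hypothetical:

    #include <optional>
    #include <string>

    std::optional<int> ParseFlags(const std::string& flags) {
      for (char c : flags) {
        if (c != 'g' && c != 'i' && c != 'm') return std::nullopt;  // invalid
      }
      return static_cast<int>(flags.size());  // stand-in for a real flag mask
    }

    // Test-only callers that require validity demand it in one step:
    int MustParseFlags(const std::string& flags) {
      return ParseFlags(flags).value();  // throws if parsing failed
    }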
diff --git a/deps/v8/src/runtime/runtime-typedarray.cc b/deps/v8/src/runtime/runtime-typedarray.cc
index 5d0fc35944..ca3a50ee76 100644
--- a/deps/v8/src/runtime/runtime-typedarray.cc
+++ b/deps/v8/src/runtime/runtime-typedarray.cc
@@ -94,7 +94,7 @@ RUNTIME_FUNCTION(Runtime_TypedArraySortFast) {
CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, array, 0);
DCHECK(!array->WasDetached());
-#if V8_OS_LINUX
+#if MULTI_MAPPED_ALLOCATOR_AVAILABLE
if (FLAG_multi_mapped_mock_allocator) {
// Sorting is meaningless with the mock allocator, and std::sort
// might crash (because aliasing elements violate its assumptions).
diff --git a/deps/v8/src/runtime/runtime.cc b/deps/v8/src/runtime/runtime.cc
index 47f184a3a0..3bcd41dfcb 100644
--- a/deps/v8/src/runtime/runtime.cc
+++ b/deps/v8/src/runtime/runtime.cc
@@ -203,7 +203,9 @@ bool Runtime::IsAllowListedForFuzzing(FunctionId id) {
case Runtime::kArrayBufferDetach:
case Runtime::kDeoptimizeFunction:
case Runtime::kDeoptimizeNow:
+ case Runtime::kDisableOptimizationFinalization:
case Runtime::kEnableCodeLoggingForTesting:
+ case Runtime::kFinalizeOptimization:
case Runtime::kGetUndetectable:
case Runtime::kNeverOptimizeFunction:
case Runtime::kOptimizeFunctionOnNextCall:
@@ -212,6 +214,7 @@ bool Runtime::IsAllowListedForFuzzing(FunctionId id) {
case Runtime::kPretenureAllocationSite:
case Runtime::kSetAllocationTimeout:
case Runtime::kSimulateNewspaceFull:
+ case Runtime::kWaitForBackgroundOptimization:
return true;
// Runtime functions only permitted for non-differential fuzzers.
// This list may contain functions performing extra checks or returning
@@ -221,9 +224,9 @@ bool Runtime::IsAllowListedForFuzzing(FunctionId id) {
case Runtime::kIsBeingInterpreted:
case Runtime::kVerifyType:
return !FLAG_allow_natives_for_differential_fuzzing;
- case Runtime::kCompileBaseline:
case Runtime::kBaselineOsr:
- return FLAG_sparkplug;
+ case Runtime::kCompileBaseline:
+ return ENABLE_SPARKPLUG;
default:
return false;
}
diff --git a/deps/v8/src/runtime/runtime.h b/deps/v8/src/runtime/runtime.h
index 045ffb3641..fed9c01416 100644
--- a/deps/v8/src/runtime/runtime.h
+++ b/deps/v8/src/runtime/runtime.h
@@ -7,7 +7,7 @@
#include <memory>
-#include "include/v8.h"
+#include "include/v8-maybe.h"
#include "src/base/bit-field.h"
#include "src/base/platform/time.h"
#include "src/common/globals.h"
@@ -488,7 +488,7 @@ namespace internal {
F(FinalizeOptimization, 0, 1) \
F(GetCallable, 0, 1) \
F(GetInitializerFunction, 1, 1) \
- F(GetOptimizationStatus, -1, 1) \
+ F(GetOptimizationStatus, 1, 1) \
F(GetUndetectable, 0, 1) \
F(GlobalPrint, 1, 1) \
F(HasDictionaryElements, 1, 1) \
@@ -558,7 +558,6 @@ namespace internal {
F(TraceExit, 1, 1) \
F(TurbofanStaticAssert, 1, 1) \
F(TypedArraySpeciesProtector, 0, 1) \
- F(UnblockConcurrentRecompilation, 0, 1) \
F(WaitForBackgroundOptimization, 0, 1) \
I(DeoptimizeNow, 0, 1)
diff --git a/deps/v8/src/snapshot/context-deserializer.cc b/deps/v8/src/snapshot/context-deserializer.cc
index ad109bacca..fb643ba014 100644
--- a/deps/v8/src/snapshot/context-deserializer.cc
+++ b/deps/v8/src/snapshot/context-deserializer.cc
@@ -61,7 +61,6 @@ void ContextDeserializer::SetupOffHeapArrayBufferBackingStores() {
for (Handle<JSArrayBuffer> buffer : new_off_heap_array_buffers()) {
uint32_t store_index = buffer->GetBackingStoreRefForDeserialization();
auto bs = backing_store(store_index);
- buffer->AllocateExternalPointerEntries(isolate());
// TODO(v8:11111): Support RAB / GSAB.
CHECK(!buffer->is_resizable());
SharedFlag shared =
diff --git a/deps/v8/src/snapshot/context-serializer.cc b/deps/v8/src/snapshot/context-serializer.cc
index 7a02a50caa..96d9d5f03e 100644
--- a/deps/v8/src/snapshot/context-serializer.cc
+++ b/deps/v8/src/snapshot/context-serializer.cc
@@ -177,8 +177,8 @@ void ContextSerializer::SerializeObjectImpl(Handle<HeapObject> obj) {
Handle<JSFunction> closure = Handle<JSFunction>::cast(obj);
closure->ResetIfCodeFlushed();
if (closure->is_compiled()) {
- if (closure->shared().HasBaselineData()) {
- closure->shared().flush_baseline_data();
+ if (closure->shared().HasBaselineCode()) {
+ closure->shared().FlushBaselineCode();
}
closure->set_code(closure->shared().GetCode(), kReleaseStore);
}
diff --git a/deps/v8/src/snapshot/deserializer.cc b/deps/v8/src/snapshot/deserializer.cc
index fab2f80355..9f32faf67a 100644
--- a/deps/v8/src/snapshot/deserializer.cc
+++ b/deps/v8/src/snapshot/deserializer.cc
@@ -482,7 +482,6 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
// a numbered reference to an already deserialized backing store.
backing_store = backing_stores_[store_index]->buffer_start();
}
- data_view->AllocateExternalPointerEntries(main_thread_isolate());
data_view->set_data_pointer(
main_thread_isolate(),
reinterpret_cast<uint8_t*>(backing_store) + data_view->byte_offset());
@@ -491,7 +490,6 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
// Fixup typed array pointers.
if (typed_array->is_on_heap()) {
Address raw_external_pointer = typed_array->external_pointer_raw();
- typed_array->AllocateExternalPointerEntries(main_thread_isolate());
typed_array->SetOnHeapDataPtr(
main_thread_isolate(), HeapObject::cast(typed_array->base_pointer()),
raw_external_pointer);
@@ -503,7 +501,6 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
auto start = backing_store
? reinterpret_cast<byte*>(backing_store->buffer_start())
: nullptr;
- typed_array->AllocateExternalPointerEntries(main_thread_isolate());
typed_array->SetOffHeapDataPtr(main_thread_isolate(), start,
typed_array->byte_offset());
}
@@ -513,7 +510,6 @@ void Deserializer<IsolateT>::PostProcessNewObject(Handle<Map> map,
if (buffer->GetBackingStoreRefForDeserialization() != kNullRefSentinel) {
new_off_heap_array_buffers_.push_back(buffer);
} else {
- buffer->AllocateExternalPointerEntries(main_thread_isolate());
buffer->set_backing_store(main_thread_isolate(), nullptr);
}
} else if (InstanceTypeChecker::IsBytecodeArray(instance_type)) {
diff --git a/deps/v8/src/snapshot/embedded/embedded-data.cc b/deps/v8/src/snapshot/embedded/embedded-data.cc
index 166e41d324..188ed6e879 100644
--- a/deps/v8/src/snapshot/embedded/embedded-data.cc
+++ b/deps/v8/src/snapshot/embedded/embedded-data.cc
@@ -218,7 +218,7 @@ void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) || \
defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_S390) || \
- defined(V8_TARGET_ARCH_RISCV64)
+ defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64)
// On these platforms we emit relative builtin-to-builtin
// jumps for isolate independent builtins in the snapshot. This fixes up the
// relative jumps to the right offsets in the snapshot.
@@ -246,7 +246,7 @@ void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
// indirection through the root register.
CHECK(on_heap_it.done());
CHECK(off_heap_it.done());
-#endif // defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64)
+#endif
}
}
diff --git a/deps/v8/src/snapshot/embedded/embedded-empty.cc b/deps/v8/src/snapshot/embedded/embedded-empty.cc
index c32b459d9d..e5355215f2 100644
--- a/deps/v8/src/snapshot/embedded/embedded-empty.cc
+++ b/deps/v8/src/snapshot/embedded/embedded-empty.cc
@@ -17,15 +17,3 @@ const uint8_t* v8_Default_embedded_blob_code_ = nullptr;
uint32_t v8_Default_embedded_blob_code_size_ = 0;
const uint8_t* v8_Default_embedded_blob_data_ = nullptr;
uint32_t v8_Default_embedded_blob_data_size_ = 0;
-
-#ifdef V8_MULTI_SNAPSHOTS
-extern "C" const uint8_t* v8_Trusted_embedded_blob_code_;
-extern "C" uint32_t v8_Trusted_embedded_blob_code_size_;
-extern "C" const uint8_t* v8_Trusted_embedded_blob_data_;
-extern "C" uint32_t v8_Trusted_embedded_blob_data_size_;
-
-const uint8_t* v8_Trusted_embedded_blob_code_ = nullptr;
-uint32_t v8_Trusted_embedded_blob_code_size_ = 0;
-const uint8_t* v8_Trusted_embedded_blob_data_ = nullptr;
-uint32_t v8_Trusted_embedded_blob_data_size_ = 0;
-#endif
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
index 41cd9dbca0..e858da90b5 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-aix.cc
@@ -65,8 +65,14 @@ void PlatformEmbeddedFileWriterAIX::DeclareSymbolGlobal(const char* name) {
}
void PlatformEmbeddedFileWriterAIX::AlignToCodeAlignment() {
+#if V8_TARGET_ARCH_X64
+  // On x64, use 64-byte code alignment to allow 64-byte loop header alignment.
+ STATIC_ASSERT((1 << 6) >= kCodeAlignment);
+ fprintf(fp_, ".align 6\n");
+#else
STATIC_ASSERT((1 << 5) >= kCodeAlignment);
fprintf(fp_, ".align 5\n");
+#endif
}
void PlatformEmbeddedFileWriterAIX::AlignToDataAlignment() {
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
index e2d5dcb41c..641d3638f3 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-generic.cc
@@ -74,8 +74,14 @@ void PlatformEmbeddedFileWriterGeneric::DeclareSymbolGlobal(const char* name) {
}
void PlatformEmbeddedFileWriterGeneric::AlignToCodeAlignment() {
+#if V8_TARGET_ARCH_X64
+  // On x64, use 64-byte code alignment to allow 64-byte loop header alignment.
+ STATIC_ASSERT(64 >= kCodeAlignment);
+ fprintf(fp_, ".balign 64\n");
+#else
STATIC_ASSERT(32 >= kCodeAlignment);
fprintf(fp_, ".balign 32\n");
+#endif
}
void PlatformEmbeddedFileWriterGeneric::AlignToDataAlignment() {
@@ -152,8 +158,9 @@ int PlatformEmbeddedFileWriterGeneric::IndentedDataDirective(
DataDirective PlatformEmbeddedFileWriterGeneric::ByteChunkDataDirective()
const {
-#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)
- // MIPS uses a fixed 4 byte instruction set, using .long
+#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
+ defined(V8_TARGET_ARCH_LOONG64)
+  // MIPS and LOONG64 use a fixed 4-byte instruction width, so use .long
// to prevent any unnecessary padding.
return kLong;
#else
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
index 5fa12ec6ea..cfe9bbcde1 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-mac.cc
@@ -56,12 +56,18 @@ void PlatformEmbeddedFileWriterMac::DeclareSymbolGlobal(const char* name) {
// prevents something along the compilation chain from messing with the
// embedded blob. Using .global here causes embedded blob hash verification
// failures at runtime.
- STATIC_ASSERT(32 >= kCodeAlignment);
fprintf(fp_, ".private_extern _%s\n", name);
}
void PlatformEmbeddedFileWriterMac::AlignToCodeAlignment() {
+#if V8_TARGET_ARCH_X64
+  // On x64, use 64-byte code alignment to allow 64-byte loop header alignment.
+ STATIC_ASSERT(64 >= kCodeAlignment);
+ fprintf(fp_, ".balign 64\n");
+#else
+ STATIC_ASSERT(32 >= kCodeAlignment);
fprintf(fp_, ".balign 32\n");
+#endif
}
void PlatformEmbeddedFileWriterMac::AlignToDataAlignment() {
diff --git a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
index 7b4eadd98a..83b85c8df9 100644
--- a/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
+++ b/deps/v8/src/snapshot/embedded/platform-embedded-file-writer-win.cc
@@ -637,7 +637,14 @@ void PlatformEmbeddedFileWriterWin::DeclareSymbolGlobal(const char* name) {
}
void PlatformEmbeddedFileWriterWin::AlignToCodeAlignment() {
+#if V8_TARGET_ARCH_X64
+  // On x64, use 64-byte code alignment to allow 64-byte loop header alignment.
+ STATIC_ASSERT(64 >= kCodeAlignment);
+ fprintf(fp_, ".balign 64\n");
+#else
+ STATIC_ASSERT(32 >= kCodeAlignment);
fprintf(fp_, ".balign 32\n");
+#endif
}
void PlatformEmbeddedFileWriterWin::AlignToDataAlignment() {
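All four writers now pad code sections to 64 bytes on x64 so that 64-byte loop-header alignment inside the embedded blob stays valid, with the STATIC_ASSERT tying the directive to kCodeAlignment. Note the AIX spelling: its .align takes a power-of-two exponent, so ".align 6" requests the same 64-byte boundary as ".balign 64" elsewhere. A hedged sketch of the shared shape; the kCodeAlignment value here is an assumption:

    #include <cstdio>

    constexpr int kCodeAlignment = 32;  // assumed non-x64 default

    void EmitCodeAlignment(std::FILE* fp, bool target_is_x64, bool aix_style) {
      // 64-byte padding on x64 leaves room for 64-byte loop-header alignment.
      const int bytes = target_is_x64 ? 64 : 32;
      static_assert(32 >= kCodeAlignment, "directive must not under-align");
      if (aix_style) {
        std::fprintf(fp, ".align %d\n", bytes == 64 ? 6 : 5);  // power of two
      } else {
        std::fprintf(fp, ".balign %d\n", bytes);               // byte count
      }
    }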
diff --git a/deps/v8/src/snapshot/mksnapshot.cc b/deps/v8/src/snapshot/mksnapshot.cc
index 4e5b43b23f..86b0304fb0 100644
--- a/deps/v8/src/snapshot/mksnapshot.cc
+++ b/deps/v8/src/snapshot/mksnapshot.cc
@@ -9,6 +9,7 @@
#include <iomanip>
#include "include/libplatform/libplatform.h"
+#include "include/v8-initialization.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/wrappers.h"
#include "src/base/sanitizer/msan.h"
@@ -239,6 +240,11 @@ int main(int argc, char** argv) {
v8::V8::InitializeICUDefaultLocation(argv[0]);
std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
v8::V8::InitializePlatform(platform.get());
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ if (!v8::V8::InitializeVirtualMemoryCage()) {
+ FATAL("Could not initialize the virtual memory cage");
+ }
+#endif
v8::V8::Initialize();
{
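The hunk above fixes an initialization order for embedders too: platform first, then the cage reservation, then V8 itself. A hedged sketch using only the public calls visible in this patch:

    #include <memory>

    #include "include/libplatform/libplatform.h"
    #include "include/v8-initialization.h"

    bool InitializeV8(const char* argv0) {
      v8::V8::InitializeICUDefaultLocation(argv0);
      static std::unique_ptr<v8::Platform> platform =
          v8::platform::NewDefaultPlatform();
      v8::V8::InitializePlatform(platform.get());
    #ifdef V8_VIRTUAL_MEMORY_CAGE
      // The cage reservation must exist before any isolate or snapshot
      // machinery allocates memory inside it.
      if (!v8::V8::InitializeVirtualMemoryCage()) return false;
    #endif
      v8::V8::Initialize();
      return true;
    }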
diff --git a/deps/v8/src/snapshot/serializer.cc b/deps/v8/src/snapshot/serializer.cc
index 68fb1a01a6..47221dd952 100644
--- a/deps/v8/src/snapshot/serializer.cc
+++ b/deps/v8/src/snapshot/serializer.cc
@@ -121,12 +121,14 @@ void Serializer::SerializeObject(Handle<HeapObject> obj) {
// indirection and serialize the actual string directly.
if (obj->IsThinString(isolate())) {
obj = handle(ThinString::cast(*obj).actual(isolate()), isolate());
- } else if (obj->IsBaselineData()) {
- // For now just serialize the BytecodeArray instead of baseline data.
- // TODO(v8:11429,pthier): Handle BaselineData in cases we want to serialize
- // Baseline code.
- obj = handle(Handle<BaselineData>::cast(obj)->GetActiveBytecodeArray(),
- isolate());
+ } else if (obj->IsCodeT()) {
+ Code code = FromCodeT(CodeT::cast(*obj));
+ if (code.kind() == CodeKind::BASELINE) {
+ // For now just serialize the BytecodeArray instead of baseline code.
+ // TODO(v8:11429,pthier): Handle Baseline code in cases we want to
+ // serialize it.
+ obj = handle(code.bytecode_or_interpreter_data(isolate()), isolate());
+ }
}
SerializeObjectImpl(obj);
}
@@ -521,10 +523,6 @@ void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
ArrayBufferExtension* extension = buffer->extension();
// The embedder-allocated backing store only exists for the off-heap case.
-#ifdef V8_HEAP_SANDBOX
- uint32_t external_pointer_entry =
- buffer->GetBackingStoreRefForDeserialization();
-#endif
if (backing_store != nullptr) {
uint32_t ref = SerializeBackingStore(backing_store, byte_length);
buffer->SetBackingStoreRefForSerialization(ref);
@@ -538,11 +536,7 @@ void Serializer::ObjectSerializer::SerializeJSArrayBuffer() {
SerializeObject();
-#ifdef V8_HEAP_SANDBOX
- buffer->SetBackingStoreRefForSerialization(external_pointer_entry);
-#else
buffer->set_backing_store(isolate(), backing_store);
-#endif
buffer->set_extension(extension);
}
diff --git a/deps/v8/src/snapshot/snapshot.h b/deps/v8/src/snapshot/snapshot.h
index 2f16eee6d5..f176faa607 100644
--- a/deps/v8/src/snapshot/snapshot.h
+++ b/deps/v8/src/snapshot/snapshot.h
@@ -5,7 +5,7 @@
#ifndef V8_SNAPSHOT_SNAPSHOT_H_
#define V8_SNAPSHOT_SNAPSHOT_H_
-#include "include/v8.h" // For StartupData.
+#include "include/v8-snapshot.h" // For StartupData.
#include "src/common/assert-scope.h"
#include "src/common/globals.h"
diff --git a/deps/v8/src/tasks/OWNERS b/deps/v8/src/tasks/OWNERS
index f7a22ea908..69a86ca984 100644
--- a/deps/v8/src/tasks/OWNERS
+++ b/deps/v8/src/tasks/OWNERS
@@ -1,4 +1,3 @@
ahaas@chromium.org
clemensb@chromium.org
mlippautz@chromium.org
-rmcilroy@chromium.org
diff --git a/deps/v8/src/third_party/vtune/BUILD.gn b/deps/v8/src/third_party/vtune/BUILD.gn
index e8582dbb79..d763da1064 100644
--- a/deps/v8/src/third_party/vtune/BUILD.gn
+++ b/deps/v8/src/third_party/vtune/BUILD.gn
@@ -22,6 +22,11 @@ static_library("v8_vtune") {
"vtune-jit.h",
]
configs += [ ":vtune_ittapi" ]
+
+ # TODO(delphick): Consider changing these to be v8_source_sets
+ if (!build_with_chromium && is_clang) {
+ configs -= [ "//build/config/clang:find_bad_constructs" ]
+ }
deps = [ "../../..:v8" ]
}
diff --git a/deps/v8/src/third_party/vtune/v8-vtune.h b/deps/v8/src/third_party/vtune/v8-vtune.h
index 34da9cb5bf..2ef1bf8cc4 100644
--- a/deps/v8/src/third_party/vtune/v8-vtune.h
+++ b/deps/v8/src/third_party/vtune/v8-vtune.h
@@ -58,7 +58,7 @@
#ifndef V8_VTUNE_H_
#define V8_VTUNE_H_
-#include "../../../include/v8.h"
+#include "../../../include/v8-callbacks.h"
namespace vTune {
diff --git a/deps/v8/src/third_party/vtune/vtune-jit.cc b/deps/v8/src/third_party/vtune/vtune-jit.cc
index 08fbfbfe39..7b9d338c3e 100644
--- a/deps/v8/src/third_party/vtune/vtune-jit.cc
+++ b/deps/v8/src/third_party/vtune/vtune-jit.cc
@@ -56,6 +56,8 @@
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
+#include "vtune-jit.h"
+
#include <stdlib.h>
#include <string.h>
@@ -65,8 +67,12 @@
#include <unordered_map>
#include <vector>
+#include "../../../include/v8-callbacks.h"
+#include "../../../include/v8-initialization.h"
+#include "../../../include/v8-local-handle.h"
+#include "../../../include/v8-primitive.h"
+#include "../../../include/v8-script.h"
#include "v8-vtune.h"
-#include "vtune-jit.h"
namespace vTune {
namespace internal {
diff --git a/deps/v8/src/third_party/vtune/vtune-jit.h b/deps/v8/src/third_party/vtune/vtune-jit.h
index 4e5af45c61..148c82434f 100644
--- a/deps/v8/src/third_party/vtune/vtune-jit.h
+++ b/deps/v8/src/third_party/vtune/vtune-jit.h
@@ -58,11 +58,14 @@
#ifndef VTUNE_VTUNE_JIT_H_
#define VTUNE_VTUNE_JIT_H_
-#include "../../../include/v8.h"
#include "third_party/ittapi/include/jitprofiling.h"
#define VTUNERUNNING (iJIT_IsProfilingActive() == iJIT_SAMPLING_ON)
+namespace v8 {
+struct JitCodeEvent;
+}
+
namespace vTune {
namespace internal {
using namespace v8;
diff --git a/deps/v8/src/torque/implementation-visitor.cc b/deps/v8/src/torque/implementation-visitor.cc
index b814f0cc63..dceb660c21 100644
--- a/deps/v8/src/torque/implementation-visitor.cc
+++ b/deps/v8/src/torque/implementation-visitor.cc
@@ -579,7 +579,7 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
.Position(signature.parameter_names[signature.implicit_count]->pos);
}
- csa_ccfile() << " TNode<Word32T> argc = UncheckedParameter<Word32T>("
+ csa_ccfile() << " TNode<Word32T> argc = UncheckedParameter<Word32T>("
<< "Descriptor::kJSActualArgumentsCount);\n";
csa_ccfile() << " TNode<IntPtrT> "
"arguments_length(ChangeInt32ToIntPtr(UncheckedCast<"
@@ -588,13 +588,17 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
"UncheckedCast<RawPtrT>(LoadFramePointer());\n";
csa_ccfile() << " TorqueStructArguments "
"torque_arguments(GetFrameArguments(arguments_frame, "
- "arguments_length));\n";
+ "arguments_length, FrameArgumentsArgcType::"
+ << (kJSArgcIncludesReceiver ? "kCountIncludesReceiver"
+ : "kCountExcludesReceiver")
+ << "));\n";
csa_ccfile()
<< " CodeStubArguments arguments(this, torque_arguments);\n";
parameters.Push("torque_arguments.frame");
parameters.Push("torque_arguments.base");
parameters.Push("torque_arguments.length");
+ parameters.Push("torque_arguments.actual_count");
const Type* arguments_type = TypeOracle::GetArgumentsType();
StackRange range = parameter_types.PushMany(LowerType(arguments_type));
parameter_bindings.Add(*signature.arguments_variable,
@@ -625,7 +629,7 @@ void ImplementationVisitor::Visit(Builtin* builtin) {
? "arguments.GetReceiver()"
: "UncheckedParameter<Object>(Descriptor::kReceiver)")
<< ";\n";
- csa_ccfile() << "USE(" << generated_name << ");\n";
+ csa_ccfile() << " USE(" << generated_name << ");\n";
expected_types = {TypeOracle::GetJSAnyType()};
} else if (param_name == "newTarget") {
csa_ccfile() << " TNode<Object> " << generated_name
@@ -3521,7 +3525,7 @@ void ImplementationVisitor::GenerateBuiltinDefinitionsAndInterfaceDescriptors(
// count.
int parameter_count =
static_cast<int>(builtin->signature().ExplicitCount());
- builtin_definitions << ", " << parameter_count;
+ builtin_definitions << ", " << JSParameterCount(parameter_count);
// And the receiver is explicitly declared.
builtin_definitions << ", kReceiver";
for (size_t i = builtin->signature().implicit_count;
@@ -3855,11 +3859,13 @@ namespace {
class ClassFieldOffsetGenerator : public FieldOffsetsGenerator {
public:
ClassFieldOffsetGenerator(std::ostream& header, std::ostream& inline_header,
- const ClassType* type, std::string gen_name)
+ const ClassType* type, std::string gen_name,
+ const ClassType* parent)
: FieldOffsetsGenerator(type),
hdr_(header),
inl_(inline_header),
- previous_field_end_("P::kHeaderSize"),
+ previous_field_end_((parent && parent->IsShape()) ? "P::kSize"
+ : "P::kHeaderSize"),
gen_name_(gen_name) {}
void WriteField(const Field& f, const std::string& size_string) override {
std::string field = "k" + CamelifyString(f.name_and_type.name) + "Offset";
@@ -3981,7 +3987,7 @@ base::Optional<std::vector<Field>> GetOrderedUniqueIndexFields(
void CppClassGenerator::GenerateClass() {
// Is<name>_NonInline(HeapObject)
- {
+ if (!type_->IsShape()) {
cpp::Function f("Is"s + name_ + "_NonInline");
f.SetDescription("Alias for HeapObject::Is"s + name_ +
"() that avoids inlining.");
@@ -4046,7 +4052,8 @@ void CppClassGenerator::GenerateClass() {
}
hdr_ << "\n";
- ClassFieldOffsetGenerator g(hdr_, inl_, type_, gen_name_);
+ ClassFieldOffsetGenerator g(hdr_, inl_, type_, gen_name_,
+ type_->GetSuperClass());
for (auto f : type_->fields()) {
CurrentSourcePosition::Scope scope(f.pos);
g.RecordOffsetFor(f);
@@ -4174,6 +4181,15 @@ void CppClassGenerator::GenerateClassCasts() {
}
void CppClassGenerator::GenerateClassConstructors() {
+ const ClassType* typecheck_type = type_;
+ while (typecheck_type->IsShape()) {
+ typecheck_type = typecheck_type->GetSuperClass();
+
+ // Shapes have already been checked earlier to inherit from JSObject, so we
+ // should have found an appropriate type.
+ DCHECK(typecheck_type);
+ }
+
hdr_ << " public:\n";
hdr_ << " template <class DAlias = D>\n";
hdr_ << " constexpr " << gen_name_ << "() : P() {\n";
@@ -4194,7 +4210,8 @@ void CppClassGenerator::GenerateClassConstructors() {
inl_ << "template<class D, class P>\n";
inl_ << "inline " << gen_name_T_ << "::" << gen_name_ << "(Address ptr)\n";
inl_ << " : P(ptr) {\n";
- inl_ << " SLOW_DCHECK(Is" << name_ << "_NonInline(*this));\n";
+ inl_ << " SLOW_DCHECK(Is" << typecheck_type->name()
+ << "_NonInline(*this));\n";
inl_ << "}\n";
inl_ << "template<class D, class P>\n";
@@ -4204,7 +4221,7 @@ void CppClassGenerator::GenerateClassConstructors() {
inl_ << " SLOW_DCHECK("
<< "(allow_smi == HeapObject::AllowInlineSmiStorage::kAllowBeingASmi"
" && this->IsSmi()) || Is"
- << name_ << "_NonInline(*this));\n";
+ << typecheck_type->name() << "_NonInline(*this));\n";
inl_ << "}\n";
}
@@ -4603,9 +4620,11 @@ void ImplementationVisitor::GenerateClassDefinitions(
for (const ClassType* type : TypeOracle::GetClasses()) {
auto& streams = GlobalContext::GeneratedPerFile(type->AttributedToFile());
std::ostream& header = streams.class_definition_headerfile;
- header << "class " << type->GetGeneratedTNodeTypeName() << ";\n";
- forward_declarations << "class " << type->GetGeneratedTNodeTypeName()
- << ";\n";
+ std::string name = type->GenerateCppClassDefinitions()
+ ? type->name()
+ : type->GetGeneratedTNodeTypeName();
+ header << "class " << name << ";\n";
+ forward_declarations << "class " << name << ";\n";
}
for (const ClassType* type : TypeOracle::GetClasses()) {
diff --git a/deps/v8/src/utils/address-map.h b/deps/v8/src/utils/address-map.h
index 6a9c513bc6..0a6c749b39 100644
--- a/deps/v8/src/utils/address-map.h
+++ b/deps/v8/src/utils/address-map.h
@@ -5,7 +5,6 @@
#ifndef V8_UTILS_ADDRESS_MAP_H_
#define V8_UTILS_ADDRESS_MAP_H_
-#include "include/v8.h"
#include "src/base/hashmap.h"
#include "src/common/assert-scope.h"
#include "src/objects/heap-object.h"
diff --git a/deps/v8/src/utils/allocation.cc b/deps/v8/src/utils/allocation.cc
index 9cdd53fa6d..6f6225797a 100644
--- a/deps/v8/src/utils/allocation.cc
+++ b/deps/v8/src/utils/allocation.cc
@@ -17,6 +17,7 @@
#include "src/base/vector.h"
#include "src/flags/flags.h"
#include "src/init/v8.h"
+#include "src/init/vm-cage.h"
#include "src/utils/memcopy.h"
#if V8_LIBC_BIONIC
@@ -53,6 +54,7 @@ class PageAllocatorInitializer {
page_allocator_ = default_page_allocator.get();
}
#if defined(LEAK_SANITIZER)
+ static_assert(!V8_VIRTUAL_MEMORY_CAGE_BOOL, "Not currently supported");
static base::LeakyObject<base::LsanPageAllocator> lsan_allocator(
page_allocator_);
page_allocator_ = lsan_allocator.get();
@@ -61,16 +63,25 @@ class PageAllocatorInitializer {
PageAllocator* page_allocator() const { return page_allocator_; }
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ PageAllocator* data_cage_page_allocator() const {
+ return data_cage_page_allocator_;
+ }
+#endif
+
void SetPageAllocatorForTesting(PageAllocator* allocator) {
page_allocator_ = allocator;
}
private:
PageAllocator* page_allocator_;
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ PageAllocator* data_cage_page_allocator_;
+#endif
};
DEFINE_LAZY_LEAKY_OBJECT_GETTER(PageAllocatorInitializer,
- GetPageTableInitializer)
+ GetPageAllocatorInitializer)
// We will attempt allocation this many times. After each failure, we call
// OnCriticalMemoryPressure to try to free some memory.
@@ -79,14 +90,29 @@ const int kAllocationTries = 2;
} // namespace
v8::PageAllocator* GetPlatformPageAllocator() {
- DCHECK_NOT_NULL(GetPageTableInitializer()->page_allocator());
- return GetPageTableInitializer()->page_allocator();
+ DCHECK_NOT_NULL(GetPageAllocatorInitializer()->page_allocator());
+ return GetPageAllocatorInitializer()->page_allocator();
}
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+// TODO(chromium:1218005) once we disallow disabling the cage, name this e.g.
+// "GetPlatformDataPageAllocator", and set it to the PlatformPageAllocator when
+// V8_VIRTUAL_MEMORY_CAGE is not defined. Then use that allocator whenever
+// allocating ArrayBuffer backing stores inside v8.
+v8::PageAllocator* GetPlatformDataCagePageAllocator() {
+ if (GetProcessWideVirtualMemoryCage()->is_disabled()) {
+ return GetPlatformPageAllocator();
+ } else {
+ CHECK(GetProcessWideVirtualMemoryCage()->is_initialized());
+ return GetProcessWideVirtualMemoryCage()->GetDataCagePageAllocator();
+ }
+}
+#endif
+
v8::PageAllocator* SetPlatformPageAllocatorForTesting(
v8::PageAllocator* new_page_allocator) {
v8::PageAllocator* old_page_allocator = GetPlatformPageAllocator();
- GetPageTableInitializer()->SetPageAllocatorForTesting(new_page_allocator);
+ GetPageAllocatorInitializer()->SetPageAllocatorForTesting(new_page_allocator);
return old_page_allocator;
}
@@ -323,7 +349,8 @@ inline Address VirtualMemoryCageStart(
}
} // namespace
-bool VirtualMemoryCage::InitReservation(const ReservationParams& params) {
+bool VirtualMemoryCage::InitReservation(
+ const ReservationParams& params, base::AddressRegion existing_reservation) {
DCHECK(!reservation_.IsReserved());
const size_t allocate_page_size = params.page_allocator->AllocatePageSize();
@@ -337,7 +364,16 @@ bool VirtualMemoryCage::InitReservation(const ReservationParams& params) {
RoundUp(params.base_alignment, allocate_page_size)) -
RoundUp(params.base_bias_size, allocate_page_size);
- if (params.base_alignment == ReservationParams::kAnyBaseAlignment) {
+ if (!existing_reservation.is_empty()) {
+ CHECK_EQ(existing_reservation.size(), params.reservation_size);
+ CHECK(params.base_alignment == ReservationParams::kAnyBaseAlignment ||
+ IsAligned(existing_reservation.begin(), params.base_alignment));
+ reservation_ =
+ VirtualMemory(params.page_allocator, existing_reservation.begin(),
+ existing_reservation.size());
+ base_ = reservation_.address() + params.base_bias_size;
+ reservation_is_owned_ = false;
+ } else if (params.base_alignment == ReservationParams::kAnyBaseAlignment) {
// When the base doesn't need to be aligned, the virtual memory reservation
// fails only due to OOM.
VirtualMemory reservation(params.page_allocator, params.reservation_size,
@@ -426,7 +462,13 @@ void VirtualMemoryCage::Free() {
if (IsReserved()) {
base_ = kNullAddress;
page_allocator_.reset();
- reservation_.Free();
+ if (reservation_is_owned_) {
+ reservation_.Free();
+ } else {
+ // Reservation is owned by the Platform.
+ DCHECK(V8_VIRTUAL_MEMORY_CAGE_BOOL);
+ reservation_.Reset();
+ }
}
}
diff --git a/deps/v8/src/utils/allocation.h b/deps/v8/src/utils/allocation.h
index 1d161b7e24..93499cc5e1 100644
--- a/deps/v8/src/utils/allocation.h
+++ b/deps/v8/src/utils/allocation.h
@@ -100,6 +100,12 @@ V8_EXPORT_PRIVATE void AlignedFree(void* ptr);
// Returns the platform page allocator instance. Guaranteed to be a valid pointer.
V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformPageAllocator();
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+// Returns the platform data cage page allocator instance. Guaranteed to be a
+// valid pointer.
+V8_EXPORT_PRIVATE v8::PageAllocator* GetPlatformDataCagePageAllocator();
+#endif
+
// Sets the given page allocator as the platform page allocator and returns
// the current one. This function *must* be used only for testing purposes.
// It is not thread-safe and the testing infrastructure should ensure that
@@ -310,6 +316,9 @@ class VirtualMemory final {
// and the base bias size must be AllocatePageSize-aligned.
// - The base alignment may be kAnyBaseAlignment to denote any alignment is
// acceptable. In this case the base bias size does not need to be aligned.
+//
+// TODO(chromium:1218005) can we either combine this class and
+// v8::VirtualMemoryCage in v8-platform.h or rename one of the two?
class VirtualMemoryCage {
public:
VirtualMemoryCage();
@@ -351,13 +360,23 @@ class VirtualMemoryCage {
// A number of attempts is made to try to reserve a region that satisfies the
// constraints in params, but this may fail. The base address may be different
// than the one requested.
- bool InitReservation(const ReservationParams& params);
+ // If an existing reservation is provided, it will be used for this cage
+ // instead. The caller retains ownership of the reservation and is responsible
+ // for keeping the memory reserved during the lifetime of this object.
+ bool InitReservation(
+ const ReservationParams& params,
+ base::AddressRegion existing_reservation = base::AddressRegion());
void Free();
protected:
Address base_ = kNullAddress;
std::unique_ptr<base::BoundedPageAllocator> page_allocator_;
+ // Whether this cage owns the virtual memory reservation and thus should
+ // release it upon destruction. TODO(chromium:1218005) this is only needed
+ // when V8_VIRTUAL_MEMORY_CAGE is enabled. Maybe we can remove this again e.g.
+ // by merging this class and v8::VirtualMemoryCage in v8-platform.h.
+ bool reservation_is_owned_ = true;
VirtualMemory reservation_;
};
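The reservation_is_owned_ flag implements a borrow-or-own split: a cage built from params owns its reservation and frees it, while a cage built on an existing reservation only forgets it and leaves the caller's memory alive. A minimal standalone sketch of that pattern, with malloc standing in for a real page reservation:

    #include <cstdlib>

    class CageRegion {
     public:
      explicit CageRegion(std::size_t size)
          : base_(std::malloc(size)), owned_(true) {}
      explicit CageRegion(void* existing_reservation)  // caller keeps ownership
          : base_(existing_reservation), owned_(false) {}
      ~CageRegion() {
        if (owned_) std::free(base_);  // borrowed regions stay alive
      }
      CageRegion(const CageRegion&) = delete;
      CageRegion& operator=(const CageRegion&) = delete;

     private:
      void* base_;
      const bool owned_;
    };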
diff --git a/deps/v8/src/utils/v8dll-main.cc b/deps/v8/src/utils/v8dll-main.cc
index 6b484cfc8e..9bdd97f365 100644
--- a/deps/v8/src/utils/v8dll-main.cc
+++ b/deps/v8/src/utils/v8dll-main.cc
@@ -5,7 +5,7 @@
// The GYP based build ends up defining USING_V8_SHARED when compiling this
// file.
#undef USING_V8_SHARED
-#include "include/v8.h"
+#include "include/v8config.h"
#if V8_OS_WIN
#include "src/base/win32-headers.h"
diff --git a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
index 6e2bacc043..211cf82398 100644
--- a/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
+++ b/deps/v8/src/wasm/baseline/arm/liftoff-assembler-arm.h
@@ -4262,14 +4262,34 @@ void LiftoffAssembler::MaybeOSR() {}
void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
ValueKind kind) {
- UNIMPLEMENTED();
+ if (kind == kF32) {
+ FloatRegister src_f = liftoff::GetFloatRegister(src);
+ VFPCompareAndSetFlags(src_f, src_f);
+ } else {
+ DCHECK_EQ(kind, kF64);
+ VFPCompareAndSetFlags(src, src);
+ }
+
+ // Store a non-zero value if src is NaN.
+ str(dst, MemOperand(dst), ne); // x != x iff isnan(x)
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
- UNIMPLEMENTED();
+ QwNeonRegister src_q = liftoff::GetSimd128Register(src);
+ QwNeonRegister tmp_q = liftoff::GetSimd128Register(tmp_s128);
+ if (lane_kind == kF32) {
+ vpadd(tmp_q.low(), src_q.low(), src_q.high());
+ LowDwVfpRegister tmp_d =
+ LowDwVfpRegister::from_code(tmp_s128.low_fp().code());
+ vadd(tmp_d.low(), tmp_d.low(), tmp_d.high());
+ } else {
+ DCHECK_EQ(lane_kind, kF64);
+ vadd(tmp_q.low(), src_q.low(), src_q.high());
+ }
+ emit_set_if_nan(dst, tmp_q.low(), lane_kind);
}
void LiftoffStackSlots::Construct(int param_slots) {
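Both emit_set_if_nan implementations above rest on the IEEE-754 identity that x != x holds exactly for NaN, so an ordinary compare instruction doubles as a NaN test; the s128 variants first fold the vector lanes with a NaN-propagating reduction. A portable model, as a hedged sketch rather than V8 code:

    // Self-comparison NaN test. (Compilers in fast-math mode may break
    // this identity; the assemblers above emit the compare directly.)
    bool IsNaNBySelfCompare(double x) { return x != x; }

    // Model of the s128 variant: reduce the lanes with an operation that
    // propagates NaN (pairwise add here, Fmaxv/Fmaxp on arm64), then test
    // the single reduced value.
    bool AnyLaneIsNaN(const double lanes[2]) {
      double reduced = lanes[0] + lanes[1];  // a NaN in either lane survives
      return IsNaNBySelfCompare(reduced);
    }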
diff --git a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
index a52370f293..1d29ce72bc 100644
--- a/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
+++ b/deps/v8/src/wasm/baseline/arm64/liftoff-assembler-arm64.h
@@ -1173,12 +1173,7 @@ void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
}
bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
- UseScratchRegisterScope temps(this);
- VRegister scratch = temps.AcquireV(kFormat8B);
- Fmov(scratch.S(), src.W());
- Cnt(scratch, scratch);
- Addv(scratch.B(), scratch);
- Fmov(dst.W(), scratch.S());
+ PopcntHelper(dst.W(), src.W());
return true;
}
@@ -1193,12 +1188,7 @@ void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
LiftoffRegister src) {
- UseScratchRegisterScope temps(this);
- VRegister scratch = temps.AcquireV(kFormat8B);
- Fmov(scratch.D(), src.gp().X());
- Cnt(scratch, scratch);
- Addv(scratch.B(), scratch);
- Fmov(dst.gp().X(), scratch.D());
+ PopcntHelper(dst.gp().X(), src.gp().X());
return true;
}
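The hand-written NEON sequence being deleted here (move to a vector register, Cnt for per-byte counts, Addv to sum the bytes, move back) now lives behind PopcntHelper. The same two steps have a classic scalar counterpart, shown as a hedged model rather than V8 code:

    #include <cstdint>

    uint64_t Popcount64(uint64_t x) {
      x = x - ((x >> 1) & 0x5555555555555555ull);  // 2-bit partial sums
      x = (x & 0x3333333333333333ull) + ((x >> 2) & 0x3333333333333333ull);
      x = (x + (x >> 4)) & 0x0F0F0F0F0F0F0F0Full;  // per-byte counts (Cnt)
      return (x * 0x0101010101010101ull) >> 56;    // horizontal add (Addv)
    }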
@@ -1717,13 +1707,13 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
UseScratchRegisterScope temps(this);
MemOperand src_op{
liftoff::GetEffectiveAddress(this, &temps, addr, offset_reg, offset_imm)};
- *protected_load_pc = pc_offset();
MachineType mem_type = type.mem_type();
if (dst != src) {
Mov(dst.fp().Q(), src.fp().Q());
}
+ *protected_load_pc = pc_offset();
if (mem_type == MachineType::Int8()) {
ld1(dst.fp().B(), laneidx, src_op);
} else if (mem_type == MachineType::Int16()) {
@@ -3259,14 +3249,35 @@ void LiftoffAssembler::MaybeOSR() {}
void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
ValueKind kind) {
- UNIMPLEMENTED();
+ Label not_nan;
+ if (kind == kF32) {
+ Fcmp(src.S(), src.S());
+ B(eq, &not_nan); // x != x iff isnan(x)
+ // If it's a NaN, it must be non-zero, so store that as the set value.
+ Str(src.S(), MemOperand(dst));
+ } else {
+ DCHECK_EQ(kind, kF64);
+ Fcmp(src.D(), src.D());
+ B(eq, &not_nan); // x != x iff isnan(x)
+ // Double-precision NaNs must be non-zero in the most-significant 32
+ // bits, so store that.
+ St1(src.V4S(), 1, MemOperand(dst));
+ }
+ Bind(&not_nan);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
- UNIMPLEMENTED();
+ DoubleRegister tmp_fp = tmp_s128.fp();
+ if (lane_kind == kF32) {
+ Fmaxv(tmp_fp.S(), src.fp().V4S());
+ } else {
+ DCHECK_EQ(lane_kind, kF64);
+ Fmaxp(tmp_fp.D(), src.fp().V2D());
+ }
+ emit_set_if_nan(dst, tmp_fp, lane_kind);
}
void LiftoffStackSlots::Construct(int param_slots) {
diff --git a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
index bb2fed83c6..5f92d50f6f 100644
--- a/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
+++ b/deps/v8/src/wasm/baseline/ia32/liftoff-assembler-ia32.h
@@ -2718,40 +2718,6 @@ void EmitSimdShiftOpImm(LiftoffAssembler* assm, LiftoffRegister dst,
}
}
-enum class ShiftSignedness { kSigned, kUnsigned };
-
-template <bool is_signed>
-void EmitI8x16Shr(LiftoffAssembler* assm, LiftoffRegister dst,
- LiftoffRegister lhs, LiftoffRegister rhs) {
- // Same algorithm is used for both signed and unsigned shifts, the only
- // difference is the actual shift and pack in the end. This is the same
- // algorithm as used in code-generator-ia32.cc
- Register tmp =
- assm->GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(rhs)).gp();
- XMMRegister tmp_simd =
- assm->GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs)).fp();
-
- // Unpack the bytes into words, do logical shifts, and repack.
- assm->Punpckhbw(liftoff::kScratchDoubleReg, lhs.fp());
- assm->Punpcklbw(dst.fp(), lhs.fp());
- assm->mov(tmp, rhs.gp());
- // Take shift value modulo 8.
- assm->and_(tmp, 7);
- assm->add(tmp, Immediate(8));
- assm->Movd(tmp_simd, tmp);
- if (is_signed) {
- assm->Psraw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg,
- tmp_simd);
- assm->Psraw(dst.fp(), dst.fp(), tmp_simd);
- assm->Packsswb(dst.fp(), liftoff::kScratchDoubleReg);
- } else {
- assm->Psrlw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg,
- tmp_simd);
- assm->Psrlw(dst.fp(), dst.fp(), tmp_simd);
- assm->Packuswb(dst.fp(), liftoff::kScratchDoubleReg);
- }
-}
-
inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister src) {
Register tmp =
@@ -2809,23 +2775,19 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
}
} else if (transform == LoadTransformationKind::kZeroExtend) {
if (memtype == MachineType::Int32()) {
- movss(dst.fp(), src_op);
+ Movss(dst.fp(), src_op);
} else {
DCHECK_EQ(MachineType::Int64(), memtype);
- movsd(dst.fp(), src_op);
+ Movsd(dst.fp(), src_op);
}
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
if (memtype == MachineType::Int8()) {
- Pinsrb(dst.fp(), src_op, 0);
- Pxor(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Pshufb(dst.fp(), liftoff::kScratchDoubleReg);
+ S128Load8Splat(dst.fp(), src_op, liftoff::kScratchDoubleReg);
} else if (memtype == MachineType::Int16()) {
- Pinsrw(dst.fp(), src_op, 0);
- Pshuflw(dst.fp(), dst.fp(), uint8_t{0});
- Punpcklqdq(dst.fp(), dst.fp());
+ S128Load16Splat(dst.fp(), src_op, liftoff::kScratchDoubleReg);
} else if (memtype == MachineType::Int32()) {
- Vbroadcastss(dst.fp(), src_op);
+ S128Load32Splat(dst.fp(), src_op);
} else if (memtype == MachineType::Int64()) {
Movddup(dst.fp(), src_op);
}
@@ -2875,12 +2837,7 @@ void LiftoffAssembler::StoreLane(Register dst, Register offset,
S128Store32Lane(dst_op, src.fp(), lane);
} else {
DCHECK_EQ(MachineRepresentation::kWord64, rep);
- if (lane == 0) {
- Movlps(dst_op, src.fp());
- } else {
- DCHECK_EQ(1, lane);
- Movhps(dst_op, src.fp());
- }
+ S128Store64Lane(dst_op, src.fp(), lane);
}
}
@@ -2951,16 +2908,12 @@ void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pxor(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- Pshufb(dst.fp(), liftoff::kScratchDoubleReg);
+ I8x16Splat(dst.fp(), src.gp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pshuflw(dst.fp(), dst.fp(), uint8_t{0});
- Pshufd(dst.fp(), dst.fp(), uint8_t{0});
+ I16x8Splat(dst.fp(), src.gp());
}
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
@@ -3366,89 +3319,48 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(kI32);
- static constexpr RegClass tmp_simd_rc = reg_class_for(kS128);
- LiftoffRegister tmp = GetUnusedRegister(tmp_rc, LiftoffRegList::ForRegs(rhs));
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(rhs));
LiftoffRegister tmp_simd =
- GetUnusedRegister(tmp_simd_rc, LiftoffRegList::ForRegs(dst, lhs));
- // Mask off the unwanted bits before word-shifting.
- Pcmpeqw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- mov(tmp.gp(), rhs.gp());
- and_(tmp.gp(), Immediate(7));
- add(tmp.gp(), Immediate(8));
- Movd(tmp_simd.fp(), tmp.gp());
- Psrlw(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, tmp_simd.fp());
- Packuswb(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
-
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpand(dst.fp(), lhs.fp(), liftoff::kScratchDoubleReg);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- andps(dst.fp(), liftoff::kScratchDoubleReg);
- }
- sub(tmp.gp(), Immediate(8));
- Movd(tmp_simd.fp(), tmp.gp());
- Psllw(dst.fp(), dst.fp(), tmp_simd.fp());
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs));
+ I8x16Shl(dst.fp(), lhs.fp(), rhs.gp(), tmp.gp(), liftoff::kScratchDoubleReg,
+ tmp_simd.fp());
}
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- static constexpr RegClass tmp_rc = reg_class_for(kI32);
- LiftoffRegister tmp = GetUnusedRegister(tmp_rc, {});
- byte shift = static_cast<byte>(rhs & 0x7);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsllw(dst.fp(), lhs.fp(), shift);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- psllw(dst.fp(), shift);
- }
-
- uint8_t bmask = static_cast<uint8_t>(0xff << shift);
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- mov(tmp.gp(), mask);
- Movd(liftoff::kScratchDoubleReg, tmp.gp());
- Pshufd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, uint8_t{0});
- Pand(dst.fp(), liftoff::kScratchDoubleReg);
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
+ I8x16Shl(dst.fp(), lhs.fp(), rhs, tmp.gp(), liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitI8x16Shr</*is_signed=*/true>(this, dst, lhs, rhs);
+ Register tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(rhs)).gp();
+ XMMRegister tmp_simd =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs)).fp();
+ I8x16ShrS(dst.fp(), lhs.fp(), rhs.gp(), tmp, liftoff::kScratchDoubleReg,
+ tmp_simd);
}
void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
- Punpckhbw(liftoff::kScratchDoubleReg, lhs.fp());
- Punpcklbw(dst.fp(), lhs.fp());
- uint8_t shift = (rhs & 7) + 8;
- Psraw(liftoff::kScratchDoubleReg, shift);
- Psraw(dst.fp(), shift);
- Packsswb(dst.fp(), liftoff::kScratchDoubleReg);
+ I8x16ShrS(dst.fp(), lhs.fp(), rhs, liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitI8x16Shr</*is_signed=*/false>(this, dst, lhs, rhs);
+ Register tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(rhs)).gp();
+ XMMRegister tmp_simd =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst, lhs)).fp();
+ I8x16ShrU(dst.fp(), lhs.fp(), rhs.gp(), tmp, liftoff::kScratchDoubleReg,
+ tmp_simd);
}
void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
Register tmp = GetUnusedRegister(kGpReg, {}).gp();
- // Perform 16-bit shift, then mask away high bits.
- uint8_t shift = rhs & 7;
- liftoff::EmitSimdShiftOpImm<&Assembler::vpsrlw, &Assembler::psrlw, 3>(
- this, dst, lhs, rhs);
-
- uint8_t bmask = 0xff >> shift;
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- mov(tmp, mask);
- Movd(liftoff::kScratchDoubleReg, tmp);
- Pshufd(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, uint8_t{0});
- Pand(dst.fp(), liftoff::kScratchDoubleReg);
+ I8x16ShrU(dst.fp(), lhs.fp(), rhs, tmp, liftoff::kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -4300,26 +4212,8 @@ void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
- // NAN->0
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vcmpeqps(liftoff::kScratchDoubleReg, src.fp(), src.fp());
- vpand(dst.fp(), src.fp(), liftoff::kScratchDoubleReg);
- } else {
- movaps(liftoff::kScratchDoubleReg, src.fp());
- cmpeqps(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg);
- if (dst.fp() != src.fp()) movaps(dst.fp(), src.fp());
- andps(dst.fp(), liftoff::kScratchDoubleReg);
- }
- // Set top bit if >= 0 (but not -0.0!).
- Pxor(liftoff::kScratchDoubleReg, dst.fp());
- // Convert to int.
- Cvttps2dq(dst.fp(), dst.fp());
- // Set top bit if >=0 is now < 0.
- Pand(liftoff::kScratchDoubleReg, dst.fp());
- Psrad(liftoff::kScratchDoubleReg, liftoff::kScratchDoubleReg, byte{31});
- // Set positive overflow lanes to 0x7FFFFFFF.
- Pxor(dst.fp(), liftoff::kScratchDoubleReg);
+ Register tmp = GetUnusedRegister(kGpReg, {}).gp();
+ I32x4SConvertF32x4(dst.fp(), src.fp(), liftoff::kScratchDoubleReg, tmp);
}
void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
@@ -4787,22 +4681,14 @@ void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
// Since we have more cache registers than parameter registers, the
// {LiftoffCompiler} should always be able to place {target} in a register.
DCHECK(target.is_valid());
- if (FLAG_untrusted_code_mitigations) {
- RetpolineCall(target);
- } else {
- call(target);
- }
+ call(target);
}
void LiftoffAssembler::TailCallIndirect(Register target) {
// Since we have more cache registers than parameter registers, the
// {LiftoffCompiler} should always be able to place {target} in a register.
DCHECK(target.is_valid());
- if (FLAG_untrusted_code_mitigations) {
- RetpolineJump(target);
- } else {
- jmp(target);
- }
+ jmp(target);
}
void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
@@ -4836,19 +4722,19 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
bind(&ret);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
if (lane_kind == kF32) {
- movaps(tmp_fp, src);
- cmpunordps(tmp_fp, tmp_fp);
+ movaps(tmp_s128.fp(), src.fp());
+ cmpunordps(tmp_s128.fp(), tmp_s128.fp());
} else {
DCHECK_EQ(lane_kind, kF64);
- movapd(tmp_fp, src);
- cmpunordpd(tmp_fp, tmp_fp);
+ movapd(tmp_s128.fp(), src.fp());
+ cmpunordpd(tmp_s128.fp(), tmp_s128.fp());
}
- pmovmskb(tmp_gp, tmp_fp);
+ pmovmskb(tmp_gp, tmp_s128.fp());
or_(Operand(dst, 0), tmp_gp);
}
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
index d445655dca..5b43a2a41d 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler-defs.h
@@ -46,6 +46,18 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs =
constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
f0, f2, f4, f6, f8, f10, f12, f14, f16, f18, f20, f22, f24, f26);
+#elif V8_TARGET_ARCH_LOONG64
+
+// t6-t8 and s3-s4: scratch registers, s6: root
+constexpr RegList kLiftoffAssemblerGpCacheRegs =
+ Register::ListOf(a0, a1, a2, a3, a4, a5, a6, a7, t0, t1, t2, t3, t4, t5, s0,
+ s1, s2, s5, s7, s8);
+
+// f29: zero, f30-f31: macro-assembler scratch float registers.
+constexpr RegList kLiftoffAssemblerFpCacheRegs = DoubleRegister::ListOf(
+ f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14, f15, f16,
+ f17, f18, f19, f20, f21, f22, f23, f24, f25, f26, f27, f28);
+
#elif V8_TARGET_ARCH_ARM
// r10: root, r11: fp, r12: ip, r13: sp, r14: lr, r15: pc.
@@ -95,8 +107,8 @@ constexpr RegList kLiftoffAssemblerGpCacheRegs =
// Any change of kLiftoffAssemblerGpCacheRegs also needs to update
// kPushedFpRegs in frame-constants-riscv64.h
constexpr RegList kLiftoffAssemblerFpCacheRegs =
- DoubleRegister::ListOf(ft0, ft1, ft2, ft3, ft4, ft5, ft6, ft7, fa0, fa1,
- fa2, fa3, fa4, fa5, fa6, fa7, ft8, ft9, ft10, ft11);
+ DoubleRegister::ListOf(ft1, ft2, ft3, ft4, ft5, ft6, ft7, fa0, fa1, fa2,
+ fa3, fa4, fa5, fa6, fa7, ft8, ft9, ft10, ft11);
#else
constexpr RegList kLiftoffAssemblerGpCacheRegs = 0xff;
diff --git a/deps/v8/src/wasm/baseline/liftoff-assembler.h b/deps/v8/src/wasm/baseline/liftoff-assembler.h
index 19611fb0ee..c94c7ece9e 100644
--- a/deps/v8/src/wasm/baseline/liftoff-assembler.h
+++ b/deps/v8/src/wasm/baseline/liftoff-assembler.h
@@ -1456,12 +1456,12 @@ class LiftoffAssembler : public TurboAssembler {
// Instrumentation for shadow-stack-compatible OSR on x64.
inline void MaybeOSR();
- // Set the i32 at address dst to 1 if src is a NaN.
+ // Set the i32 at address dst to a non-zero value if src is a NaN.
inline void emit_set_if_nan(Register dst, DoubleRegister src, ValueKind kind);
// Set the i32 at address dst to a non-zero value if src contains a NaN.
- inline void emit_s128_set_if_nan(Register dst, DoubleRegister src,
- Register tmp_gp, DoubleRegister tmp_fp,
+ inline void emit_s128_set_if_nan(Register dst, LiftoffRegister src,
+ Register tmp_gp, LiftoffRegister tmp_s128,
ValueKind lane_kind);
////////////////////////////////////
@@ -1711,6 +1711,8 @@ bool CheckCompatibleStackSlotTypes(ValueKind a, ValueKind b);
#include "src/wasm/baseline/mips/liftoff-assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/wasm/baseline/mips64/liftoff-assembler-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/wasm/baseline/loong64/liftoff-assembler-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/wasm/baseline/s390/liftoff-assembler-s390.h"
#elif V8_TARGET_ARCH_RISCV64
diff --git a/deps/v8/src/wasm/baseline/liftoff-compiler.cc b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
index eeed531cf8..65226ab408 100644
--- a/deps/v8/src/wasm/baseline/liftoff-compiler.cc
+++ b/deps/v8/src/wasm/baseline/liftoff-compiler.cc
@@ -306,7 +306,7 @@ void CheckBailoutAllowed(LiftoffBailoutReason reason, const char* detail,
// Some externally maintained architectures don't fully implement Liftoff yet.
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_S390X || \
- V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
+ V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_LOONG64
return;
#endif
@@ -2808,30 +2808,6 @@ class LiftoffCompiler {
__ DeallocateStackSlot(sizeof(MemoryTracingInfo));
}
- Register AddMemoryMasking(Register index, uintptr_t* offset,
- LiftoffRegList* pinned) {
- if (!FLAG_untrusted_code_mitigations ||
- env_->bounds_checks == kTrapHandler) {
- return index;
- }
- CODE_COMMENT("mask memory index");
- // Make sure that we can overwrite {index}.
- if (__ cache_state()->is_used(LiftoffRegister(index))) {
- Register old_index = index;
- pinned->clear(LiftoffRegister{old_index});
- index = pinned->set(__ GetUnusedRegister(kGpReg, *pinned)).gp();
- if (index != old_index) {
- __ Move(index, old_index, kPointerKind);
- }
- }
- Register tmp = __ GetUnusedRegister(kGpReg, *pinned).gp();
- LOAD_INSTANCE_FIELD(tmp, MemoryMask, kSystemPointerSize, *pinned);
- if (*offset) __ emit_ptrsize_addi(index, index, *offset);
- __ emit_ptrsize_and(index, index, tmp);
- *offset = 0;
- return index;
- }
-
bool IndexStaticallyInBounds(const LiftoffAssembler::VarState& index_slot,
int access_size, uintptr_t* offset) {
if (!index_slot.is_const()) return false;
@@ -2892,7 +2868,6 @@ class LiftoffCompiler {
CODE_COMMENT("load from memory");
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
- index = AddMemoryMasking(index, &offset, &pinned);
// Load the memory start address only now to reduce register pressure
// (important on ia32).
@@ -2937,7 +2912,6 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("load with transformation");
Register addr = GetMemoryStart(pinned);
LiftoffRegister value = __ GetUnusedRegister(reg_class_for(kS128), {});
@@ -2977,7 +2951,6 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
pinned.set(index);
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("load lane");
Register addr = GetMemoryStart(pinned);
LiftoffRegister result = __ GetUnusedRegister(reg_class_for(kS128), {});
@@ -3023,7 +2996,6 @@ class LiftoffCompiler {
if (index == no_reg) return;
pinned.set(index);
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("store to memory");
uint32_t protected_store_pc = 0;
// Load the memory start address only now to reduce register pressure
@@ -3058,7 +3030,6 @@ class LiftoffCompiler {
uintptr_t offset = imm.offset;
pinned.set(index);
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("store lane to memory");
Register addr = pinned.set(GetMemoryStart(pinned));
uint32_t protected_store_pc = 0;
@@ -4340,7 +4311,6 @@ class LiftoffCompiler {
pinned.set(index);
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("atomic store to memory");
Register addr = pinned.set(GetMemoryStart(pinned));
LiftoffRegList outer_pinned;
@@ -4363,7 +4333,6 @@ class LiftoffCompiler {
LiftoffRegList pinned = LiftoffRegList::ForRegs(index);
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, &pinned);
CODE_COMMENT("atomic load from memory");
Register addr = pinned.set(GetMemoryStart(pinned));
RegClass rc = reg_class_for(kind);
@@ -4411,7 +4380,6 @@ class LiftoffCompiler {
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(GetMemoryStart(pinned));
(asm_.*emit_fn)(addr, index, offset, value, result, type);
@@ -4434,7 +4402,6 @@ class LiftoffCompiler {
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp();
LOAD_INSTANCE_FIELD(addr, MemoryStart, kSystemPointerSize, pinned);
__ emit_i32_add(addr, addr, index);
@@ -4467,7 +4434,6 @@ class LiftoffCompiler {
AlignmentCheckMem(decoder, type.size(), imm.offset, index, pinned);
uintptr_t offset = imm.offset;
- index = AddMemoryMasking(index, &offset, &pinned);
Register addr = pinned.set(GetMemoryStart(pinned));
LiftoffRegister result =
pinned.set(__ GetUnusedRegister(reg_class_for(result_kind), pinned));
@@ -4514,7 +4480,6 @@ class LiftoffCompiler {
pinned);
uintptr_t offset = imm.offset;
- index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
Register index_plus_offset =
__ cache_state()->is_used(LiftoffRegister(index_reg))
? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
@@ -4531,8 +4496,7 @@ class LiftoffCompiler {
__ cache_state()->stack_state.end()[-2];
LiftoffAssembler::VarState index = __ cache_state()->stack_state.end()[-3];
- // We have to set the correct register for the index. It may have changed
- // above in {AddMemoryMasking}.
+ // We have to set the correct register for the index.
index.MakeRegister(LiftoffRegister(index_plus_offset));
static constexpr WasmCode::RuntimeStubId kTargets[2][2]{
@@ -4562,7 +4526,6 @@ class LiftoffCompiler {
AlignmentCheckMem(decoder, kInt32Size, imm.offset, index_reg, pinned);
uintptr_t offset = imm.offset;
- index_reg = AddMemoryMasking(index_reg, &offset, &pinned);
Register index_plus_offset =
__ cache_state()->is_used(LiftoffRegister(index_reg))
? pinned.set(__ GetUnusedRegister(kGpReg, pinned)).gp()
@@ -5055,7 +5018,7 @@ class LiftoffCompiler {
Label* trap_label =
AddOutOfLineTrap(decoder, WasmCode::kThrowWasmTrapArrayTooLarge);
__ emit_i32_cond_jumpi(kUnsignedGreaterThan, trap_label, length.gp(),
- static_cast<int>(wasm::kV8MaxWasmArrayLength));
+ WasmArray::MaxLength(imm.array_type));
}
ValueKind elem_kind = imm.array_type->element_type().kind();
int elem_size = element_size_bytes(elem_kind);
@@ -5184,6 +5147,8 @@ class LiftoffCompiler {
void ArrayCopy(FullDecoder* decoder, const Value& dst, const Value& dst_index,
const Value& src, const Value& src_index,
const Value& length) {
+ // TODO(7748): Unify implementation with TF: Implement this with
+ // GenerateCCall. Remove runtime function and builtin in wasm.tq.
CallRuntimeStub(WasmCode::kWasmArrayCopyWithChecks,
MakeSig::Params(kI32, kI32, kI32, kOptRef, kOptRef),
// Builtin parameter order:
@@ -5778,28 +5743,6 @@ class LiftoffCompiler {
__ emit_cond_jump(kUnsignedGreaterEqual, invalid_func_label, kI32, index,
tmp_const);
- // Mask the index to prevent SSCA.
- if (FLAG_untrusted_code_mitigations) {
- CODE_COMMENT("Mask indirect call index");
- // mask = ((index - size) & ~index) >> 31
- // Reuse allocated registers; note: size is still stored in {tmp_const}.
- Register diff = table;
- Register neg_index = tmp_const;
- Register mask = scratch;
- // 1) diff = index - size
- __ emit_i32_sub(diff, index, tmp_const);
- // 2) neg_index = ~index
- __ LoadConstant(LiftoffRegister(neg_index), WasmValue(int32_t{-1}));
- __ emit_i32_xor(neg_index, neg_index, index);
- // 3) mask = diff & neg_index
- __ emit_i32_and(mask, diff, neg_index);
- // 4) mask = mask >> 31
- __ emit_i32_sari(mask, mask, 31);
-
- // Apply mask.
- __ emit_i32_and(index, index, mask);
- }
-
CODE_COMMENT("Check indirect call signature");
// Load the signature from {instance->ift_sig_ids[key]}
if (imm.table_imm.index == 0) {
@@ -6151,14 +6094,14 @@ class LiftoffCompiler {
ValueKind lane_kind) {
RegClass rc = reg_class_for(kS128);
LiftoffRegister tmp_gp = pinned.set(__ GetUnusedRegister(kGpReg, pinned));
- LiftoffRegister tmp_fp = pinned.set(__ GetUnusedRegister(rc, pinned));
+ LiftoffRegister tmp_s128 = pinned.set(__ GetUnusedRegister(rc, pinned));
LiftoffRegister nondeterminism_addr =
pinned.set(__ GetUnusedRegister(kGpReg, pinned));
__ LoadConstant(
nondeterminism_addr,
WasmValue::ForUintPtr(reinterpret_cast<uintptr_t>(nondeterminism_)));
- __ emit_s128_set_if_nan(nondeterminism_addr.gp(), dst.fp(), tmp_gp.gp(),
- tmp_fp.fp(), lane_kind);
+ __ emit_s128_set_if_nan(nondeterminism_addr.gp(), dst, tmp_gp.gp(),
+ tmp_s128, lane_kind);
}
static constexpr WasmOpcode kNoOutstandingOp = kExprUnreachable;
diff --git a/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h b/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
new file mode 100644
index 0000000000..f22e013601
--- /dev/null
+++ b/deps/v8/src/wasm/baseline/loong64/liftoff-assembler-loong64.h
@@ -0,0 +1,2817 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_WASM_BASELINE_LOONG64_LIFTOFF_ASSEMBLER_LOONG64_H_
+#define V8_WASM_BASELINE_LOONG64_LIFTOFF_ASSEMBLER_LOONG64_H_
+
+#include "src/base/platform/wrappers.h"
+#include "src/codegen/machine-type.h"
+#include "src/heap/memory-chunk.h"
+#include "src/wasm/baseline/liftoff-assembler.h"
+#include "src/wasm/wasm-objects.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
+namespace liftoff {
+
+inline constexpr Condition ToCondition(LiftoffCondition liftoff_cond) {
+ switch (liftoff_cond) {
+ case kEqual:
+ return eq;
+ case kUnequal:
+ return ne;
+ case kSignedLessThan:
+ return lt;
+ case kSignedLessEqual:
+ return le;
+ case kSignedGreaterThan:
+ return gt;
+ case kSignedGreaterEqual:
+ return ge;
+ case kUnsignedLessThan:
+ return ult;
+ case kUnsignedLessEqual:
+ return ule;
+ case kUnsignedGreaterThan:
+ return ugt;
+ case kUnsignedGreaterEqual:
+ return uge;
+ }
+}
+
+// Liftoff Frames.
+//
+// slot Frame
+// +--------------------+---------------------------
+// n+4 | optional padding slot to keep the stack 16-byte aligned.
+// n+3 | parameter n |
+// ... | ... |
+// 4 | parameter 1 | or parameter 2
+// 3 | parameter 0 | or parameter 1
+// 2 | (result address) | or parameter 0
+// -----+--------------------+---------------------------
+// 1 | return addr (ra) |
+// 0 | previous frame (fp)|
+// -----+--------------------+ <-- frame ptr (fp)
+// -1 | 0xa: WASM |
+// -2 | instance |
+// -----+--------------------+---------------------------
+// -3 | slot 0 | ^
+// -4 | slot 1 | |
+// | | Frame slots
+// | | |
+// | | v
+// | optional padding slot to keep the stack 16-byte aligned.
+// -----+--------------------+ <-- stack ptr (sp)
+//
+
+// fp-8 holds the stack marker, fp-16 is the instance parameter.
+constexpr int kInstanceOffset = 16;
+
+inline MemOperand GetStackSlot(int offset) { return MemOperand(fp, -offset); }
+
+inline MemOperand GetInstanceOperand() { return GetStackSlot(kInstanceOffset); }
+
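+// Computes a MemOperand for {addr + offset + offset_imm}. If the combination
+// cannot be encoded directly, the effective address is materialized in
+// {kScratchReg}.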
+template <typename T>
+inline MemOperand GetMemOp(LiftoffAssembler* assm, Register addr,
+ Register offset, T offset_imm) {
+ if (is_int32(offset_imm)) {
+ int32_t offset_imm32 = static_cast<int32_t>(offset_imm);
+ if (offset == no_reg) return MemOperand(addr, offset_imm32);
+ assm->add_d(kScratchReg, addr, offset);
+ return MemOperand(kScratchReg, offset_imm32);
+ }
+ // Offset immediate does not fit in 31 bits.
+ assm->li(kScratchReg, Operand(offset_imm));
+ assm->add_d(kScratchReg, kScratchReg, addr);
+ if (offset != no_reg) {
+ assm->add_d(kScratchReg, kScratchReg, offset);
+ }
+ return MemOperand(kScratchReg, 0);
+}
+
+inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
+ ValueKind kind) {
+ switch (kind) {
+ case kI32:
+ assm->Ld_w(dst.gp(), src);
+ break;
+ case kI64:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ assm->Ld_d(dst.gp(), src);
+ break;
+ case kF32:
+ assm->Fld_s(dst.fp(), src);
+ break;
+ case kF64:
+ assm->Fld_d(dst.fp(), src);
+ break;
+ case kS128:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
+ LiftoffRegister src, ValueKind kind) {
+ MemOperand dst(base, offset);
+ switch (kind) {
+ case kI32:
+ assm->St_w(src.gp(), dst);
+ break;
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
+ assm->St_d(src.gp(), dst);
+ break;
+ case kF32:
+ assm->Fst_s(src.fp(), dst);
+ break;
+ case kF64:
+ assm->Fst_d(src.fp(), dst);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+inline void push(LiftoffAssembler* assm, LiftoffRegister reg, ValueKind kind) {
+ switch (kind) {
+ case kI32:
+ assm->addi_d(sp, sp, -kSystemPointerSize);
+ assm->St_w(reg.gp(), MemOperand(sp, 0));
+ break;
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ assm->Push(reg.gp());
+ break;
+ case kF32:
+ assm->addi_d(sp, sp, -kSystemPointerSize);
+ assm->Fst_s(reg.fp(), MemOperand(sp, 0));
+ break;
+ case kF64:
+ assm->addi_d(sp, sp, -kSystemPointerSize);
+ assm->Fst_d(reg.fp(), MemOperand(sp, 0));
+ break;
+ case kS128:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+} // namespace liftoff
+
+int LiftoffAssembler::PrepareStackFrame() {
+ int offset = pc_offset();
+  // When the constant that represents the size of the stack frame can't be
+  // represented as a 16-bit immediate, we need three instructions to add it
+  // to sp, so we reserve space for this case.
+ addi_d(sp, sp, 0);
+ nop();
+ nop();
+ return offset;
+}
+
+void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
+ int stack_param_delta) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+
+ // Push the return address and frame pointer to complete the stack frame.
+ Ld_d(scratch, MemOperand(fp, 8));
+ Push(scratch);
+ Ld_d(scratch, MemOperand(fp, 0));
+ Push(scratch);
+
+ // Shift the whole frame upwards.
+ int slot_count = num_callee_stack_params + 2;
+ for (int i = slot_count - 1; i >= 0; --i) {
+ Ld_d(scratch, MemOperand(sp, i * 8));
+ St_d(scratch, MemOperand(fp, (i - stack_param_delta) * 8));
+ }
+
+ // Set the new stack and frame pointer.
+ addi_d(sp, fp, -stack_param_delta * 8);
+ Pop(ra, fp);
+}
+
+void LiftoffAssembler::AlignFrameSize() {}
+
+void LiftoffAssembler::PatchPrepareStackFrame(
+ int offset, SafepointTableBuilder* safepoint_table_builder) {
+ // The frame_size includes the frame marker and the instance slot. Both are
+ // pushed as part of frame construction, so we don't need to allocate memory
+ // for them anymore.
+ int frame_size = GetTotalFrameSize() - 2 * kSystemPointerSize;
+
+  // We can't run out of space; just pass anything big enough to not cause the
+  // assembler to try to grow the buffer.
+ constexpr int kAvailableSpace = 256;
+ TurboAssembler patching_assembler(
+ nullptr, AssemblerOptions{}, CodeObjectRequired::kNo,
+ ExternalAssemblerBuffer(buffer_start_ + offset, kAvailableSpace));
+
+ if (V8_LIKELY(frame_size < 4 * KB)) {
+ // This is the standard case for small frames: just subtract from SP and be
+ // done with it.
+ patching_assembler.Add_d(sp, sp, Operand(-frame_size));
+ return;
+ }
+
+  // The frame size is at least 4KB, so we might overflow the available stack
+ // space if we first allocate the frame and then do the stack check (we will
+ // need some remaining stack space for throwing the exception). That's why we
+ // check the available stack space before we allocate the frame. To do this we
+ // replace the {__ Add_d(sp, sp, -frame_size)} with a jump to OOL code that
+ // does this "extended stack check".
+ //
+ // The OOL code can simply be generated here with the normal assembler,
+ // because all other code generation, including OOL code, has already finished
+ // when {PatchPrepareStackFrame} is called. The function prologue then jumps
+ // to the current {pc_offset()} to execute the OOL code for allocating the
+ // large frame.
+ // Emit the unconditional branch in the function prologue (from {offset} to
+ // {pc_offset()}).
+
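+  // LoongArch branch offsets are encoded in 4-byte instruction units, hence
+  // the shift by 2 below.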
+ int imm32 = pc_offset() - offset;
+ CHECK(is_int26(imm32));
+ patching_assembler.b(imm32 >> 2);
+
+ // If the frame is bigger than the stack, we throw the stack overflow
+ // exception unconditionally. Thereby we can avoid the integer overflow
+ // check in the condition code.
+ RecordComment("OOL: stack check for large frame");
+ Label continuation;
+ if (frame_size < FLAG_stack_size * 1024) {
+ Register stack_limit = kScratchReg;
+ Ld_d(stack_limit,
+ FieldMemOperand(kWasmInstanceRegister,
+ WasmInstanceObject::kRealStackLimitAddressOffset));
+ Ld_d(stack_limit, MemOperand(stack_limit, 0));
+ Add_d(stack_limit, stack_limit, Operand(frame_size));
+ Branch(&continuation, uge, sp, Operand(stack_limit));
+ }
+
+ Call(wasm::WasmCode::kWasmStackOverflow, RelocInfo::WASM_STUB_CALL);
+ // The call will not return; just define an empty safepoint.
+ safepoint_table_builder->DefineSafepoint(this);
+ if (FLAG_debug_code) stop();
+
+ bind(&continuation);
+
+ // Now allocate the stack space. Note that this might do more than just
+  // decrementing the SP.
+ Add_d(sp, sp, Operand(-frame_size));
+
+ // Jump back to the start of the function, from {pc_offset()} to
+  // right after the reserved space for the {__ Add_d(sp, sp, -frame_size)}
+ // (which is a Branch now).
+ int func_start_offset = offset + 3 * kInstrSize;
+ imm32 = func_start_offset - pc_offset();
+ CHECK(is_int26(imm32));
+ b(imm32 >> 2);
+}
+
+void LiftoffAssembler::FinishCode() {}
+
+void LiftoffAssembler::AbortCompilation() {}
+
+// static
+constexpr int LiftoffAssembler::StaticStackFrameSize() {
+ return liftoff::kInstanceOffset;
+}
+
+int LiftoffAssembler::SlotSizeForType(ValueKind kind) {
+ switch (kind) {
+ case kS128:
+ return element_size_bytes(kind);
+ default:
+ return kStackSlotSize;
+ }
+}
+
+bool LiftoffAssembler::NeedsAlignment(ValueKind kind) {
+ return kind == kS128 || is_reference(kind);
+}
+
+void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
+ RelocInfo::Mode rmode) {
+ switch (value.type().kind()) {
+ case kI32:
+ TurboAssembler::li(reg.gp(), Operand(value.to_i32(), rmode));
+ break;
+ case kI64:
+ TurboAssembler::li(reg.gp(), Operand(value.to_i64(), rmode));
+ break;
+ case kF32:
+ TurboAssembler::Move(reg.fp(), value.to_f32_boxed().get_bits());
+ break;
+ case kF64:
+ TurboAssembler::Move(reg.fp(), value.to_f64_boxed().get_bits());
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::LoadInstanceFromFrame(Register dst) {
+ Ld_d(dst, liftoff::GetInstanceOperand());
+}
+
+void LiftoffAssembler::LoadFromInstance(Register dst, Register instance,
+ int offset, int size) {
+ DCHECK_LE(0, offset);
+ switch (size) {
+ case 1:
+ Ld_b(dst, MemOperand(instance, offset));
+ break;
+ case 4:
+ Ld_w(dst, MemOperand(instance, offset));
+ break;
+ case 8:
+ Ld_d(dst, MemOperand(instance, offset));
+ break;
+ default:
+ UNIMPLEMENTED();
+ }
+}
+
+void LiftoffAssembler::LoadTaggedPointerFromInstance(Register dst,
+ Register instance,
+ int32_t offset) {
+ STATIC_ASSERT(kTaggedSize == kSystemPointerSize);
+ Ld_d(dst, MemOperand(instance, offset));
+}
+
+void LiftoffAssembler::SpillInstance(Register instance) {
+ St_d(instance, liftoff::GetInstanceOperand());
+}
+
+void LiftoffAssembler::ResetOSRTarget() {}
+
+void LiftoffAssembler::FillInstanceInto(Register dst) {
+ Ld_d(dst, liftoff::GetInstanceOperand());
+}
+
+void LiftoffAssembler::LoadTaggedPointer(Register dst, Register src_addr,
+ Register offset_reg,
+ int32_t offset_imm,
+ LiftoffRegList pinned) {
+ STATIC_ASSERT(kTaggedSize == kInt64Size);
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
+ Ld_d(dst, src_op);
+}
+
+void LiftoffAssembler::LoadFullPointer(Register dst, Register src_addr,
+ int32_t offset_imm) {
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, no_reg, offset_imm);
+ Ld_d(dst, src_op);
+}
+
+void LiftoffAssembler::StoreTaggedPointer(Register dst_addr,
+ Register offset_reg,
+ int32_t offset_imm,
+ LiftoffRegister src,
+ LiftoffRegList pinned,
+ SkipWriteBarrier skip_write_barrier) {
+ UseScratchRegisterScope temps(this);
+ Operand offset_op =
+ offset_reg.is_valid() ? Operand(offset_reg) : Operand(offset_imm);
+  // For the write barrier (below), we cannot have both an offset register and
+  // an immediate offset, so fold the immediate into the offset register up
+  // front. Use a 64-bit register, since the MemOperand below requires one.
+ if (offset_reg.is_valid() && offset_imm) {
+ Register effective_offset = temps.Acquire();
+ Add_d(effective_offset, offset_reg, Operand(offset_imm));
+ offset_op = Operand(effective_offset);
+ }
+ if (offset_op.is_reg()) {
+ St_d(src.gp(), MemOperand(dst_addr, offset_op.rm()));
+ } else {
+ St_d(src.gp(), MemOperand(dst_addr, offset_imm));
+ }
+
+ if (skip_write_barrier || FLAG_disable_write_barriers) return;
+
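+  // Write barrier: the record-write stub is only needed if the destination
+  // page can hold interesting outgoing pointers and {src} is a heap object
+  // (not a Smi) on a page with interesting incoming pointers.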
+ Label write_barrier;
+ Label exit;
+ CheckPageFlag(dst_addr, MemoryChunk::kPointersFromHereAreInterestingMask, ne,
+ &write_barrier);
+ b(&exit);
+ bind(&write_barrier);
+ JumpIfSmi(src.gp(), &exit);
+ CheckPageFlag(src.gp(), MemoryChunk::kPointersToHereAreInterestingMask, eq,
+ &exit);
+ CallRecordWriteStubSaveRegisters(
+ dst_addr, offset_op, RememberedSetAction::kEmit, SaveFPRegsMode::kSave,
+ StubCallMode::kCallWasmRuntimeStub);
+ bind(&exit);
+}
+
+void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uintptr_t offset_imm,
+ LoadType type, LiftoffRegList pinned,
+ uint32_t* protected_load_pc, bool is_load_mem,
+ bool i64_offset) {
+ MemOperand src_op = liftoff::GetMemOp(this, src_addr, offset_reg, offset_imm);
+
+ if (protected_load_pc) *protected_load_pc = pc_offset();
+ switch (type.value()) {
+ case LoadType::kI32Load8U:
+ case LoadType::kI64Load8U:
+ Ld_bu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load8S:
+ case LoadType::kI64Load8S:
+ Ld_b(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16U:
+ case LoadType::kI64Load16U:
+ TurboAssembler::Ld_hu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load16S:
+ case LoadType::kI64Load16S:
+ TurboAssembler::Ld_h(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load32U:
+ TurboAssembler::Ld_wu(dst.gp(), src_op);
+ break;
+ case LoadType::kI32Load:
+ case LoadType::kI64Load32S:
+ TurboAssembler::Ld_w(dst.gp(), src_op);
+ break;
+ case LoadType::kI64Load:
+ TurboAssembler::Ld_d(dst.gp(), src_op);
+ break;
+ case LoadType::kF32Load:
+ TurboAssembler::Fld_s(dst.fp(), src_op);
+ break;
+ case LoadType::kF64Load:
+ TurboAssembler::Fld_d(dst.fp(), src_op);
+ break;
+ case LoadType::kS128Load:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned,
+ uint32_t* protected_store_pc, bool is_store_mem) {
+ MemOperand dst_op = liftoff::GetMemOp(this, dst_addr, offset_reg, offset_imm);
+
+ if (protected_store_pc) *protected_store_pc = pc_offset();
+ switch (type.value()) {
+ case StoreType::kI32Store8:
+ case StoreType::kI64Store8:
+ St_b(src.gp(), dst_op);
+ break;
+ case StoreType::kI32Store16:
+ case StoreType::kI64Store16:
+ TurboAssembler::St_h(src.gp(), dst_op);
+ break;
+ case StoreType::kI32Store:
+ case StoreType::kI64Store32:
+ TurboAssembler::St_w(src.gp(), dst_op);
+ break;
+ case StoreType::kI64Store:
+ TurboAssembler::St_d(src.gp(), dst_op);
+ break;
+ case StoreType::kF32Store:
+ TurboAssembler::Fst_s(src.fp(), dst_op);
+ break;
+ case StoreType::kF64Store:
+ TurboAssembler::Fst_d(src.fp(), dst_op);
+ break;
+ case StoreType::kS128Store:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uintptr_t offset_imm,
+ LoadType type, LiftoffRegList pinned) {
+ bailout(kAtomics, "AtomicLoad");
+}
+
+void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, LiftoffRegList pinned) {
+ bailout(kAtomics, "AtomicStore");
+}
+
+void LiftoffAssembler::AtomicAdd(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicAdd");
+}
+
+void LiftoffAssembler::AtomicSub(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicSub");
+}
+
+void LiftoffAssembler::AtomicAnd(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicAnd");
+}
+
+void LiftoffAssembler::AtomicOr(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicOr");
+}
+
+void LiftoffAssembler::AtomicXor(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm, LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicXor");
+}
+
+void LiftoffAssembler::AtomicExchange(Register dst_addr, Register offset_reg,
+ uintptr_t offset_imm,
+ LiftoffRegister value,
+ LiftoffRegister result, StoreType type) {
+ bailout(kAtomics, "AtomicExchange");
+}
+
+void LiftoffAssembler::AtomicCompareExchange(
+ Register dst_addr, Register offset_reg, uintptr_t offset_imm,
+ LiftoffRegister expected, LiftoffRegister new_value, LiftoffRegister result,
+ StoreType type) {
+ bailout(kAtomics, "AtomicCompareExchange");
+}
+
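+// {dbar 0} is a full memory barrier on LoongArch, which (conservatively)
+// implements the wasm atomic fence.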
+void LiftoffAssembler::AtomicFence() { dbar(0); }
+
+void LiftoffAssembler::LoadCallerFrameSlot(LiftoffRegister dst,
+ uint32_t caller_slot_idx,
+ ValueKind kind) {
+ MemOperand src(fp, kSystemPointerSize * (caller_slot_idx + 1));
+ liftoff::Load(this, dst, src, kind);
+}
+
+void LiftoffAssembler::StoreCallerFrameSlot(LiftoffRegister src,
+ uint32_t caller_slot_idx,
+ ValueKind kind) {
+ int32_t offset = kSystemPointerSize * (caller_slot_idx + 1);
+ liftoff::Store(this, fp, offset, src, kind);
+}
+
+void LiftoffAssembler::LoadReturnStackSlot(LiftoffRegister dst, int offset,
+ ValueKind kind) {
+ liftoff::Load(this, dst, MemOperand(sp, offset), kind);
+}
+
+void LiftoffAssembler::MoveStackValue(uint32_t dst_offset, uint32_t src_offset,
+ ValueKind kind) {
+ DCHECK_NE(dst_offset, src_offset);
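+  // Move via a temporary register: fill it from the source stack slot, then
+  // spill it to the destination slot.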
+ LiftoffRegister reg = GetUnusedRegister(reg_class_for(kind), {});
+ Fill(reg, src_offset, kind);
+ Spill(dst_offset, reg, kind);
+}
+
+void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
+ DCHECK_NE(dst, src);
+ // TODO(ksreten): Handle different sizes here.
+ TurboAssembler::Move(dst, src);
+}
+
+void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
+ ValueKind kind) {
+ DCHECK_NE(dst, src);
+ if (kind != kS128) {
+ TurboAssembler::Move(dst, src);
+ } else {
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
+ RecordUsedSpillOffset(offset);
+ MemOperand dst = liftoff::GetStackSlot(offset);
+ switch (kind) {
+ case kI32:
+ St_w(reg.gp(), dst);
+ break;
+ case kI64:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ St_d(reg.gp(), dst);
+ break;
+ case kF32:
+ Fst_s(reg.fp(), dst);
+ break;
+ case kF64:
+ TurboAssembler::Fst_d(reg.fp(), dst);
+ break;
+ case kS128:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::Spill(int offset, WasmValue value) {
+ RecordUsedSpillOffset(offset);
+ MemOperand dst = liftoff::GetStackSlot(offset);
+ switch (value.type().kind()) {
+ case kI32: {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
+ TurboAssembler::li(tmp.gp(), Operand(value.to_i32()));
+ St_w(tmp.gp(), dst);
+ break;
+ }
+ case kI64:
+ case kRef:
+ case kOptRef: {
+ LiftoffRegister tmp = GetUnusedRegister(kGpReg, {});
+ TurboAssembler::li(tmp.gp(), value.to_i64());
+ St_d(tmp.gp(), dst);
+ break;
+ }
+ default:
+ // kWasmF32 and kWasmF64 are unreachable, since those
+ // constants are not tracked.
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
+ MemOperand src = liftoff::GetStackSlot(offset);
+ switch (kind) {
+ case kI32:
+ Ld_w(reg.gp(), src);
+ break;
+ case kI64:
+ case kRef:
+ case kOptRef:
+    // TODO(LOONG_dev): LOONG64 Check, MIPS64 doesn't need, ARM64/LOONG64 need?
+ case kRtt:
+ case kRttWithDepth:
+ Ld_d(reg.gp(), src);
+ break;
+ case kF32:
+ Fld_s(reg.fp(), src);
+ break;
+ case kF64:
+ TurboAssembler::Fld_d(reg.fp(), src);
+ break;
+ case kS128:
+ UNREACHABLE();
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+void LiftoffAssembler::FillI64Half(Register, int offset, RegPairHalf) {
+ UNREACHABLE();
+}
+
+void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
+ DCHECK_LT(0, size);
+ RecordUsedSpillOffset(start + size);
+
+ if (size <= 12 * kStackSlotSize) {
+ // Special straight-line code for up to 12 slots. Generates one
+ // instruction per slot (<= 12 instructions total).
+ uint32_t remainder = size;
+ for (; remainder >= kStackSlotSize; remainder -= kStackSlotSize) {
+ St_d(zero_reg, liftoff::GetStackSlot(start + remainder));
+ }
+ DCHECK(remainder == 4 || remainder == 0);
+ if (remainder) {
+ St_w(zero_reg, liftoff::GetStackSlot(start + remainder));
+ }
+ } else {
+ // General case for bigger counts (12 instructions).
+ // Use a0 for start address (inclusive), a1 for end address (exclusive).
+ Push(a1, a0);
+ Add_d(a0, fp, Operand(-start - size));
+ Add_d(a1, fp, Operand(-start));
+
+ Label loop;
+ bind(&loop);
+ St_d(zero_reg, MemOperand(a0, 0));
+ addi_d(a0, a0, kSystemPointerSize);
+ BranchShort(&loop, ne, a0, Operand(a1));
+
+ Pop(a1, a0);
+ }
+}
+
+void LiftoffAssembler::emit_i64_clz(LiftoffRegister dst, LiftoffRegister src) {
+ TurboAssembler::Clz_d(dst.gp(), src.gp());
+}
+
+void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
+ TurboAssembler::Ctz_d(dst.gp(), src.gp());
+}
+
+bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ TurboAssembler::Popcnt_d(dst.gp(), src.gp());
+ return true;
+}
+
+void LiftoffAssembler::emit_i32_mul(Register dst, Register lhs, Register rhs) {
+ TurboAssembler::Mul_w(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero,
+ Label* trap_div_unrepresentable) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+
+ // Check if lhs == kMinInt and rhs == -1, since this case is unrepresentable.
+ TurboAssembler::li(kScratchReg, 1);
+ TurboAssembler::li(kScratchReg2, 1);
+ TurboAssembler::LoadZeroOnCondition(kScratchReg, lhs, Operand(kMinInt), eq);
+ TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs, Operand(-1), eq);
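+  // Each scratch register was zeroed only if its condition held, so their sum
+  // is zero iff lhs == kMinInt and rhs == -1.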
+ add_d(kScratchReg, kScratchReg, kScratchReg2);
+ TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
+ Operand(zero_reg));
+
+ TurboAssembler::Div_w(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ TurboAssembler::Div_wu(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ TurboAssembler::Mod_w(dst, lhs, rhs);
+}
+
+void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs, Operand(zero_reg));
+ TurboAssembler::Mod_wu(dst, lhs, rhs);
+}
+
+#define I32_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register lhs, \
+ Register rhs) { \
+ instruction(dst, lhs, rhs); \
+ }
+
+// clang-format off
+I32_BINOP(add, add_w)
+I32_BINOP(sub, sub_w)
+I32_BINOP(and, and_)
+I32_BINOP(or, or_)
+I32_BINOP(xor, xor_)
+// clang-format on
+
+#undef I32_BINOP
+
+#define I32_BINOP_I(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register lhs, \
+ int32_t imm) { \
+ instruction(dst, lhs, Operand(imm)); \
+ }
+
+// clang-format off
+I32_BINOP_I(add, Add_w)
+I32_BINOP_I(sub, Sub_w)
+I32_BINOP_I(and, And)
+I32_BINOP_I(or, Or)
+I32_BINOP_I(xor, Xor)
+// clang-format on
+
+#undef I32_BINOP_I
+
+void LiftoffAssembler::emit_i32_clz(Register dst, Register src) {
+ TurboAssembler::Clz_w(dst, src);
+}
+
+void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
+ TurboAssembler::Ctz_w(dst, src);
+}
+
+bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
+ TurboAssembler::Popcnt_w(dst, src);
+ return true;
+}
+
+#define I32_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name(Register dst, Register src, \
+ Register amount) { \
+ instruction(dst, src, amount); \
+ }
+#define I32_SHIFTOP_I(name, instruction, instruction1) \
+ I32_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i32_##name##i(Register dst, Register src, \
+ int amount) { \
+ instruction1(dst, src, amount & 0x1f); \
+ }
+
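+// Wasm takes 32-bit shift counts modulo 32, hence the {amount & 0x1f} mask.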
+I32_SHIFTOP_I(shl, sll_w, slli_w)
+I32_SHIFTOP_I(sar, sra_w, srai_w)
+I32_SHIFTOP_I(shr, srl_w, srli_w)
+
+#undef I32_SHIFTOP
+#undef I32_SHIFTOP_I
+
+void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
+ int64_t imm) {
+ TurboAssembler::Add_d(dst.gp(), lhs.gp(), Operand(imm));
+}
+
+void LiftoffAssembler::emit_i64_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ TurboAssembler::Mul_d(dst.gp(), lhs.gp(), rhs.gp());
+}
+
+bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero,
+ Label* trap_div_unrepresentable) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+
+ // Check if lhs == MinInt64 and rhs == -1, since this case is unrepresentable.
+ TurboAssembler::li(kScratchReg, 1);
+ TurboAssembler::li(kScratchReg2, 1);
+ TurboAssembler::LoadZeroOnCondition(
+ kScratchReg, lhs.gp(), Operand(std::numeric_limits<int64_t>::min()), eq);
+ TurboAssembler::LoadZeroOnCondition(kScratchReg2, rhs.gp(), Operand(-1), eq);
+ add_d(kScratchReg, kScratchReg, kScratchReg2);
+ TurboAssembler::Branch(trap_div_unrepresentable, eq, kScratchReg,
+ Operand(zero_reg));
+
+ TurboAssembler::Div_d(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ TurboAssembler::Div_du(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ TurboAssembler::Mod_d(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ Label* trap_div_by_zero) {
+ TurboAssembler::Branch(trap_div_by_zero, eq, rhs.gp(), Operand(zero_reg));
+ TurboAssembler::Mod_du(dst.gp(), lhs.gp(), rhs.gp());
+ return true;
+}
+
+#define I64_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name( \
+ LiftoffRegister dst, LiftoffRegister lhs, LiftoffRegister rhs) { \
+ instruction(dst.gp(), lhs.gp(), rhs.gp()); \
+ }
+
+// clang-format off
+I64_BINOP(add, Add_d)
+I64_BINOP(sub, Sub_d)
+I64_BINOP(and, and_)
+I64_BINOP(or, or_)
+I64_BINOP(xor, xor_)
+// clang-format on
+
+#undef I64_BINOP
+
+#define I64_BINOP_I(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name##i( \
+ LiftoffRegister dst, LiftoffRegister lhs, int32_t imm) { \
+ instruction(dst.gp(), lhs.gp(), Operand(imm)); \
+ }
+
+// clang-format off
+I64_BINOP_I(and, And)
+I64_BINOP_I(or, Or)
+I64_BINOP_I(xor, Xor)
+// clang-format on
+
+#undef I64_BINOP_I
+
+#define I64_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name( \
+ LiftoffRegister dst, LiftoffRegister src, Register amount) { \
+ instruction(dst.gp(), src.gp(), amount); \
+ }
+#define I64_SHIFTOP_I(name, instruction, instructioni) \
+ I64_SHIFTOP(name, instruction) \
+ void LiftoffAssembler::emit_i64_##name##i(LiftoffRegister dst, \
+ LiftoffRegister src, int amount) { \
+ instructioni(dst.gp(), src.gp(), amount & 63); \
+ }
+
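+// Likewise, 64-bit shift counts are taken modulo 64 ({amount & 63}).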
+I64_SHIFTOP_I(shl, sll_d, slli_d)
+I64_SHIFTOP_I(sar, sra_d, srai_d)
+I64_SHIFTOP_I(shr, srl_d, srli_d)
+
+#undef I64_SHIFTOP
+#undef I64_SHIFTOP_I
+
+void LiftoffAssembler::emit_u32_to_intptr(Register dst, Register src) {
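+  // bstrpick_d copies bits [31:0] of {src} into {dst}, zero-extending the
+  // value to 64 bits.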
+ bstrpick_d(dst, src, 31, 0);
+}
+
+void LiftoffAssembler::emit_f32_neg(DoubleRegister dst, DoubleRegister src) {
+ TurboAssembler::Neg_s(dst, src);
+}
+
+void LiftoffAssembler::emit_f64_neg(DoubleRegister dst, DoubleRegister src) {
+ TurboAssembler::Neg_d(dst, src);
+}
+
+void LiftoffAssembler::emit_f32_min(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Label ool, done;
+ TurboAssembler::Float32Min(dst, lhs, rhs, &ool);
+ Branch(&done);
+
+ bind(&ool);
+ TurboAssembler::Float32MinOutOfLine(dst, lhs, rhs);
+ bind(&done);
+}
+
+void LiftoffAssembler::emit_f32_max(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Label ool, done;
+ TurboAssembler::Float32Max(dst, lhs, rhs, &ool);
+ Branch(&done);
+
+ bind(&ool);
+ TurboAssembler::Float32MaxOutOfLine(dst, lhs, rhs);
+ bind(&done);
+}
+
+void LiftoffAssembler::emit_f32_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ bailout(kComplexOperation, "f32_copysign");
+}
+
+void LiftoffAssembler::emit_f64_min(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Label ool, done;
+ TurboAssembler::Float64Min(dst, lhs, rhs, &ool);
+ Branch(&done);
+
+ bind(&ool);
+ TurboAssembler::Float64MinOutOfLine(dst, lhs, rhs);
+ bind(&done);
+}
+
+void LiftoffAssembler::emit_f64_max(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Label ool, done;
+ TurboAssembler::Float64Max(dst, lhs, rhs, &ool);
+ Branch(&done);
+
+ bind(&ool);
+ TurboAssembler::Float64MaxOutOfLine(dst, lhs, rhs);
+ bind(&done);
+}
+
+void LiftoffAssembler::emit_f64_copysign(DoubleRegister dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ bailout(kComplexOperation, "f64_copysign");
+}
+
+#define FP_BINOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister lhs, \
+ DoubleRegister rhs) { \
+ instruction(dst, lhs, rhs); \
+ }
+#define FP_UNOP(name, instruction) \
+ void LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ instruction(dst, src); \
+ }
+#define FP_UNOP_RETURN_TRUE(name, instruction) \
+ bool LiftoffAssembler::emit_##name(DoubleRegister dst, DoubleRegister src) { \
+ instruction(dst, src); \
+ return true; \
+ }
+
+FP_BINOP(f32_add, fadd_s)
+FP_BINOP(f32_sub, fsub_s)
+FP_BINOP(f32_mul, fmul_s)
+FP_BINOP(f32_div, fdiv_s)
+FP_UNOP(f32_abs, fabs_s)
+FP_UNOP_RETURN_TRUE(f32_ceil, Ceil_s)
+FP_UNOP_RETURN_TRUE(f32_floor, Floor_s)
+FP_UNOP_RETURN_TRUE(f32_trunc, Trunc_s)
+FP_UNOP_RETURN_TRUE(f32_nearest_int, Round_s)
+FP_UNOP(f32_sqrt, fsqrt_s)
+FP_BINOP(f64_add, fadd_d)
+FP_BINOP(f64_sub, fsub_d)
+FP_BINOP(f64_mul, fmul_d)
+FP_BINOP(f64_div, fdiv_d)
+FP_UNOP(f64_abs, fabs_d)
+FP_UNOP_RETURN_TRUE(f64_ceil, Ceil_d)
+FP_UNOP_RETURN_TRUE(f64_floor, Floor_d)
+FP_UNOP_RETURN_TRUE(f64_trunc, Trunc_d)
+FP_UNOP_RETURN_TRUE(f64_nearest_int, Round_d)
+FP_UNOP(f64_sqrt, fsqrt_d)
+
+#undef FP_BINOP
+#undef FP_UNOP
+#undef FP_UNOP_RETURN_TRUE
+
+bool LiftoffAssembler::emit_type_conversion(WasmOpcode opcode,
+ LiftoffRegister dst,
+ LiftoffRegister src, Label* trap) {
+ switch (opcode) {
+ case kExprI32ConvertI64:
+ TurboAssembler::bstrpick_w(dst.gp(), src.gp(), 31, 0);
+ return true;
+ case kExprI32SConvertF32: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_s(rounded.fp(), src.fp());
+ ftintrz_w_s(kScratchDoubleReg, rounded.fp());
+ movfr2gr_s(dst.gp(), kScratchDoubleReg);
+ // Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
+ // because INT32_MIN allows easier out-of-bounds detection.
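+      // dst + 1 wraps around to kMinInt exactly when dst == kMaxInt; Slt
+      // detects the wrap and Movn then installs the wrapped value in dst.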
+ TurboAssembler::Add_w(kScratchReg, dst.gp(), 1);
+ TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
+ TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
+
+      // Check whether to trap.
+ movgr2fr_w(kScratchDoubleReg, dst.gp());
+ ffint_s_w(converted_back.fp(), kScratchDoubleReg);
+ TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI32UConvertF32: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_s(rounded.fp(), src.fp());
+ TurboAssembler::Ftintrz_uw_s(dst.gp(), rounded.fp(), kScratchDoubleReg);
+ // Avoid UINT32_MAX as an overflow indicator and use 0 instead,
+ // because 0 allows easier out-of-bounds detection.
+ TurboAssembler::Add_w(kScratchReg, dst.gp(), 1);
+ TurboAssembler::Movz(dst.gp(), zero_reg, kScratchReg);
+
+      // Check whether to trap.
+ TurboAssembler::Ffint_d_uw(converted_back.fp(), dst.gp());
+ fcvt_s_d(converted_back.fp(), converted_back.fp());
+ TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI32SConvertF64: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_d(rounded.fp(), src.fp());
+ ftintrz_w_d(kScratchDoubleReg, rounded.fp());
+ movfr2gr_s(dst.gp(), kScratchDoubleReg);
+
+      // Check whether to trap.
+ ffint_d_w(converted_back.fp(), kScratchDoubleReg);
+ TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI32UConvertF64: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_d(rounded.fp(), src.fp());
+ TurboAssembler::Ftintrz_uw_d(dst.gp(), rounded.fp(), kScratchDoubleReg);
+
+      // Check whether to trap.
+ TurboAssembler::Ffint_d_uw(converted_back.fp(), dst.gp());
+ TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI32ReinterpretF32:
+ TurboAssembler::FmoveLow(dst.gp(), src.fp());
+ return true;
+ case kExprI64SConvertI32:
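+      // A 32-bit shift by 0 sign-extends the low 32 bits of {src} into the
+      // full 64-bit destination register.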
+ slli_w(dst.gp(), src.gp(), 0);
+ return true;
+ case kExprI64UConvertI32:
+ TurboAssembler::bstrpick_d(dst.gp(), src.gp(), 31, 0);
+ return true;
+ case kExprI64SConvertF32: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_s(rounded.fp(), src.fp());
+ ftintrz_l_s(kScratchDoubleReg, rounded.fp());
+ movfr2gr_d(dst.gp(), kScratchDoubleReg);
+ // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
+ // because INT64_MIN allows easier out-of-bounds detection.
+ TurboAssembler::Add_d(kScratchReg, dst.gp(), 1);
+ TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
+ TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
+
+      // Check whether to trap.
+ movgr2fr_d(kScratchDoubleReg, dst.gp());
+ ffint_s_l(converted_back.fp(), kScratchDoubleReg);
+ TurboAssembler::CompareF32(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI64UConvertF32: {
+ // Real conversion.
+ TurboAssembler::Ftintrz_ul_s(dst.gp(), src.fp(), kScratchDoubleReg,
+ kScratchReg);
+
+      // Check whether to trap.
+ TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
+ return true;
+ }
+ case kExprI64SConvertF64: {
+ LiftoffRegister rounded =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src));
+ LiftoffRegister converted_back =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(src, rounded));
+
+ // Real conversion.
+ TurboAssembler::Trunc_d(rounded.fp(), src.fp());
+ ftintrz_l_d(kScratchDoubleReg, rounded.fp());
+ movfr2gr_d(dst.gp(), kScratchDoubleReg);
+ // Avoid INT64_MAX as an overflow indicator and use INT64_MIN instead,
+ // because INT64_MIN allows easier out-of-bounds detection.
+ TurboAssembler::Add_d(kScratchReg, dst.gp(), 1);
+ TurboAssembler::Slt(kScratchReg2, kScratchReg, dst.gp());
+ TurboAssembler::Movn(dst.gp(), kScratchReg, kScratchReg2);
+
+      // Check whether to trap.
+ movgr2fr_d(kScratchDoubleReg, dst.gp());
+ ffint_d_l(converted_back.fp(), kScratchDoubleReg);
+ TurboAssembler::CompareF64(rounded.fp(), converted_back.fp(), CEQ);
+ TurboAssembler::BranchFalseF(trap);
+ return true;
+ }
+ case kExprI64UConvertF64: {
+ // Real conversion.
+ TurboAssembler::Ftintrz_ul_d(dst.gp(), src.fp(), kScratchDoubleReg,
+ kScratchReg);
+
+      // Check whether to trap.
+ TurboAssembler::Branch(trap, eq, kScratchReg, Operand(zero_reg));
+ return true;
+ }
+ case kExprI64ReinterpretF64:
+ movfr2gr_d(dst.gp(), src.fp());
+ return true;
+ case kExprF32SConvertI32: {
+ LiftoffRegister scratch =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
+ movgr2fr_w(scratch.fp(), src.gp());
+ ffint_s_w(dst.fp(), scratch.fp());
+ return true;
+ }
+ case kExprF32UConvertI32:
+ TurboAssembler::Ffint_s_uw(dst.fp(), src.gp());
+ return true;
+ case kExprF32ConvertF64:
+ fcvt_s_d(dst.fp(), src.fp());
+ return true;
+ case kExprF32ReinterpretI32:
+ TurboAssembler::FmoveLow(dst.fp(), src.gp());
+ return true;
+ case kExprF64SConvertI32: {
+ LiftoffRegister scratch =
+ GetUnusedRegister(kFpReg, LiftoffRegList::ForRegs(dst));
+ movgr2fr_w(scratch.fp(), src.gp());
+ ffint_d_w(dst.fp(), scratch.fp());
+ return true;
+ }
+ case kExprF64UConvertI32:
+ TurboAssembler::Ffint_d_uw(dst.fp(), src.gp());
+ return true;
+ case kExprF64ConvertF32:
+ fcvt_d_s(dst.fp(), src.fp());
+ return true;
+ case kExprF64ReinterpretI64:
+ movgr2fr_d(dst.fp(), src.gp());
+ return true;
+ case kExprI32SConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF32");
+ return true;
+ case kExprI32UConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF32");
+ return true;
+ case kExprI32SConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI32SConvertSatF64");
+ return true;
+ case kExprI32UConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI32UConvertSatF64");
+ return true;
+ case kExprI64SConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF32");
+ return true;
+ case kExprI64UConvertSatF32:
+ bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF32");
+ return true;
+ case kExprI64SConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI64SConvertSatF64");
+ return true;
+ case kExprI64UConvertSatF64:
+ bailout(kNonTrappingFloatToInt, "kExprI64UConvertSatF64");
+ return true;
+ default:
+ return false;
+ }
+}
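// Aside on the Add_d/Slt/Movn triple in the I64SConvertF32/F64 cases above: a
// minimal C++ sketch, assuming (as the comments suggest) that ftintrz
// saturates a positive out-of-range input to INT64_MAX. Rewriting that
// sentinel to INT64_MIN lets the round-trip compare reject it reliably.
// Illustrative only:
#include <cstdint>
int64_t CanonicalizeOverflow(int64_t v) {
  // Unsigned add avoids signed-overflow UB; INT64_MAX wraps to INT64_MIN.
  int64_t plus_one = static_cast<int64_t>(static_cast<uint64_t>(v) + 1);
  bool wrapped = plus_one < v;    // Slt: true only when v == INT64_MAX
  return wrapped ? plus_one : v;  // Movn: INT64_MAX becomes INT64_MIN
}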
+
+void LiftoffAssembler::emit_i32_signextend_i8(Register dst, Register src) {
+ bailout(kComplexOperation, "i32_signextend_i8");
+}
+
+void LiftoffAssembler::emit_i32_signextend_i16(Register dst, Register src) {
+ bailout(kComplexOperation, "i32_signextend_i16");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i8(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kComplexOperation, "i64_signextend_i8");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i16(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kComplexOperation, "i64_signextend_i16");
+}
+
+void LiftoffAssembler::emit_i64_signextend_i32(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kComplexOperation, "i64_signextend_i32");
+}
+
+void LiftoffAssembler::emit_jump(Label* label) {
+ TurboAssembler::Branch(label);
+}
+
+void LiftoffAssembler::emit_jump(Register target) {
+ TurboAssembler::Jump(target);
+}
+
+void LiftoffAssembler::emit_cond_jump(LiftoffCondition liftoff_cond,
+ Label* label, ValueKind kind,
+ Register lhs, Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ if (rhs == no_reg) {
+ DCHECK(kind == kI32 || kind == kI64);
+ TurboAssembler::Branch(label, cond, lhs, Operand(zero_reg));
+ } else {
+ DCHECK((kind == kI32 || kind == kI64) ||
+ (is_reference(kind) &&
+ (liftoff_cond == kEqual || liftoff_cond == kUnequal)));
+ TurboAssembler::Branch(label, cond, lhs, Operand(rhs));
+ }
+}
+
+void LiftoffAssembler::emit_i32_cond_jumpi(LiftoffCondition liftoff_cond,
+ Label* label, Register lhs,
+ int32_t imm) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ TurboAssembler::Branch(label, cond, lhs, Operand(imm));
+}
+
+void LiftoffAssembler::emit_i32_eqz(Register dst, Register src) {
+ sltui(dst, src, 1);
+}
+
+void LiftoffAssembler::emit_i32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, Register lhs,
+ Register rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ Register tmp = dst;
+ if (dst == lhs || dst == rhs) {
+ tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
+ }
+ // Write 1 as result.
+ TurboAssembler::li(tmp, 1);
+
+ // If the negated condition holds, overwrite the result with 0.
+ Condition neg_cond = NegateCondition(cond);
+ TurboAssembler::LoadZeroOnCondition(tmp, lhs, Operand(rhs), neg_cond);
+
+ // Move the result into dst if a temporary register was used.
+ TurboAssembler::Move(dst, tmp);
+}
+
+void LiftoffAssembler::emit_i64_eqz(Register dst, LiftoffRegister src) {
+ sltui(dst, src.gp(), 1);
+}
+
+void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ Register tmp = dst;
+ if (dst == lhs.gp() || dst == rhs.gp()) {
+ tmp = GetUnusedRegister(kGpReg, LiftoffRegList::ForRegs(lhs, rhs)).gp();
+ }
+ // Write 1 as result.
+ TurboAssembler::li(tmp, 1);
+
+ // If the negated condition holds, overwrite the result with 0.
+ Condition neg_cond = NegateCondition(cond);
+ TurboAssembler::LoadZeroOnCondition(tmp, lhs.gp(), Operand(rhs.gp()),
+ neg_cond);
+
+ // Move the result into dst if a temporary register was used.
+ TurboAssembler::Move(dst, tmp);
+}
+
+namespace liftoff {
+
+inline FPUCondition ConditionToConditionCmpFPU(LiftoffCondition condition,
+ bool* predicate) {
+ switch (condition) {
+ case kEqual:
+ *predicate = true;
+ return CEQ;
+ case kUnequal:
+ *predicate = false;
+ return CEQ;
+ case kUnsignedLessThan:
+ *predicate = true;
+ return CLT;
+ case kUnsignedGreaterEqual:
+ *predicate = false;
+ return CLT;
+ case kUnsignedLessEqual:
+ *predicate = true;
+ return CLE;
+ case kUnsignedGreaterThan:
+ *predicate = false;
+ return CLE;
+ default:
+ *predicate = true;
+ break;
+ }
+ UNREACHABLE();
+}
+
+} // namespace liftoff
+
+void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ Label not_nan, cont;
+ TurboAssembler::CompareIsNanF32(lhs, rhs);
+ TurboAssembler::BranchFalseF(&not_nan);
+ // If one of the operands is NaN, return 1 for f32.ne, else 0.
+ if (cond == ne) {
+ TurboAssembler::li(dst, 1);
+ } else {
+ TurboAssembler::Move(dst, zero_reg);
+ }
+ TurboAssembler::Branch(&cont);
+
+ bind(&not_nan);
+
+ TurboAssembler::li(dst, 1);
+ bool predicate;
+ FPUCondition fcond =
+ liftoff::ConditionToConditionCmpFPU(liftoff_cond, &predicate);
+ TurboAssembler::CompareF32(lhs, rhs, fcond);
+ if (predicate) {
+ TurboAssembler::LoadZeroIfNotFPUCondition(dst);
+ } else {
+ TurboAssembler::LoadZeroIfFPUCondition(dst);
+ }
+
+ bind(&cont);
+}
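// A C++ model of the NaN handling above, assuming Wasm's semantics that every
// float comparison with a NaN operand is false except ne, which is true:
#include <cmath>
int F32Eq(float lhs, float rhs) {
  if (std::isnan(lhs) || std::isnan(rhs)) return 0;  // unordered: eq is false
  return lhs == rhs ? 1 : 0;
}
int F32Ne(float lhs, float rhs) { return 1 - F32Eq(lhs, rhs); }  // ne negates eq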
+
+void LiftoffAssembler::emit_f64_set_cond(LiftoffCondition liftoff_cond,
+ Register dst, DoubleRegister lhs,
+ DoubleRegister rhs) {
+ Condition cond = liftoff::ToCondition(liftoff_cond);
+ Label not_nan, cont;
+ TurboAssembler::CompareIsNanF64(lhs, rhs);
+ TurboAssembler::BranchFalseF(&not_nan);
+ // If one of the operands is NaN, return 1 for f64.ne, else 0.
+ if (cond == ne) {
+ TurboAssembler::li(dst, 1);
+ } else {
+ TurboAssembler::Move(dst, zero_reg);
+ }
+ TurboAssembler::Branch(&cont);
+
+ bind(&not_nan);
+
+ TurboAssembler::li(dst, 1);
+ bool predicate;
+ FPUCondition fcond =
+ liftoff::ConditionToConditionCmpFPU(liftoff_cond, &predicate);
+ TurboAssembler::CompareF64(lhs, rhs, fcond);
+ if (predicate) {
+ TurboAssembler::LoadZeroIfNotFPUCondition(dst);
+ } else {
+ TurboAssembler::LoadZeroIfFPUCondition(dst);
+ }
+
+ bind(&cont);
+}
+
+bool LiftoffAssembler::emit_select(LiftoffRegister dst, Register condition,
+ LiftoffRegister true_value,
+ LiftoffRegister false_value,
+ ValueKind kind) {
+ return false;
+}
+
+void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
+ SmiCheckMode mode) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ And(scratch, obj, Operand(kSmiTagMask));
+ Condition condition = mode == kJumpOnSmi ? eq : ne;
+ Branch(target, condition, scratch, Operand(zero_reg));
+}
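// The And/Branch pair above is V8's pointer-tag test: Smis keep the low bit
// clear, heap objects set it. A sketch, assuming kSmiTagMask == 1 on 64-bit:
#include <cstdint>
inline bool IsSmi(uintptr_t tagged) { return (tagged & 1) == 0; }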
+
+void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
+ Register offset_reg, uintptr_t offset_imm,
+ LoadType type,
+ LoadTransformationKind transform,
+ uint32_t* protected_load_pc) {
+ bailout(kSimd, "load extend and load splat unimplemented");
+}
+
+void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
+ Register addr, Register offset_reg,
+ uintptr_t offset_imm, LoadType type,
+ uint8_t laneidx, uint32_t* protected_load_pc) {
+ bailout(kSimd, "loadlane");
+}
+
+void LiftoffAssembler::StoreLane(Register dst, Register offset,
+ uintptr_t offset_imm, LiftoffRegister src,
+ StoreType type, uint8_t lane,
+ uint32_t* protected_store_pc) {
+ bailout(kSimd, "storelane");
+}
+
+void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs,
+ const uint8_t shuffle[16],
+ bool is_swizzle) {
+ bailout(kSimd, "emit_i8x16_shuffle");
+}
+
+void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_swizzle");
+}
+
+void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_splat");
+}
+
+void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_splat");
+}
+
+void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_splat");
+}
+
+void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_splat");
+}
+
+void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_splat");
+}
+
+void LiftoffAssembler::emit_f64x2_splat(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_splat");
+}
+
+#define SIMD_BINOP(name1, name2) \
+ void LiftoffAssembler::emit_##name1##_extmul_low_##name2( \
+ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
+ bailout(kSimd, "emit_" #name1 "_extmul_low_" #name2); \
+ } \
+ void LiftoffAssembler::emit_##name1##_extmul_high_##name2( \
+ LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) { \
+ bailout(kSimd, "emit_" #name1 "_extmul_high_" #name2); \
+ }
+
+SIMD_BINOP(i16x8, i8x16_s)
+SIMD_BINOP(i16x8, i8x16_u)
+
+SIMD_BINOP(i32x4, i16x8_s)
+SIMD_BINOP(i32x4, i16x8_u)
+
+SIMD_BINOP(i64x2, i32x4_s)
+SIMD_BINOP(i64x2, i32x4_u)
+
+#undef SIMD_BINOP
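// For reference, the first SIMD_BINOP instantiation above expands to:
//   void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(
//       LiftoffRegister dst, LiftoffRegister src1, LiftoffRegister src2) {
//     bailout(kSimd, "emit_i16x8_extmul_low_i8x16_s");
//   }
// plus the matching emit_i16x8_extmul_high_i8x16_s stub.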
+
+#define SIMD_BINOP(name1, name2) \
+ void LiftoffAssembler::emit_##name1##_extadd_pairwise_##name2( \
+ LiftoffRegister dst, LiftoffRegister src) { \
+ bailout(kSimd, "emit_" #name1 "_extadd_pairwise_" #name2); \
+ }
+
+SIMD_BINOP(i16x8, i8x16_s)
+SIMD_BINOP(i16x8, i8x16_u)
+SIMD_BINOP(i32x4, i16x8_s)
+SIMD_BINOP(i32x4, i16x8_u)
+#undef SIMD_BINOP
+
+void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2) {
+ bailout(kSimd, "emit_i16x8_q15mulr_sat_s");
+}
+
+void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_eq");
+}
+
+void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_ne");
+}
+
+void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_gt_s");
+}
+
+void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_gt_u");
+}
+
+void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_ge_s");
+}
+
+void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_ge_u");
+}
+
+void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_eq");
+}
+
+void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_ne");
+}
+
+void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_gt_s");
+}
+
+void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_gt_u");
+}
+
+void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_ge_s");
+}
+
+void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_ge_u");
+}
+
+void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_eq");
+}
+
+void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_ne");
+}
+
+void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_gt_s");
+}
+
+void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_gt_u");
+}
+
+void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_ge_s");
+}
+
+void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_ge_u");
+}
+
+void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_eq");
+}
+
+void LiftoffAssembler::emit_f32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_ne");
+}
+
+void LiftoffAssembler::emit_f32x4_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_lt");
+}
+
+void LiftoffAssembler::emit_f32x4_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_le");
+}
+
+void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_eq");
+}
+
+void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_ne");
+}
+
+void LiftoffAssembler::emit_i64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_abs");
+}
+
+void LiftoffAssembler::emit_f64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_eq");
+}
+
+void LiftoffAssembler::emit_f64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_ne");
+}
+
+void LiftoffAssembler::emit_f64x2_lt(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_lt");
+}
+
+void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_le");
+}
+
+void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
+ const uint8_t imms[16]) {
+ bailout(kSimd, "emit_s128_const");
+}
+
+void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
+ bailout(kSimd, "emit_s128_not");
+}
+
+void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_and");
+}
+
+void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_or");
+}
+
+void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_xor");
+}
+
+void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_s128_and_not");
+}
+
+void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ LiftoffRegister mask) {
+ bailout(kSimd, "emit_s128_select");
+}
+
+void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_neg");
+}
+
+void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_v128_anytrue");
+}
+
+void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_alltrue");
+}
+
+void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_bitmask");
+}
+
+void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_shl");
+}
+
+void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shli");
+}
+
+void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_shr_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shri_s");
+}
+
+void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_shr_u");
+}
+
+void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i8x16_shri_u");
+}
+
+void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add");
+}
+
+void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add_sat_s");
+}
+
+void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_add_sat_u");
+}
+
+void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub");
+}
+
+void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub_sat_s");
+}
+
+void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sub_sat_u");
+}
+
+void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_min_s");
+}
+
+void LiftoffAssembler::emit_i8x16_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_min_u");
+}
+
+void LiftoffAssembler::emit_i8x16_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_max_s");
+}
+
+void LiftoffAssembler::emit_i8x16_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_max_u");
+}
+
+void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_popcnt");
+}
+
+void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_neg");
+}
+
+void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_alltrue");
+}
+
+void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_bitmask");
+}
+
+void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_shl");
+}
+
+void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shli");
+}
+
+void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_shr_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shri_s");
+}
+
+void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_shr_u");
+}
+
+void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i16x8_shri_u");
+}
+
+void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add");
+}
+
+void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add_sat_s");
+}
+
+void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_add_sat_u");
+}
+
+void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub");
+}
+
+void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub_sat_s");
+}
+
+void LiftoffAssembler::emit_i16x8_sub_sat_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sub_sat_u");
+}
+
+void LiftoffAssembler::emit_i16x8_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_mul");
+}
+
+void LiftoffAssembler::emit_i16x8_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_min_s");
+}
+
+void LiftoffAssembler::emit_i16x8_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_min_u");
+}
+
+void LiftoffAssembler::emit_i16x8_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_max_s");
+}
+
+void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_max_u");
+}
+
+void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_neg");
+}
+
+void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_alltrue");
+}
+
+void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_bitmask");
+}
+
+void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shl");
+}
+
+void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shli");
+}
+
+void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shr_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shri_s");
+}
+
+void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_shr_u");
+}
+
+void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i32x4_shri_u");
+}
+
+void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_add");
+}
+
+void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_sub");
+}
+
+void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_mul");
+}
+
+void LiftoffAssembler::emit_i32x4_min_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_min_s");
+}
+
+void LiftoffAssembler::emit_i32x4_min_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_min_u");
+}
+
+void LiftoffAssembler::emit_i32x4_max_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_max_s");
+}
+
+void LiftoffAssembler::emit_i32x4_max_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_max_u");
+}
+
+void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i32x4_dot_i16x8_s");
+}
+
+void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_neg");
+}
+
+void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_alltrue");
+}
+
+void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_bitmask");
+}
+
+void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shl");
+}
+
+void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
+ int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shli");
+}
+
+void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shr_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shri_s");
+}
+
+void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_shr_u");
+}
+
+void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
+ LiftoffRegister lhs, int32_t rhs) {
+ bailout(kSimd, "emit_i64x2_shri_u");
+}
+
+void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_add");
+}
+
+void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_sub");
+}
+
+void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_mul");
+}
+
+void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_gt_s");
+}
+
+void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i64x2_ge_s");
+}
+
+void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_abs");
+}
+
+void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_neg");
+}
+
+void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_sqrt");
+}
+
+bool LiftoffAssembler::emit_f32x4_ceil(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_ceil");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32x4_floor(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_floor");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32x4_trunc(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_trunc");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f32x4_nearest_int(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_nearest_int");
+ return true;
+}
+
+void LiftoffAssembler::emit_f32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_add");
+}
+
+void LiftoffAssembler::emit_f32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_sub");
+}
+
+void LiftoffAssembler::emit_f32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_mul");
+}
+
+void LiftoffAssembler::emit_f32x4_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_div");
+}
+
+void LiftoffAssembler::emit_f32x4_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_min");
+}
+
+void LiftoffAssembler::emit_f32x4_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_max");
+}
+
+void LiftoffAssembler::emit_f32x4_pmin(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_pmin");
+}
+
+void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f32x4_pmax");
+}
+
+void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_abs");
+}
+
+void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_neg");
+}
+
+void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_sqrt");
+}
+
+bool LiftoffAssembler::emit_f64x2_ceil(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_ceil");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64x2_floor(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_floor");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64x2_trunc(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_trunc");
+ return true;
+}
+
+bool LiftoffAssembler::emit_f64x2_nearest_int(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_nearest_int");
+ return true;
+}
+
+void LiftoffAssembler::emit_f64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_add");
+}
+
+void LiftoffAssembler::emit_f64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_sub");
+}
+
+void LiftoffAssembler::emit_f64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_mul");
+}
+
+void LiftoffAssembler::emit_f64x2_div(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_div");
+}
+
+void LiftoffAssembler::emit_f64x2_min(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_min");
+}
+
+void LiftoffAssembler::emit_f64x2_max(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_max");
+}
+
+void LiftoffAssembler::emit_f64x2_pmin(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_pmin");
+}
+
+void LiftoffAssembler::emit_f64x2_pmax(LiftoffRegister dst, LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_f64x2_pmax");
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_convert_low_i32x4_s");
+}
+
+void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_convert_low_i32x4_u");
+}
+
+void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f64x2_promote_low_f32x4");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_sconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_uconvert_f32x4");
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_trunc_sat_f64x2_s_zero");
+}
+
+void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_trunc_sat_f64x2_u_zero");
+}
+
+void LiftoffAssembler::emit_f32x4_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_sconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_uconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_f32x4_demote_f64x2_zero(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_f32x4_demote_f64x2_zero");
+}
+
+void LiftoffAssembler::emit_i8x16_sconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_sconvert_i16x8");
+}
+
+void LiftoffAssembler::emit_i8x16_uconvert_i16x8(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_uconvert_i16x8");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_sconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i32x4(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_uconvert_i32x4");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_sconvert_i8x16_low");
+}
+
+void LiftoffAssembler::emit_i16x8_sconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_sconvert_i8x16_high");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_uconvert_i8x16_low");
+}
+
+void LiftoffAssembler::emit_i16x8_uconvert_i8x16_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_uconvert_i8x16_high");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_sconvert_i16x8_low");
+}
+
+void LiftoffAssembler::emit_i32x4_sconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_sconvert_i16x8_high");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_uconvert_i16x8_low");
+}
+
+void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_uconvert_i16x8_high");
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_sconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_sconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_sconvert_i32x4_high");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_low(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_uconvert_i32x4_low");
+}
+
+void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i64x2_uconvert_i32x4_high");
+}
+
+void LiftoffAssembler::emit_i8x16_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i8x16_rounding_average_u");
+}
+
+void LiftoffAssembler::emit_i16x8_rounding_average_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ LiftoffRegister rhs) {
+ bailout(kSimd, "emit_i16x8_rounding_average_u");
+}
+
+void LiftoffAssembler::emit_i8x16_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i8x16_abs");
+}
+
+void LiftoffAssembler::emit_i16x8_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i16x8_abs");
+}
+
+void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
+ LiftoffRegister src) {
+ bailout(kSimd, "emit_i32x4_abs");
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i8x16_extract_lane_s");
+}
+
+void LiftoffAssembler::emit_i8x16_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i8x16_extract_lane_u");
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_s(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i16x8_extract_lane_s");
+}
+
+void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i16x8_extract_lane_u");
+}
+
+void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i32x4_extract_lane");
+}
+
+void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i64x2_extract_lane");
+}
+
+void LiftoffAssembler::emit_f32x4_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f32x4_extract_lane");
+}
+
+void LiftoffAssembler::emit_f64x2_extract_lane(LiftoffRegister dst,
+ LiftoffRegister lhs,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f64x2_extract_lane");
+}
+
+void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i8x16_replace_lane");
+}
+
+void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i16x8_replace_lane");
+}
+
+void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i32x4_replace_lane");
+}
+
+void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_i64x2_replace_lane");
+}
+
+void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f32x4_replace_lane");
+}
+
+void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
+ LiftoffRegister src1,
+ LiftoffRegister src2,
+ uint8_t imm_lane_idx) {
+ bailout(kSimd, "emit_f64x2_replace_lane");
+}
+
+void LiftoffAssembler::StackCheck(Label* ool_code, Register limit_address) {
+ TurboAssembler::Ld_d(limit_address, MemOperand(limit_address, 0));
+ TurboAssembler::Branch(ool_code, ule, sp, Operand(limit_address));
+}
+
+void LiftoffAssembler::CallTrapCallbackForTesting() {
+ PrepareCallCFunction(0, GetUnusedRegister(kGpReg, {}).gp());
+ CallCFunction(ExternalReference::wasm_call_trap_callback_for_testing(), 0);
+}
+
+void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
+ if (FLAG_debug_code) Abort(reason);
+}
+
+void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
+ unsigned num_gp_regs = gp_regs.GetNumRegsSet();
+ if (num_gp_regs) {
+ unsigned offset = num_gp_regs * kSystemPointerSize;
+ addi_d(sp, sp, -offset);
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetFirstRegSet();
+ offset -= kSystemPointerSize;
+ St_d(reg.gp(), MemOperand(sp, offset));
+ gp_regs.clear(reg);
+ }
+ DCHECK_EQ(offset, 0);
+ }
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ unsigned num_fp_regs = fp_regs.GetNumRegsSet();
+ if (num_fp_regs) {
+ unsigned slot_size = 8;
+ addi_d(sp, sp, -(num_fp_regs * slot_size));
+ unsigned offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ TurboAssembler::Fst_d(reg.fp(), MemOperand(sp, offset));
+ fp_regs.clear(reg);
+ offset += slot_size;
+ }
+ DCHECK_EQ(offset, num_fp_regs * slot_size);
+ }
+}
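// Resulting save layout, assuming GP registers {r1, r2, r3} are in the set:
// r1 lands at sp+16, r2 at sp+8, r3 at sp+0 (lowest-numbered register at the
// highest offset). PopRegisters below restores from the highest-numbered
// register at sp+0 upwards, mirroring this order exactly.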
+
+void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
+ LiftoffRegList fp_regs = regs & kFpCacheRegList;
+ unsigned fp_offset = 0;
+ while (!fp_regs.is_empty()) {
+ LiftoffRegister reg = fp_regs.GetFirstRegSet();
+ TurboAssembler::Fld_d(reg.fp(), MemOperand(sp, fp_offset));
+ fp_regs.clear(reg);
+ fp_offset += 8;
+ }
+ if (fp_offset) addi_d(sp, sp, fp_offset);
+ LiftoffRegList gp_regs = regs & kGpCacheRegList;
+ unsigned gp_offset = 0;
+ while (!gp_regs.is_empty()) {
+ LiftoffRegister reg = gp_regs.GetLastRegSet();
+ Ld_d(reg.gp(), MemOperand(sp, gp_offset));
+ gp_regs.clear(reg);
+ gp_offset += kSystemPointerSize;
+ }
+ addi_d(sp, sp, gp_offset);
+}
+
+void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
+ LiftoffRegList all_spills,
+ LiftoffRegList ref_spills,
+ int spill_offset) {
+ int spill_space_size = 0;
+ while (!all_spills.is_empty()) {
+ LiftoffRegister reg = all_spills.GetFirstRegSet();
+ if (ref_spills.has(reg)) {
+ safepoint.DefinePointerSlot(spill_offset);
+ }
+ all_spills.clear(reg);
+ ++spill_offset;
+ spill_space_size += kSystemPointerSize;
+ }
+ // Record the number of additional spill slots.
+ RecordOolSpillSpaceSize(spill_space_size);
+}
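// Only reference-typed spills become safepoint entries: the GC visits the
// recorded slots during a stack walk, while non-reference spills merely grow
// the out-of-line spill space by one pointer each.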
+
+void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
+ DCHECK_LT(num_stack_slots,
+ (1 << 16) / kSystemPointerSize); // 16 bit immediate
+ Drop(static_cast<int>(num_stack_slots));
+ Ret();
+}
+
+void LiftoffAssembler::CallC(const ValueKindSig* sig,
+ const LiftoffRegister* args,
+ const LiftoffRegister* rets,
+ ValueKind out_argument_kind, int stack_bytes,
+ ExternalReference ext_ref) {
+ addi_d(sp, sp, -stack_bytes);
+
+ int arg_bytes = 0;
+ for (ValueKind param_kind : sig->parameters()) {
+ liftoff::Store(this, sp, arg_bytes, *args++, param_kind);
+ arg_bytes += element_size_bytes(param_kind);
+ }
+ DCHECK_LE(arg_bytes, stack_bytes);
+
+ // Pass a pointer to the buffer with the arguments to the C function.
+ // On LoongArch, the first argument is passed in {a0}.
+ constexpr Register kFirstArgReg = a0;
+ mov(kFirstArgReg, sp);
+
+ // Now call the C function.
+ constexpr int kNumCCallArgs = 1;
+ PrepareCallCFunction(kNumCCallArgs, kScratchReg);
+ CallCFunction(ext_ref, kNumCCallArgs);
+
+ // Move return value to the right register.
+ const LiftoffRegister* next_result_reg = rets;
+ if (sig->return_count() > 0) {
+ DCHECK_EQ(1, sig->return_count());
+ constexpr Register kReturnReg = a0;
+ if (kReturnReg != next_result_reg->gp()) {
+ Move(*next_result_reg, LiftoffRegister(kReturnReg), sig->GetReturn(0));
+ }
+ ++next_result_reg;
+ }
+
+ // Load potential output value from the buffer on the stack.
+ if (out_argument_kind != kVoid) {
+ liftoff::Load(this, *next_result_reg, MemOperand(sp, 0), out_argument_kind);
+ }
+
+ addi_d(sp, sp, stack_bytes);
+}
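// Calling-convention note for CallC above: every argument is spilled into one
// stack buffer, the C callee receives a single pointer to it (a0 here), and
// any out-argument is read back from slot 0 of the same buffer. A hypothetical
// helper (name illustrative only) therefore has the shape:
//   extern "C" void wasm_helper(uint8_t* buffer);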
+
+void LiftoffAssembler::CallNativeWasmCode(Address addr) {
+ Call(addr, RelocInfo::WASM_CALL);
+}
+
+void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
+ Jump(addr, RelocInfo::WASM_CALL);
+}
+
+void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
+ compiler::CallDescriptor* call_descriptor,
+ Register target) {
+ if (target == no_reg) {
+ Pop(kScratchReg);
+ Call(kScratchReg);
+ } else {
+ Call(target);
+ }
+}
+
+void LiftoffAssembler::TailCallIndirect(Register target) {
+ if (target == no_reg) {
+ Pop(kScratchReg);
+ Jump(kScratchReg);
+ } else {
+ Jump(target);
+ }
+}
+
+void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
+ // A direct call to a wasm runtime stub defined in this module.
+ // Just encode the stub index. This will be patched at relocation.
+ Call(static_cast<Address>(sid), RelocInfo::WASM_STUB_CALL);
+}
+
+void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
+ addi_d(sp, sp, -size);
+ TurboAssembler::Move(addr, sp);
+}
+
+void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
+ addi_d(sp, sp, size);
+}
+
+void LiftoffAssembler::MaybeOSR() {}
+
+void LiftoffAssembler::emit_set_if_nan(Register dst, FPURegister src,
+ ValueKind kind) {
+ UseScratchRegisterScope temps(this);
+ Register scratch = temps.Acquire();
+ Label not_nan;
+ if (kind == kF32) {
+ CompareIsNanF32(src, src);
+ } else {
+ DCHECK_EQ(kind, kF64);
+ CompareIsNanF64(src, src);
+ }
+ BranchFalseShortF(&not_nan);
+ li(scratch, 1);
+ St_w(scratch, MemOperand(dst, 0));
+ bind(&not_nan);
+}
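// The self-compare works because NaN is the only IEEE-754 value unequal to
// itself; in C++ terms:
inline bool IsNan(double x) { return x != x; }  // equivalent to std::isnan(x)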
+
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
+ Register tmp_gp,
+ LiftoffRegister tmp_s128,
+ ValueKind lane_kind) {
+ UNIMPLEMENTED();
+}
+
+void LiftoffStackSlots::Construct(int param_slots) {
+ DCHECK_LT(0, slots_.size());
+ SortInPushOrder();
+ int last_stack_slot = param_slots;
+ for (auto& slot : slots_) {
+ const int stack_slot = slot.dst_slot_;
+ int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
+ DCHECK_LT(0, stack_decrement);
+ last_stack_slot = stack_slot;
+ const LiftoffAssembler::VarState& src = slot.src_;
+ switch (src.loc()) {
+ case LiftoffAssembler::VarState::kStack:
+ if (src.kind() != kS128) {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ asm_->Ld_d(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
+ asm_->Push(kScratchReg);
+ } else {
+ asm_->AllocateStackSpace(stack_decrement - kSimd128Size);
+ asm_->Ld_d(kScratchReg, liftoff::GetStackSlot(slot.src_offset_ - 8));
+ asm_->Push(kScratchReg);
+ asm_->Ld_d(kScratchReg, liftoff::GetStackSlot(slot.src_offset_));
+ asm_->Push(kScratchReg);
+ }
+ break;
+ case LiftoffAssembler::VarState::kRegister: {
+ int pushed_bytes = SlotSizeInBytes(slot);
+ asm_->AllocateStackSpace(stack_decrement - pushed_bytes);
+ liftoff::push(asm_, src.reg(), src.kind());
+ break;
+ }
+ case LiftoffAssembler::VarState::kIntConst: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ asm_->li(kScratchReg, Operand(src.i32_const()));
+ asm_->Push(kScratchReg);
+ break;
+ }
+ }
+ }
+}
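// Note on the AllocateStackSpace calls above: slots are pushed from the
// highest parameter slot downward, and each step first allocates the padding
// between the previous slot and this one (stack_decrement minus the bytes the
// push itself writes), so the final layout stays contiguous.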
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+
+#endif // V8_WASM_BASELINE_LOONG64_LIFTOFF_ASSEMBLER_LOONG64_H_
diff --git a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
index 4ab036da8e..35eabecbf0 100644
--- a/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
+++ b/deps/v8/src/wasm/baseline/mips/liftoff-assembler-mips.h
@@ -3067,20 +3067,22 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, FPURegister src,
ValueKind kind) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- li(scratch, 1);
+ Label not_nan;
if (kind == kF32) {
CompareIsNanF32(src, src);
} else {
DCHECK_EQ(kind, kF64);
CompareIsNanF64(src, src);
}
- LoadZeroIfNotFPUCondition(scratch);
- Sw(scratch, MemOperand(dst));
+ BranchFalseShortF(&not_nan, USE_DELAY_SLOT);
+ li(scratch, 1);
+ sw(scratch, MemOperand(dst));
+ bind(&not_nan);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
UNIMPLEMENTED();
}
diff --git a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
index 0a23c190e9..e47da84148 100644
--- a/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
+++ b/deps/v8/src/wasm/baseline/mips64/liftoff-assembler-mips64.h
@@ -3235,22 +3235,35 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, FPURegister src,
ValueKind kind) {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- li(scratch, 1);
+ Label not_nan;
if (kind == kF32) {
CompareIsNanF32(src, src);
} else {
DCHECK_EQ(kind, kF64);
CompareIsNanF64(src, src);
}
- LoadZeroIfNotFPUCondition(scratch);
- Sd(scratch, MemOperand(dst));
+ BranchFalseShortF(&not_nan, USE_DELAY_SLOT);
+ li(scratch, 1);
+ Sw(scratch, MemOperand(dst));
+ bind(&not_nan);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
- UNIMPLEMENTED();
+ Label not_nan;
+ if (lane_kind == kF32) {
+ fcun_w(tmp_s128.fp().toW(), src.fp().toW(), src.fp().toW());
+ } else {
+ DCHECK_EQ(lane_kind, kF64);
+ fcun_d(tmp_s128.fp().toW(), src.fp().toW(), src.fp().toW());
+ }
+ BranchMSA(&not_nan, MSA_BRANCH_V, all_zero, tmp_s128.fp().toW(),
+ USE_DELAY_SLOT);
+ li(tmp_gp, 1);
+ Sw(tmp_gp, MemOperand(dst));
+ bind(&not_nan);
}
void LiftoffStackSlots::Construct(int param_slots) {
diff --git a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
index 8e3808d259..617e193bd1 100644
--- a/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
+++ b/deps/v8/src/wasm/baseline/ppc/liftoff-assembler-ppc.h
@@ -41,7 +41,7 @@ namespace liftoff {
//
//
-constexpr int32_t kInstanceOffset = 2 * kSystemPointerSize;
+constexpr int32_t kInstanceOffset = 3 * kSystemPointerSize;
inline MemOperand GetHalfStackSlot(int offset, RegPairHalf half) {
int32_t half_offset =
@@ -106,7 +106,26 @@ int LiftoffAssembler::PrepareStackFrame() {
void LiftoffAssembler::PrepareTailCall(int num_callee_stack_params,
int stack_param_delta) {
- bailout(kUnsupportedArchitecture, "PrepareTailCall");
+ Register scratch = ip;
+ // Push the return address and frame pointer to complete the stack frame.
+ AddS64(sp, sp, Operand(-2 * kSystemPointerSize), r0);
+ LoadU64(scratch, MemOperand(fp, kSystemPointerSize), r0);
+ StoreU64(scratch, MemOperand(sp, kSystemPointerSize), r0);
+ LoadU64(scratch, MemOperand(fp), r0);
+ StoreU64(scratch, MemOperand(sp), r0);
+
+ // Shift the whole frame upwards.
+ int slot_count = num_callee_stack_params + 2;
+ for (int i = slot_count - 1; i >= 0; --i) {
+ LoadU64(scratch, MemOperand(sp, i * kSystemPointerSize), r0);
+ StoreU64(scratch,
+ MemOperand(fp, (i - stack_param_delta) * kSystemPointerSize), r0);
+ }
+
+ // Set the new stack and frame pointer.
+ AddS64(sp, fp, Operand(-stack_param_delta * kSystemPointerSize), r0);
+ Pop(r0, fp);
+ mtlr(r0);
}
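// The copy loop above is a memmove in assembly: source and destination slot
// ranges overlap when the frame shifts upward, so slots are copied from the
// highest index downward to avoid clobbering entries not yet copied.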
void LiftoffAssembler::AlignFrameSize() {}
@@ -169,14 +188,14 @@ void LiftoffAssembler::LoadConstant(LiftoffRegister reg, WasmValue value,
case kF32: {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- mov(scratch, Operand(value.to_f32_boxed().get_scalar()));
- MovIntToFloat(reg.fp(), scratch);
+ mov(scratch, Operand(value.to_f32_boxed().get_bits()));
+ MovIntToFloat(reg.fp(), scratch, ip);
break;
}
case kF64: {
UseScratchRegisterScope temps(this);
Register scratch = temps.Acquire();
- mov(scratch, Operand(value.to_f64_boxed().get_scalar()));
+ mov(scratch, Operand(value.to_f64_boxed().get_bits()));
MovInt64ToDouble(reg.fp(), scratch);
break;
}
@@ -750,12 +769,19 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
#define SIGN_EXT(r) extsw(r, r)
#define ROUND_F64_TO_F32(fpr) frsp(fpr, fpr)
#define INT32_AND_WITH_1F(x) Operand(x & 0x1f)
+#define INT32_AND_WITH_3F(x) Operand(x & 0x3f)
#define REGISTER_AND_WITH_1F \
([&](Register rhs) { \
andi(r0, rhs, Operand(31)); \
return r0; \
})
+#define REGISTER_AND_WITH_3F \
+ ([&](Register rhs) { \
+ andi(r0, rhs, Operand(63)); \
+ return r0; \
+ })
+
#define LFR_TO_REG(reg) reg.gp()
// V(name, instr, dtype, stype, dcast, scast, rcast, return_val, return_type)
@@ -772,16 +798,12 @@ void LiftoffAssembler::FillStackSlotsWithZero(int start, int size) {
true, bool) \
V(f32_trunc, friz, DoubleRegister, DoubleRegister, , , ROUND_F64_TO_F32, \
true, bool) \
- V(f32_nearest_int, frin, DoubleRegister, DoubleRegister, , , \
- ROUND_F64_TO_F32, true, bool) \
V(f64_abs, fabs, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f64_neg, fneg, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f64_sqrt, fsqrt, DoubleRegister, DoubleRegister, , , USE, , void) \
V(f64_floor, frim, DoubleRegister, DoubleRegister, , , USE, true, bool) \
V(f64_ceil, frip, DoubleRegister, DoubleRegister, , , USE, true, bool) \
V(f64_trunc, friz, DoubleRegister, DoubleRegister, , , USE, true, bool) \
- V(f64_nearest_int, frin, DoubleRegister, DoubleRegister, , , USE, true, \
- bool) \
V(i32_clz, CountLeadingZerosU32, Register, Register, , , USE, , void) \
V(i32_ctz, CountTrailingZerosU32, Register, Register, , , USE, , void) \
V(i64_clz, CountLeadingZerosU64, LiftoffRegister, LiftoffRegister, \
@@ -873,17 +895,17 @@ UNOP_LIST(EMIT_UNOP_FUNCTION)
V(i32_shr, ShiftRightU32, Register, Register, Register, , , \
REGISTER_AND_WITH_1F, USE, , void) \
V(i64_shl, ShiftLeftU64, LiftoffRegister, LiftoffRegister, Register, \
- LFR_TO_REG, LFR_TO_REG, , USE, , void) \
+ LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void) \
V(i64_sar, ShiftRightS64, LiftoffRegister, LiftoffRegister, Register, \
- LFR_TO_REG, LFR_TO_REG, , USE, , void) \
+ LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void) \
V(i64_shr, ShiftRightU64, LiftoffRegister, LiftoffRegister, Register, \
- LFR_TO_REG, LFR_TO_REG, , USE, , void) \
+ LFR_TO_REG, LFR_TO_REG, REGISTER_AND_WITH_3F, USE, , void) \
V(i64_shli, ShiftLeftU64, LiftoffRegister, LiftoffRegister, int32_t, \
- LFR_TO_REG, LFR_TO_REG, Operand, USE, , void) \
+ LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
V(i64_sari, ShiftRightS64, LiftoffRegister, LiftoffRegister, int32_t, \
- LFR_TO_REG, LFR_TO_REG, Operand, USE, , void) \
+ LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
V(i64_shri, ShiftRightU64, LiftoffRegister, LiftoffRegister, int32_t, \
- LFR_TO_REG, LFR_TO_REG, Operand, USE, , void) \
+ LFR_TO_REG, LFR_TO_REG, INT32_AND_WITH_3F, USE, , void) \
V(f64_add, AddF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
USE, , void) \
V(f64_sub, SubF64, DoubleRegister, DoubleRegister, DoubleRegister, , , , \
@@ -921,53 +943,139 @@ BINOP_LIST(EMIT_BINOP_FUNCTION)
#undef REGISTER_AND_WITH_1F
#undef LFR_TO_REG
+bool LiftoffAssembler::emit_f32_nearest_int(DoubleRegister dst,
+ DoubleRegister src) {
+ return false;
+}
+
+bool LiftoffAssembler::emit_f64_nearest_int(DoubleRegister dst,
+ DoubleRegister src) {
+ return false;
+}
+
void LiftoffAssembler::emit_i32_divs(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- bailout(kUnsupportedArchitecture, "i32_divs");
+ Label cont;
+
+ // Check for division by zero.
+ CmpS32(rhs, Operand::Zero(), r0);
+ b(eq, trap_div_by_zero);
+
+ // Check for kMinInt / -1. This is unrepresentable.
+ CmpS32(rhs, Operand(-1), r0);
+ bne(&cont);
+ CmpS32(lhs, Operand(kMinInt), r0);
+ b(eq, trap_div_unrepresentable);
+
+ bind(&cont);
+ DivS32(dst, lhs, rhs);
}
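// C++ sketch of the two guards above, matching Wasm's trapping i32.div_s
// semantics (Trap() stands in for the jump to the corresponding label):
#include <cstdint>
#include <climits>
[[noreturn]] void Trap(const char* reason);  // assumed, provided elsewhere
int32_t I32DivS(int32_t lhs, int32_t rhs) {
  if (rhs == 0) Trap("integer divide by zero");
  if (rhs == -1 && lhs == INT32_MIN) Trap("integer overflow");  // kMinInt / -1
  return lhs / rhs;
}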
void LiftoffAssembler::emit_i32_divu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i32_divu");
+ CmpS32(rhs, Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+ DivU32(dst, lhs, rhs);
}
void LiftoffAssembler::emit_i32_rems(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i32_rems");
+ Label cont, done, trap_div_unrepresentable;
+ // Check for division by zero.
+ CmpS32(rhs, Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+
+ // Check for the kMinInt / -1 case.
+ CmpS32(rhs, Operand(-1), r0);
+ bne(&cont);
+ CmpS32(lhs, Operand(kMinInt), r0);
+ beq(&trap_div_unrepresentable);
+
+ // Continue normal calculation.
+ bind(&cont);
+ ModS32(dst, lhs, rhs);
+ bne(&done);
+
+ // The kMinInt % -1 case: the remainder is simply 0.
+ bind(&trap_div_unrepresentable);
+ mov(dst, Operand(0));
+ bind(&done);
}
void LiftoffAssembler::emit_i32_remu(Register dst, Register lhs, Register rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i32_remu");
+ CmpS32(rhs, Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+ ModU32(dst, lhs, rhs);
}
bool LiftoffAssembler::emit_i64_divs(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero,
Label* trap_div_unrepresentable) {
- bailout(kUnsupportedArchitecture, "i64_divs");
+ constexpr int64_t kMinInt64 = static_cast<int64_t>(uint64_t{1} << 63);
+ Label cont;
+ // Check for division by zero.
+ CmpS64(rhs.gp(), Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+
+ // Check for kMinInt / -1. This is unrepresentable.
+ CmpS64(rhs.gp(), Operand(-1), r0);
+ bne(&cont);
+ CmpS64(lhs.gp(), Operand(kMinInt64), r0);
+ beq(trap_div_unrepresentable);
+
+ bind(&cont);
+ DivS64(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
bool LiftoffAssembler::emit_i64_divu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i64_divu");
+ CmpS64(rhs.gp(), Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+ // Do div.
+ DivU64(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
bool LiftoffAssembler::emit_i64_rems(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i64_rems");
+ constexpr int64_t kMinInt64 = static_cast<int64_t>(uint64_t{1} << 63);
+
+ Label trap_div_unrepresentable;
+ Label done;
+ Label cont;
+
+ // Check for division by zero.
+ CmpS64(rhs.gp(), Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+
+ // Check for kMinInt / -1. This is unrepresentable.
+ CmpS64(rhs.gp(), Operand(-1), r0);
+ bne(&cont);
+ CmpS64(lhs.gp(), Operand(kMinInt64), r0);
+ beq(&trap_div_unrepresentable);
+
+ bind(&cont);
+ ModS64(dst.gp(), lhs.gp(), rhs.gp());
+ bne(&done);
+
+ bind(&trap_div_unrepresentable);
+ mov(dst.gp(), Operand(0));
+ bind(&done);
return true;
}
bool LiftoffAssembler::emit_i64_remu(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs,
Label* trap_div_by_zero) {
- bailout(kUnsupportedArchitecture, "i64_remu");
+ CmpS64(rhs.gp(), Operand::Zero(), r0);
+ beq(trap_div_by_zero);
+ ModU64(dst.gp(), lhs.gp(), rhs.gp());
return true;
}
@@ -1083,10 +1191,10 @@ void LiftoffAssembler::emit_i64_set_cond(LiftoffCondition liftoff_cond,
void LiftoffAssembler::emit_f32_set_cond(LiftoffCondition liftoff_cond,
Register dst, DoubleRegister lhs,
DoubleRegister rhs) {
- fcmpu(lhs, rhs);
+ fcmpu(lhs, rhs, cr7);
Label done;
mov(dst, Operand(1));
- b(liftoff::ToCondition(liftoff_cond), &done);
+ b(liftoff::ToCondition(liftoff_cond), &done, cr7);
mov(dst, Operand::Zero());
bind(&done);
}
@@ -1114,7 +1222,9 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
SmiCheckMode mode) {
- bailout(kUnsupportedArchitecture, "emit_smi_check");
+ TestIfSmi(obj, r0);
+ Condition condition = mode == kJumpOnSmi ? eq : ne;
+ b(condition, target, cr0); // branch if SMI
}
void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
@@ -2266,18 +2376,31 @@ void LiftoffAssembler::AssertUnreachable(AbortReason reason) {
}
void LiftoffAssembler::PushRegisters(LiftoffRegList regs) {
- bailout(kUnsupportedArchitecture, "PushRegisters");
+ MultiPush(regs.GetGpList());
+ MultiPushDoubles(regs.GetFpList());
}
void LiftoffAssembler::PopRegisters(LiftoffRegList regs) {
- bailout(kUnsupportedArchitecture, "PopRegisters");
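+ // Pop in the reverse order of PushRegisters: FP registers first, then GPs.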
+ MultiPopDoubles(regs.GetFpList());
+ MultiPop(regs.GetGpList());
}
void LiftoffAssembler::RecordSpillsInSafepoint(Safepoint& safepoint,
LiftoffRegList all_spills,
LiftoffRegList ref_spills,
int spill_offset) {
- bailout(kRefTypes, "RecordSpillsInSafepoint");
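+ // Walk the spills from the highest register down; only reference spills
+ // are recorded in the safepoint so the GC can find them.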
+ int spill_space_size = 0;
+ while (!all_spills.is_empty()) {
+ LiftoffRegister reg = all_spills.GetLastRegSet();
+ if (ref_spills.has(reg)) {
+ safepoint.DefinePointerSlot(spill_offset);
+ }
+ all_spills.clear(reg);
+ ++spill_offset;
+ spill_space_size += kSystemPointerSize;
+ }
+ // Record the number of additional spill slots.
+ RecordOolSpillSpaceSize(spill_space_size);
}
void LiftoffAssembler::DropStackSlotsAndRet(uint32_t num_stack_slots) {
@@ -2289,15 +2412,95 @@ void LiftoffAssembler::CallC(const ValueKindSig* sig,
const LiftoffRegister* rets,
ValueKind out_argument_kind, int stack_bytes,
ExternalReference ext_ref) {
- bailout(kUnsupportedArchitecture, "CallC");
+ int total_size = RoundUp(stack_bytes, kSystemPointerSize);
+
+ int size = total_size;
+ constexpr int kStackPageSize = 4 * KB;
+
+ // Reserve space in the stack.
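+ // Touch each page as the stack grows so that guard pages are hit in order.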
+ while (size > kStackPageSize) {
+ SubS64(sp, sp, Operand(kStackPageSize), r0);
+ StoreU64(r0, MemOperand(sp));
+ size -= kStackPageSize;
+ }
+
+ SubS64(sp, sp, Operand(size), r0);
+
+ int arg_bytes = 0;
+ for (ValueKind param_kind : sig->parameters()) {
+ switch (param_kind) {
+ case kI32:
+ StoreU32(args->gp(), MemOperand(sp, arg_bytes), r0);
+ break;
+ case kI64:
+ StoreU64(args->gp(), MemOperand(sp, arg_bytes), r0);
+ break;
+ case kF32:
+ StoreF32(args->fp(), MemOperand(sp, arg_bytes), r0);
+ break;
+ case kF64:
+ StoreF64(args->fp(), MemOperand(sp, arg_bytes), r0);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ args++;
+ arg_bytes += element_size_bytes(param_kind);
+ }
+
+ DCHECK_LE(arg_bytes, stack_bytes);
+
+ // Pass a pointer to the buffer with the arguments to the C function.
+ mr(r3, sp);
+
+ // Now call the C function.
+ constexpr int kNumCCallArgs = 1;
+ PrepareCallCFunction(kNumCCallArgs, r0);
+ CallCFunction(ext_ref, kNumCCallArgs);
+
+ // Move return value to the right register.
+ const LiftoffRegister* result_reg = rets;
+ if (sig->return_count() > 0) {
+ DCHECK_EQ(1, sig->return_count());
+ constexpr Register kReturnReg = r3;
+ if (kReturnReg != rets->gp()) {
+ Move(*rets, LiftoffRegister(kReturnReg), sig->GetReturn(0));
+ }
+ result_reg++;
+ }
+
+ // Load potential output value from the buffer on the stack.
+ if (out_argument_kind != kVoid) {
+ switch (out_argument_kind) {
+ case kI32:
+ LoadS32(result_reg->gp(), MemOperand(sp));
+ break;
+ case kI64:
+ case kOptRef:
+ case kRef:
+ case kRtt:
+ case kRttWithDepth:
+ LoadU64(result_reg->gp(), MemOperand(sp));
+ break;
+ case kF32:
+ LoadF32(result_reg->fp(), MemOperand(sp));
+ break;
+ case kF64:
+ LoadF64(result_reg->fp(), MemOperand(sp));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ }
+ AddS64(sp, sp, Operand(total_size), r0);
}
void LiftoffAssembler::CallNativeWasmCode(Address addr) {
- bailout(kUnsupportedArchitecture, "CallNativeWasmCode");
+ Call(addr, RelocInfo::WASM_CALL);
}
void LiftoffAssembler::TailCallNativeWasmCode(Address addr) {
- bailout(kUnsupportedArchitecture, "TailCallNativeWasmCode");
+ Jump(addr, RelocInfo::WASM_CALL);
}
void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
@@ -2315,11 +2518,12 @@ void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
}
void LiftoffAssembler::AllocateStackSlot(Register addr, uint32_t size) {
- bailout(kUnsupportedArchitecture, "AllocateStackSlot");
+ SubS64(sp, sp, Operand(size), r0);
+ mr(addr, sp);
}
void LiftoffAssembler::DeallocateStackSlot(uint32_t size) {
- bailout(kUnsupportedArchitecture, "DeallocateStackSlot");
+ AddS64(sp, sp, Operand(size));
}
void LiftoffAssembler::MaybeOSR() {}
@@ -2329,15 +2533,114 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
UNIMPLEMENTED();
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
UNIMPLEMENTED();
}
void LiftoffStackSlots::Construct(int param_slots) {
- asm_->bailout(kUnsupportedArchitecture, "LiftoffStackSlots::Construct");
+ DCHECK_LT(0, slots_.size());
+ SortInPushOrder();
+ int last_stack_slot = param_slots;
+ for (auto& slot : slots_) {
+ const int stack_slot = slot.dst_slot_;
+ int stack_decrement = (last_stack_slot - stack_slot) * kSystemPointerSize;
+ DCHECK_LT(0, stack_decrement);
+ last_stack_slot = stack_slot;
+ const LiftoffAssembler::VarState& src = slot.src_;
+ switch (src.loc()) {
+ case LiftoffAssembler::VarState::kStack: {
+ switch (src.kind()) {
+ case kI32:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ case kI64: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ UseScratchRegisterScope temps(asm_);
+ Register scratch = temps.Acquire();
+ asm_->LoadU64(scratch, liftoff::GetStackSlot(slot.src_offset_), r0);
+ asm_->Push(scratch);
+ break;
+ }
+ case kF32: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ asm_->LoadF32(kScratchDoubleReg,
+ liftoff::GetStackSlot(slot.src_offset_), r0);
+ asm_->AddS64(sp, sp, Operand(-kSystemPointerSize));
+ asm_->StoreF32(kScratchDoubleReg, MemOperand(sp), r0);
+ break;
+ }
+ case kF64: {
+ asm_->AllocateStackSpace(stack_decrement - kDoubleSize);
+ asm_->LoadF64(kScratchDoubleReg,
+ liftoff::GetStackSlot(slot.src_offset_), r0);
+ asm_->AddS64(sp, sp, Operand(-kSystemPointerSize), r0);
+ asm_->StoreF64(kScratchDoubleReg, MemOperand(sp), r0);
+ break;
+ }
+ case kS128: {
+ asm_->bailout(kSimd, "LiftoffStackSlots::Construct");
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ case LiftoffAssembler::VarState::kRegister: {
+ int pushed_bytes = SlotSizeInBytes(slot);
+ asm_->AllocateStackSpace(stack_decrement - pushed_bytes);
+ switch (src.kind()) {
+ case kI64:
+ case kI32:
+ case kRef:
+ case kOptRef:
+ case kRtt:
+ case kRttWithDepth:
+ asm_->push(src.reg().gp());
+ break;
+ case kF32:
+ asm_->AddS64(sp, sp, Operand(-kSystemPointerSize), r0);
+ asm_->StoreF32(src.reg().fp(), MemOperand(sp), r0);
+ break;
+ case kF64:
+ asm_->AddS64(sp, sp, Operand(-kSystemPointerSize), r0);
+ asm_->StoreF64(src.reg().fp(), MemOperand(sp), r0);
+ break;
+ case kS128: {
+ asm_->bailout(kSimd, "LiftoffStackSlots::Construct");
+ break;
+ }
+ default:
+ UNREACHABLE();
+ }
+ break;
+ }
+ case LiftoffAssembler::VarState::kIntConst: {
+ asm_->AllocateStackSpace(stack_decrement - kSystemPointerSize);
+ DCHECK(src.kind() == kI32 || src.kind() == kI64);
+ UseScratchRegisterScope temps(asm_);
+ Register scratch = temps.Acquire();
+
+ switch (src.kind()) {
+ case kI32:
+ asm_->mov(scratch, Operand(src.i32_const()));
+ break;
+ case kI64:
+ asm_->mov(scratch, Operand(int64_t{slot.src_.i32_const()}));
+ break;
+ default:
+ UNREACHABLE();
+ }
+ asm_->push(scratch);
+ break;
+ }
+ }
+ }
}
} // namespace wasm
diff --git a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
index fef59471c1..1860a1920f 100644
--- a/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
+++ b/deps/v8/src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h
@@ -79,16 +79,16 @@ inline MemOperand GetMemOp(LiftoffAssembler* assm, Register addr,
if (is_uint31(offset_imm)) {
int32_t offset_imm32 = static_cast<int32_t>(offset_imm);
if (offset == no_reg) return MemOperand(addr, offset_imm32);
- assm->Add64(kScratchReg, addr, offset);
- return MemOperand(kScratchReg, offset_imm32);
+ assm->Add64(kScratchReg2, addr, offset);
+ return MemOperand(kScratchReg2, offset_imm32);
}
// Offset immediate does not fit in 31 bits.
- assm->li(kScratchReg, offset_imm);
- assm->Add64(kScratchReg, kScratchReg, addr);
+ assm->li(kScratchReg2, offset_imm);
+ assm->Add64(kScratchReg2, kScratchReg2, addr);
if (offset != no_reg) {
- assm->Add64(kScratchReg, kScratchReg, offset);
+ assm->Add64(kScratchReg2, kScratchReg2, offset);
}
- return MemOperand(kScratchReg, 0);
+ return MemOperand(kScratchReg2, 0);
}
inline void Load(LiftoffAssembler* assm, LiftoffRegister dst, MemOperand src,
@@ -128,10 +128,10 @@ inline void Store(LiftoffAssembler* assm, Register base, int32_t offset,
assm->Usd(src.gp(), dst);
break;
case kF32:
- assm->UStoreFloat(src.fp(), dst);
+ assm->UStoreFloat(src.fp(), dst, kScratchReg);
break;
case kF64:
- assm->UStoreDouble(src.fp(), dst);
+ assm->UStoreDouble(src.fp(), dst, kScratchReg);
break;
default:
UNREACHABLE();
@@ -335,7 +335,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
// space if we first allocate the frame and then do the stack check (we will
// need some remaining stack space for throwing the exception). That's why we
// check the available stack space before we allocate the frame. To do this we
- // replace the {__ Daddu(sp, sp, -frame_size)} with a jump to OOL code that
+ // replace the {__ Add64(sp, sp, -frame_size)} with a jump to OOL code that
// does this "extended stack check".
//
// The OOL code can simply be generated here with the normal assembler,
@@ -376,7 +376,7 @@ void LiftoffAssembler::PatchPrepareStackFrame(
Add64(sp, sp, Operand(-frame_size));
// Jump back to the start of the function, from {pc_offset()} to
- // right after the reserved space for the {__ Daddu(sp, sp, -framesize)}
+ // right after the reserved space for the {__ Add64(sp, sp, -framesize)}
// (which is a Branch now).
int func_start_offset = offset + 2 * kInstrSize;
imm32 = func_start_offset - pc_offset();
@@ -552,11 +552,20 @@ void LiftoffAssembler::Load(LiftoffRegister dst, Register src_addr,
TurboAssembler::Uld(dst.gp(), src_op);
break;
case LoadType::kF32Load:
- TurboAssembler::ULoadFloat(dst.fp(), src_op);
+ TurboAssembler::ULoadFloat(dst.fp(), src_op, kScratchReg);
break;
case LoadType::kF64Load:
- TurboAssembler::ULoadDouble(dst.fp(), src_op);
+ TurboAssembler::ULoadDouble(dst.fp(), src_op, kScratchReg);
break;
+ case LoadType::kS128Load: {
+ VU.set(kScratchReg, E8, m1);
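+ // The vector load takes no immediate offset, so fold a nonzero offset
+ // into a scratch base register first.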
+ Register src_reg = src_op.offset() == 0 ? src_op.rm() : kScratchReg;
+ if (src_op.offset() != 0) {
+ TurboAssembler::Add64(src_reg, src_op.rm(), src_op.offset());
+ }
+ vl(dst.fp().toV(), src_reg, 0, E8);
+ break;
+ }
default:
UNREACHABLE();
}
@@ -607,11 +616,20 @@ void LiftoffAssembler::Store(Register dst_addr, Register offset_reg,
TurboAssembler::Usd(src.gp(), dst_op);
break;
case StoreType::kF32Store:
- TurboAssembler::UStoreFloat(src.fp(), dst_op);
+ TurboAssembler::UStoreFloat(src.fp(), dst_op, kScratchReg);
break;
case StoreType::kF64Store:
- TurboAssembler::UStoreDouble(src.fp(), dst_op);
+ TurboAssembler::UStoreDouble(src.fp(), dst_op, kScratchReg);
break;
+ case StoreType::kS128Store: {
+ VU.set(kScratchReg, E8, m1);
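+ // As for vector loads above: fold a nonzero offset into the base first.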
+ Register dst_reg = dst_op.offset() == 0 ? dst_op.rm() : kScratchReg;
+ if (dst_op.offset() != 0) {
+ Add64(kScratchReg, dst_op.rm(), dst_op.offset());
+ }
+ vs(src.fp().toV(), dst_reg, 0, VSew::E8);
+ break;
+ }
default:
UNREACHABLE();
}
@@ -747,24 +765,26 @@ void LiftoffAssembler::AtomicLoad(LiftoffRegister dst, Register src_addr,
switch (type.value()) {
case LoadType::kI32Load8U:
case LoadType::kI64Load8U:
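+ // fence(rw,rw); load; fence(r,rw) is the standard RISC-V mapping for a
+ // sequentially consistent atomic load.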
+ fence(PSR | PSW, PSR | PSW);
lbu(dst.gp(), src_reg, 0);
- sync();
+ fence(PSR, PSR | PSW);
return;
case LoadType::kI32Load16U:
case LoadType::kI64Load16U:
+ fence(PSR | PSW, PSR | PSW);
lhu(dst.gp(), src_reg, 0);
- sync();
+ fence(PSR, PSR | PSW);
return;
case LoadType::kI32Load:
- lr_w(true, true, dst.gp(), src_reg);
- return;
case LoadType::kI64Load32U:
- lr_w(true, true, dst.gp(), src_reg);
- slli(dst.gp(), dst.gp(), 32);
- srli(dst.gp(), dst.gp(), 32);
+ fence(PSR | PSW, PSR | PSW);
+ lw(dst.gp(), src_reg, 0);
+ fence(PSR, PSR | PSW);
return;
case LoadType::kI64Load:
- lr_d(true, true, dst.gp(), src_reg);
+ fence(PSR | PSW, PSR | PSW);
+ ld(dst.gp(), src_reg, 0);
+ fence(PSR, PSR | PSW);
return;
default:
UNREACHABLE();
@@ -780,22 +800,22 @@ void LiftoffAssembler::AtomicStore(Register dst_addr, Register offset_reg,
switch (type.value()) {
case StoreType::kI64Store8:
case StoreType::kI32Store8:
- sync();
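+ // fence(rw,w); store is the standard RISC-V mapping for a sequentially
+ // consistent atomic store.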
+ fence(PSR | PSW, PSW);
sb(src.gp(), dst_reg, 0);
- sync();
return;
case StoreType::kI64Store16:
case StoreType::kI32Store16:
- sync();
+ fence(PSR | PSW, PSW);
sh(src.gp(), dst_reg, 0);
- sync();
return;
case StoreType::kI64Store32:
case StoreType::kI32Store:
- sc_w(true, true, zero_reg, dst_reg, src.gp());
+ fence(PSR | PSW, PSW);
+ sw(src.gp(), dst_reg, 0);
return;
case StoreType::kI64Store:
- sc_d(true, true, zero_reg, dst_reg, src.gp());
+ fence(PSR | PSW, PSW);
+ sd(src.gp(), dst_reg, 0);
return;
default:
UNREACHABLE();
@@ -948,7 +968,11 @@ void LiftoffAssembler::Move(Register dst, Register src, ValueKind kind) {
void LiftoffAssembler::Move(DoubleRegister dst, DoubleRegister src,
ValueKind kind) {
DCHECK_NE(dst, src);
- TurboAssembler::Move(dst, src);
+ if (kind != kS128) {
+ TurboAssembler::Move(dst, src);
+ } else {
+ TurboAssembler::vmv_vv(dst.toV(), src.toV());
+ }
}
void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
@@ -971,9 +995,15 @@ void LiftoffAssembler::Spill(int offset, LiftoffRegister reg, ValueKind kind) {
case kF64:
TurboAssembler::StoreDouble(reg.fp(), dst);
break;
- case kS128:
- bailout(kSimd, "Spill S128");
+ case kS128: {
+ VU.set(kScratchReg, E8, m1);
+ Register dst_reg = dst.offset() == 0 ? dst.rm() : kScratchReg;
+ if (dst.offset() != 0) {
+ Add64(kScratchReg, dst.rm(), dst.offset());
+ }
+ vs(reg.fp().toV(), dst_reg, 0, VSew::E8);
break;
+ }
default:
UNREACHABLE();
}
@@ -1021,6 +1051,15 @@ void LiftoffAssembler::Fill(LiftoffRegister reg, int offset, ValueKind kind) {
case kF64:
TurboAssembler::LoadDouble(reg.fp(), src);
break;
+ case kS128: {
+ VU.set(kScratchReg, E8, m1);
+ Register src_reg = src.offset() == 0 ? src.rm() : kScratchReg;
+ if (src.offset() != 0) {
+ TurboAssembler::Add64(src_reg, src.rm(), src.offset());
+ }
+ vl(reg.fp().toV(), src_reg, 0, E8);
+ break;
+ }
default:
UNREACHABLE();
}
@@ -1072,7 +1111,7 @@ void LiftoffAssembler::emit_i64_ctz(LiftoffRegister dst, LiftoffRegister src) {
bool LiftoffAssembler::emit_i64_popcnt(LiftoffRegister dst,
LiftoffRegister src) {
- TurboAssembler::Popcnt64(dst.gp(), src.gp());
+ TurboAssembler::Popcnt64(dst.gp(), src.gp(), kScratchReg);
return true;
}
@@ -1154,7 +1193,7 @@ void LiftoffAssembler::emit_i32_ctz(Register dst, Register src) {
}
bool LiftoffAssembler::emit_i32_popcnt(Register dst, Register src) {
- TurboAssembler::Popcnt32(dst, src);
+ TurboAssembler::Popcnt32(dst, src, kScratchReg);
return true;
}
@@ -1663,7 +1702,33 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
LiftoffRegister rhs,
const uint8_t shuffle[16],
bool is_swizzle) {
- bailout(kSimd, "emit_i8x16_shuffle");
+ VRegister dst_v = dst.fp().toV();
+ VRegister lhs_v = lhs.fp().toV();
+ VRegister rhs_v = rhs.fp().toV();
+
+ uint64_t imm1 = *(reinterpret_cast<const uint64_t*>(shuffle));
+ uint64_t imm2 = *((reinterpret_cast<const uint64_t*>(shuffle)) + 1);
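+ // Materialize the 16 shuffle indices in kSimd128ScratchReg: v0 selects
+ // which 64-bit half each immediate is merged into.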
+ VU.set(kScratchReg, VSew::E64, Vlmul::m1);
+ li(kScratchReg, 1);
+ vmv_vx(v0, kScratchReg);
+ li(kScratchReg, imm1);
+ vmerge_vx(kSimd128ScratchReg, kScratchReg, kSimd128ScratchReg);
+ li(kScratchReg, imm2);
+ vsll_vi(v0, v0, 1);
+ vmerge_vx(kSimd128ScratchReg, kScratchReg, kSimd128ScratchReg);
+
+ VU.set(kScratchReg, E8, m1);
+ if (dst_v == lhs_v) {
+ vmv_vv(kSimd128ScratchReg2, lhs_v);
+ lhs_v = kSimd128ScratchReg2;
+ } else if (dst_v == rhs_v) {
+ vmv_vv(kSimd128ScratchReg2, rhs_v);
+ rhs_v = kSimd128ScratchReg2;
+ }
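+ // Gather lhs lanes for indices 0..15, then rhs lanes for indices 16..31
+ // (out-of-range indices gather 0), and combine the halves with OR.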
+ vrgather_vv(dst_v, lhs_v, kSimd128ScratchReg);
+ vadd_vi(kSimd128ScratchReg, kSimd128ScratchReg, -16);
+ vrgather_vv(kSimd128ScratchReg, rhs_v, kSimd128ScratchReg);
+ vor_vv(dst_v, dst_v, kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
@@ -1679,42 +1744,46 @@ void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i8x16_splat");
+ VU.set(kScratchReg, E8, m1);
+ vmv_vx(dst.fp().toV(), src.gp());
}
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i16x8_splat");
+ VU.set(kScratchReg, E16, m1);
+ vmv_vx(dst.fp().toV(), src.gp());
}
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_splat");
+ VU.set(kScratchReg, E32, m1);
+ vmv_vx(dst.fp().toV(), src.gp());
}
void LiftoffAssembler::emit_i64x2_splat(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i64x2_splat");
+ VU.set(kScratchReg, E64, m1);
+ vmv_vx(dst.fp().toV(), src.gp());
}
void LiftoffAssembler::emit_i64x2_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i64x2_eq");
+ WasmRvvEq(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E64, m1);
}
void LiftoffAssembler::emit_i64x2_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_ne");
+ WasmRvvNe(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E64, m1);
}
void LiftoffAssembler::emit_i64x2_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i64x2.gt_s");
+ WasmRvvGtS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E64, m1);
}
void LiftoffAssembler::emit_i64x2_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "i64x2.ge_s");
+ WasmRvvGeS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E64, m1);
}
void LiftoffAssembler::emit_f32x4_splat(LiftoffRegister dst,
@@ -1756,7 +1825,11 @@ void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "i64x2_bitmask");
+ VU.set(kScratchReg, E64, m1);
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
+ VU.set(kScratchReg, E32, m1);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i64x2_sconvert_i32x4_low(LiftoffRegister dst,
@@ -1781,92 +1854,92 @@ void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_eq");
+ WasmRvvEq(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i8x16_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_ne");
+ WasmRvvNe(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i8x16_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_gt_s");
+ WasmRvvGtS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i8x16_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_gt_u");
+ WasmRvvGtU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i8x16_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_ge_s");
+ WasmRvvGeS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i8x16_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_ge_u");
+ WasmRvvGeU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E8, m1);
}
void LiftoffAssembler::emit_i16x8_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_eq");
+ WasmRvvEq(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i16x8_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_ne");
+ WasmRvvNe(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i16x8_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_gt_s");
+ WasmRvvGtS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i16x8_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_gt_u");
+ WasmRvvGtU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i16x8_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_ge_s");
+ WasmRvvGeS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i16x8_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_ge_u");
+ WasmRvvGeU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E16, m1);
}
void LiftoffAssembler::emit_i32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_eq");
+ WasmRvvEq(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_i32x4_ne(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_ne");
+ WasmRvvNe(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_i32x4_gt_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_gt_s");
+ WasmRvvGtS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_i32x4_gt_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_gt_u");
+ WasmRvvGtU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_i32x4_ge_s(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_ge_s");
+ WasmRvvGeS(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_i32x4_ge_u(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_ge_u");
+ WasmRvvGeU(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV(), E32, m1);
}
void LiftoffAssembler::emit_f32x4_eq(LiftoffRegister dst, LiftoffRegister lhs,
@@ -1941,32 +2014,38 @@ void LiftoffAssembler::emit_f64x2_le(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_s128_const(LiftoffRegister dst,
const uint8_t imms[16]) {
- bailout(kSimd, "emit_s128_const");
+ WasmRvvS128const(dst.fp().toV(), imms);
}
void LiftoffAssembler::emit_s128_not(LiftoffRegister dst, LiftoffRegister src) {
- bailout(kSimd, "emit_s128_not");
+ VU.set(kScratchReg, E8, m1);
+ vnot_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_s128_and(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_s128_and");
+ VU.set(kScratchReg, E8, m1);
+ vand_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_s128_or(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_s128_or");
+ VU.set(kScratchReg, E8, m1);
+ vor_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_s128_xor(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_s128_xor");
+ VU.set(kScratchReg, E8, m1);
+ vxor_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_s128_and_not");
+ VU.set(kScratchReg, E8, m1);
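+ // Note: this relies on dst not aliasing lhs (aliasing rhs is safe).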
+ vnot_vv(dst.fp().toV(), rhs.fp().toV());
+ vand_vv(dst.fp().toV(), lhs.fp().toV(), dst.fp().toV());
}
void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
@@ -1978,32 +2057,55 @@ void LiftoffAssembler::emit_s128_select(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_neg(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i8x16_neg");
+ VU.set(kScratchReg, E8, m1);
+ vneg_vv(dst.fp().toV(), src.fp().toV());
}
void LiftoffAssembler::emit_v128_anytrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_v128_anytrue");
+ VU.set(kScratchReg, E8, m1);
+ Label t;
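+ // Unsigned-max reduction: the result is nonzero iff any lane is nonzero.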
+ vmv_sx(kSimd128ScratchReg, zero_reg);
+ vredmaxu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ beq(dst.gp(), zero_reg, &t);
+ li(dst.gp(), 1);
+ bind(&t);
}
void LiftoffAssembler::emit_i8x16_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i8x16_alltrue");
+ VU.set(kScratchReg, E8, m1);
+ Label alltrue;
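+ // Unsigned-min reduction seeded with all ones: the minimum is zero iff
+ // some lane is zero; any nonzero minimum is normalized to 1.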
+ li(kScratchReg, -1);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ beqz(dst.gp(), &alltrue);
+ li(dst.gp(), 1);
+ bind(&alltrue);
}
void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i8x16_bitmask");
+ VU.set(kScratchReg, E8, m1);
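+ // vmslt writes one mask bit per lane; reading the mask register back as
+ // a scalar yields the bitmask.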
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
+ VU.set(kScratchReg, E32, m1);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_shl");
+ VU.set(kScratchReg, E8, m1);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "emit_i8x16_shli");
+ DCHECK(is_uint5(rhs));
+ VU.set(kScratchReg, E8, m1);
+ vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
}
void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
@@ -2030,36 +2132,42 @@ void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_add");
+ VU.set(kScratchReg, E8, m1);
+ vadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_add_sat_s");
+ VU.set(kScratchReg, E8, m1);
+ vsadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_add_sat_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_add_sat_u");
+ VU.set(kScratchReg, E8, m1);
+ vsaddu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_sub");
+ VU.set(kScratchReg, E8, m1);
+ vsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_sub_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_sub_sat_s");
+ VU.set(kScratchReg, E8, m1);
+ vssub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_sub_sat_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i8x16_sub_sat_u");
+ VU.set(kScratchReg, E8, m1);
+ vssubu_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i8x16_min_s(LiftoffRegister dst,
@@ -2093,22 +2201,37 @@ void LiftoffAssembler::emit_i16x8_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i16x8_alltrue");
+ VU.set(kScratchReg, E16, m1);
+ Label alltrue;
+ li(kScratchReg, -1);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ beqz(dst.gp(), &alltrue);
+ li(dst.gp(), 1);
+ bind(&alltrue);
}
void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i16x8_bitmask");
+ VU.set(kScratchReg, E16, m1);
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
+ VU.set(kScratchReg, E32, m1);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_shl");
+ VU.set(kScratchReg, E16, m1);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "emit_i16x8_shli");
+ DCHECK(is_uint5(rhs));
+ VU.set(kScratchReg, E16, m1);
+ vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
}
void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
@@ -2135,7 +2258,8 @@ void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_add");
+ VU.set(kScratchReg, E16, m1);
+ vadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
@@ -2152,7 +2276,8 @@ void LiftoffAssembler::emit_i16x8_add_sat_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i16x8_sub");
+ VU.set(kScratchReg, E16, m1);
+ vsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i16x8_sub_sat_s(LiftoffRegister dst,
@@ -2203,22 +2328,36 @@ void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_alltrue");
+ VU.set(kScratchReg, E32, m1);
+ Label alltrue;
+ li(kScratchReg, -1);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ beqz(dst.gp(), &alltrue);
+ li(dst.gp(), 1);
+ bind(&alltrue);
}
void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_bitmask");
+ VU.set(kScratchReg, E32, m1);
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmslt_vv(kSimd128ScratchReg, src.fp().toV(), kSimd128RegZero);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
}
void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_shl");
+ VU.set(kScratchReg, E32, m1);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "emit_i32x4_shli");
+ DCHECK(is_uint5(rhs));
+ VU.set(kScratchReg, E32, m1);
+ vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
}
void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
@@ -2245,12 +2384,14 @@ void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_add");
+ VU.set(kScratchReg, E32, m1);
+ vadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i32x4_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i32x4_sub");
+ VU.set(kScratchReg, E32, m1);
+ vsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i32x4_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2295,17 +2436,32 @@ void LiftoffAssembler::emit_i64x2_neg(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i64x2_alltrue");
+ VU.set(kScratchReg, E64, m1);
+ Label alltrue;
+ li(kScratchReg, -1);
+ vmv_sx(kSimd128ScratchReg, kScratchReg);
+ vredminu_vs(kSimd128ScratchReg, src.fp().toV(), kSimd128ScratchReg);
+ vmv_xs(dst.gp(), kSimd128ScratchReg);
+ beqz(dst.gp(), &alltrue);
+ li(dst.gp(), 1);
+ bind(&alltrue);
}
void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i64x2_shl");
+ VU.set(kScratchReg, E64, m1);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), rhs.gp());
}
void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- bailout(kSimd, "emit_i64x2_shli");
+ VU.set(kScratchReg, E64, m1);
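+ // vsll_vi only encodes 5-bit immediates; route larger i64 shift amounts
+ // through a scratch register.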
+ if (is_uint5(rhs)) {
+ vsll_vi(dst.fp().toV(), lhs.fp().toV(), rhs);
+ } else {
+ li(kScratchReg, rhs);
+ vsll_vx(dst.fp().toV(), lhs.fp().toV(), kScratchReg);
+ }
}
void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
@@ -2332,12 +2488,14 @@ void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i64x2_add(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i64x2_add");
+ VU.set(kScratchReg, E64, m1);
+ vadd_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i64x2_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- bailout(kSimd, "emit_i64x2_sub");
+ VU.set(kScratchReg, E64, m1);
+ vsub_vv(dst.fp().toV(), lhs.fp().toV(), rhs.fp().toV());
}
void LiftoffAssembler::emit_i64x2_mul(LiftoffRegister dst, LiftoffRegister lhs,
@@ -2637,7 +2795,11 @@ void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
- bailout(kSimd, "emit_i32x4_abs");
+ VU.set(kScratchReg, E32, m1);
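+ // Copy src, then overwrite the negative lanes (flagged in v0) with 0 - src.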
+ vmv_vx(kSimd128RegZero, zero_reg);
+ vmv_vv(dst.fp().toV(), src.fp().toV());
+ vmslt_vv(v0, src.fp().toV(), kSimd128RegZero);
+ vsub_vv(dst.fp().toV(), kSimd128RegZero, src.fp().toV(), Mask);
}
void LiftoffAssembler::emit_i8x16_extract_lane_s(LiftoffRegister dst,
@@ -2667,7 +2829,9 @@ void LiftoffAssembler::emit_i16x8_extract_lane_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_extract_lane(LiftoffRegister dst,
LiftoffRegister lhs,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i32x4_extract_lane");
+ VU.set(kScratchReg, E32, m1);
+ vslidedown_vi(v31, lhs.fp().toV(), imm_lane_idx);
+ vmv_xs(dst.gp(), v31);
}
void LiftoffAssembler::emit_i64x2_extract_lane(LiftoffRegister dst,
@@ -2692,28 +2856,40 @@ void LiftoffAssembler::emit_i8x16_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i8x16_replace_lane");
+ VU.set(kScratchReg, E8, m1);
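+ // Build a one-hot mask in v0 for the target lane; vmerge_vx then writes
+ // src2 into that lane and copies src1 everywhere else.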
+ li(kScratchReg, 0x1 << imm_lane_idx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
}
void LiftoffAssembler::emit_i16x8_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i16x8_replace_lane");
+ VU.set(kScratchReg, E16, m1);
+ li(kScratchReg, 0x1 << imm_lane_idx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
}
void LiftoffAssembler::emit_i32x4_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i32x4_replace_lane");
+ VU.set(kScratchReg, E32, m1);
+ li(kScratchReg, 0x1 << imm_lane_idx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
}
void LiftoffAssembler::emit_i64x2_replace_lane(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2,
uint8_t imm_lane_idx) {
- bailout(kSimd, "emit_i64x2_replace_lane");
+ VU.set(kScratchReg, E64, m1);
+ li(kScratchReg, 0x1 << imm_lane_idx);
+ vmv_sx(v0, kScratchReg);
+ vmerge_vx(dst.fp().toV(), src2.gp(), src1.fp().toV());
}
void LiftoffAssembler::emit_f32x4_replace_lane(LiftoffRegister dst,
@@ -2730,9 +2906,9 @@ void LiftoffAssembler::emit_f64x2_replace_lane(LiftoffRegister dst,
bailout(kSimd, "emit_f64x2_replace_lane");
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
bailout(kSimd, "emit_s128_set_if_nan");
}
diff --git a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
index 722b0b074b..3db9ea0975 100644
--- a/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
+++ b/deps/v8/src/wasm/baseline/s390/liftoff-assembler-s390.h
@@ -2143,81 +2143,116 @@ void LiftoffAssembler::emit_smi_check(Register obj, Label* target,
b(condition, target); // branch if SMI
}
-#define SIMD_BINOP_LIST(V) \
- V(f64x2_add, F64x2Add) \
- V(f64x2_sub, F64x2Sub) \
- V(f64x2_mul, F64x2Mul) \
- V(f64x2_div, F64x2Div) \
- V(f64x2_min, F64x2Min) \
- V(f64x2_max, F64x2Max) \
- V(f64x2_eq, F64x2Eq) \
- V(f64x2_ne, F64x2Ne) \
- V(f64x2_lt, F64x2Lt) \
- V(f64x2_le, F64x2Le) \
- V(f32x4_add, F32x4Add) \
- V(f32x4_sub, F32x4Sub) \
- V(f32x4_mul, F32x4Mul) \
- V(f32x4_div, F32x4Div) \
- V(f32x4_min, F32x4Min) \
- V(f32x4_max, F32x4Max) \
- V(f32x4_eq, F32x4Eq) \
- V(f32x4_ne, F32x4Ne) \
- V(f32x4_lt, F32x4Lt) \
- V(f32x4_le, F32x4Le) \
- V(i64x2_add, I64x2Add) \
- V(i64x2_sub, I64x2Sub) \
- V(i64x2_mul, I64x2Mul) \
- V(i64x2_eq, I64x2Eq) \
- V(i64x2_ne, I64x2Ne) \
- V(i64x2_gt_s, I64x2GtS) \
- V(i64x2_ge_s, I64x2GeS) \
- V(i32x4_add, I32x4Add) \
- V(i32x4_sub, I32x4Sub) \
- V(i32x4_mul, I32x4Mul) \
- V(i32x4_eq, I32x4Eq) \
- V(i32x4_ne, I32x4Ne) \
- V(i32x4_gt_s, I32x4GtS) \
- V(i32x4_ge_s, I32x4GeS) \
- V(i32x4_gt_u, I32x4GtU) \
- V(i32x4_ge_u, I32x4GeU) \
- V(i32x4_min_s, I32x4MinS) \
- V(i32x4_min_u, I32x4MinU) \
- V(i32x4_max_s, I32x4MaxS) \
- V(i32x4_max_u, I32x4MaxU) \
- V(i16x8_add, I16x8Add) \
- V(i16x8_sub, I16x8Sub) \
- V(i16x8_mul, I16x8Mul) \
- V(i16x8_eq, I16x8Eq) \
- V(i16x8_ne, I16x8Ne) \
- V(i16x8_gt_s, I16x8GtS) \
- V(i16x8_ge_s, I16x8GeS) \
- V(i16x8_gt_u, I16x8GtU) \
- V(i16x8_ge_u, I16x8GeU) \
- V(i16x8_min_s, I16x8MinS) \
- V(i16x8_min_u, I16x8MinU) \
- V(i16x8_max_s, I16x8MaxS) \
- V(i16x8_max_u, I16x8MaxU) \
- V(i8x16_add, I8x16Add) \
- V(i8x16_sub, I8x16Sub) \
- V(i8x16_eq, I8x16Eq) \
- V(i8x16_ne, I8x16Ne) \
- V(i8x16_gt_s, I8x16GtS) \
- V(i8x16_ge_s, I8x16GeS) \
- V(i8x16_gt_u, I8x16GtU) \
- V(i8x16_ge_u, I8x16GeU) \
- V(i8x16_min_s, I8x16MinS) \
- V(i8x16_min_u, I8x16MinU) \
- V(i8x16_max_s, I8x16MaxS) \
- V(i8x16_max_u, I8x16MaxU)
-
-#define EMIT_SIMD_BINOP(name, op) \
+#define SIMD_BINOP_RR_LIST(V) \
+ V(f64x2_add, F64x2Add, fp) \
+ V(f64x2_sub, F64x2Sub, fp) \
+ V(f64x2_mul, F64x2Mul, fp) \
+ V(f64x2_div, F64x2Div, fp) \
+ V(f64x2_min, F64x2Min, fp) \
+ V(f64x2_max, F64x2Max, fp) \
+ V(f64x2_eq, F64x2Eq, fp) \
+ V(f64x2_ne, F64x2Ne, fp) \
+ V(f64x2_lt, F64x2Lt, fp) \
+ V(f64x2_le, F64x2Le, fp) \
+ V(f32x4_add, F32x4Add, fp) \
+ V(f32x4_sub, F32x4Sub, fp) \
+ V(f32x4_mul, F32x4Mul, fp) \
+ V(f32x4_div, F32x4Div, fp) \
+ V(f32x4_min, F32x4Min, fp) \
+ V(f32x4_max, F32x4Max, fp) \
+ V(f32x4_eq, F32x4Eq, fp) \
+ V(f32x4_ne, F32x4Ne, fp) \
+ V(f32x4_lt, F32x4Lt, fp) \
+ V(f32x4_le, F32x4Le, fp) \
+ V(i64x2_add, I64x2Add, fp) \
+ V(i64x2_sub, I64x2Sub, fp) \
+ V(i64x2_mul, I64x2Mul, fp) \
+ V(i64x2_eq, I64x2Eq, fp) \
+ V(i64x2_ne, I64x2Ne, fp) \
+ V(i64x2_gt_s, I64x2GtS, fp) \
+ V(i64x2_ge_s, I64x2GeS, fp) \
+ V(i64x2_shl, I64x2Shl, gp) \
+ V(i64x2_shr_s, I64x2ShrS, gp) \
+ V(i64x2_shr_u, I64x2ShrU, gp) \
+ V(i32x4_add, I32x4Add, fp) \
+ V(i32x4_sub, I32x4Sub, fp) \
+ V(i32x4_mul, I32x4Mul, fp) \
+ V(i32x4_eq, I32x4Eq, fp) \
+ V(i32x4_ne, I32x4Ne, fp) \
+ V(i32x4_gt_s, I32x4GtS, fp) \
+ V(i32x4_ge_s, I32x4GeS, fp) \
+ V(i32x4_gt_u, I32x4GtU, fp) \
+ V(i32x4_ge_u, I32x4GeU, fp) \
+ V(i32x4_min_s, I32x4MinS, fp) \
+ V(i32x4_min_u, I32x4MinU, fp) \
+ V(i32x4_max_s, I32x4MaxS, fp) \
+ V(i32x4_max_u, I32x4MaxU, fp) \
+ V(i32x4_shl, I32x4Shl, gp) \
+ V(i32x4_shr_s, I32x4ShrS, gp) \
+ V(i32x4_shr_u, I32x4ShrU, gp) \
+ V(i16x8_add, I16x8Add, fp) \
+ V(i16x8_sub, I16x8Sub, fp) \
+ V(i16x8_mul, I16x8Mul, fp) \
+ V(i16x8_eq, I16x8Eq, fp) \
+ V(i16x8_ne, I16x8Ne, fp) \
+ V(i16x8_gt_s, I16x8GtS, fp) \
+ V(i16x8_ge_s, I16x8GeS, fp) \
+ V(i16x8_gt_u, I16x8GtU, fp) \
+ V(i16x8_ge_u, I16x8GeU, fp) \
+ V(i16x8_min_s, I16x8MinS, fp) \
+ V(i16x8_min_u, I16x8MinU, fp) \
+ V(i16x8_max_s, I16x8MaxS, fp) \
+ V(i16x8_max_u, I16x8MaxU, fp) \
+ V(i16x8_shl, I16x8Shl, gp) \
+ V(i16x8_shr_s, I16x8ShrS, gp) \
+ V(i16x8_shr_u, I16x8ShrU, gp) \
+ V(i8x16_add, I8x16Add, fp) \
+ V(i8x16_sub, I8x16Sub, fp) \
+ V(i8x16_eq, I8x16Eq, fp) \
+ V(i8x16_ne, I8x16Ne, fp) \
+ V(i8x16_gt_s, I8x16GtS, fp) \
+ V(i8x16_ge_s, I8x16GeS, fp) \
+ V(i8x16_gt_u, I8x16GtU, fp) \
+ V(i8x16_ge_u, I8x16GeU, fp) \
+ V(i8x16_min_s, I8x16MinS, fp) \
+ V(i8x16_min_u, I8x16MinU, fp) \
+ V(i8x16_max_s, I8x16MaxS, fp) \
+ V(i8x16_max_u, I8x16MaxU, fp) \
+ V(i8x16_shl, I8x16Shl, gp) \
+ V(i8x16_shr_s, I8x16ShrS, gp) \
+ V(i8x16_shr_u, I8x16ShrU, gp)
+
+#define EMIT_SIMD_BINOP_RR(name, op, stype) \
void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
LiftoffRegister rhs) { \
- op(dst.fp(), lhs.fp(), rhs.fp()); \
+ op(dst.fp(), lhs.fp(), rhs.stype()); \
}
-SIMD_BINOP_LIST(EMIT_SIMD_BINOP)
-#undef EMIT_SIMD_BINOP
-#undef SIMD_BINOP_LIST
+SIMD_BINOP_RR_LIST(EMIT_SIMD_BINOP_RR)
+#undef EMIT_SIMD_BINOP_RR
+#undef SIMD_BINOP_RR_LIST
+
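+// The RR list handles register operands (shifts read the amount from a GP
+// register, selected by the third macro column); the RI list below handles
+// the immediate-shift variants.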
+#define SIMD_BINOP_RI_LIST(V) \
+ V(i64x2_shli, I64x2Shl) \
+ V(i64x2_shri_s, I64x2ShrS) \
+ V(i64x2_shri_u, I64x2ShrU) \
+ V(i32x4_shli, I32x4Shl) \
+ V(i32x4_shri_s, I32x4ShrS) \
+ V(i32x4_shri_u, I32x4ShrU) \
+ V(i16x8_shli, I16x8Shl) \
+ V(i16x8_shri_s, I16x8ShrS) \
+ V(i16x8_shri_u, I16x8ShrU) \
+ V(i8x16_shli, I8x16Shl) \
+ V(i8x16_shri_s, I8x16ShrS) \
+ V(i8x16_shri_u, I8x16ShrU)
+
+#define EMIT_SIMD_BINOP_RI(name, op) \
+ void LiftoffAssembler::emit_##name(LiftoffRegister dst, LiftoffRegister lhs, \
+ int32_t rhs) { \
+ op(dst.fp(), lhs.fp(), Operand(rhs)); \
+ }
+SIMD_BINOP_RI_LIST(EMIT_SIMD_BINOP_RI)
+#undef EMIT_SIMD_BINOP_RI
+#undef SIMD_BINOP_RI_LIST
#define SIMD_UNOP_LIST(V) \
V(f64x2_splat, F64x2Splat, fp, fp) \
@@ -2424,38 +2459,6 @@ void LiftoffAssembler::emit_i64x2_alltrue(LiftoffRegister dst,
bailout(kSimd, "i64x2_alltrue");
}
-void LiftoffAssembler::emit_i64x2_shl(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_shl");
-}
-
-void LiftoffAssembler::emit_i64x2_shli(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t rhs) {
- bailout(kSimd, "i64x2_shli");
-}
-
-void LiftoffAssembler::emit_i64x2_shr_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_shr_s");
-}
-
-void LiftoffAssembler::emit_i64x2_shri_s(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i64x2_shri_s");
-}
-
-void LiftoffAssembler::emit_i64x2_shr_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i64x2_shr_u");
-}
-
-void LiftoffAssembler::emit_i64x2_shri_u(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i64x2_shri_u");
-}
-
void LiftoffAssembler::emit_i64x2_extmul_low_i32x4_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
@@ -2520,38 +2523,6 @@ void LiftoffAssembler::emit_i32x4_bitmask(LiftoffRegister dst,
bailout(kSimd, "i32x4_bitmask");
}
-void LiftoffAssembler::emit_i32x4_shl(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i32x4_shl");
-}
-
-void LiftoffAssembler::emit_i32x4_shli(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t rhs) {
- bailout(kSimd, "i32x4_shli");
-}
-
-void LiftoffAssembler::emit_i32x4_shr_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i32x4_shr_s");
-}
-
-void LiftoffAssembler::emit_i32x4_shri_s(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i32x4_shri_s");
-}
-
-void LiftoffAssembler::emit_i32x4_shr_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i32x4_shr_u");
-}
-
-void LiftoffAssembler::emit_i32x4_shri_u(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i32x4_shri_u");
-}
-
void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2607,38 +2578,6 @@ void LiftoffAssembler::emit_i16x8_bitmask(LiftoffRegister dst,
bailout(kSimd, "i16x8_bitmask");
}
-void LiftoffAssembler::emit_i16x8_shl(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i16x8_shl");
-}
-
-void LiftoffAssembler::emit_i16x8_shli(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t rhs) {
- bailout(kSimd, "i16x8_shli");
-}
-
-void LiftoffAssembler::emit_i16x8_shr_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i16x8_shr_s");
-}
-
-void LiftoffAssembler::emit_i16x8_shri_s(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i16x8_shri_s");
-}
-
-void LiftoffAssembler::emit_i16x8_shr_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i16x8_shr_u");
-}
-
-void LiftoffAssembler::emit_i16x8_shri_u(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i16x8_shri_u");
-}
-
void LiftoffAssembler::emit_i16x8_add_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -2736,38 +2675,6 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
bailout(kSimd, "i8x16_bitmask");
}
-void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i8x16_shl");
-}
-
-void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
- int32_t rhs) {
- bailout(kSimd, "i8x16_shli");
-}
-
-void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i8x16_shr_s");
-}
-
-void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i8x16_shri_s");
-}
-
-void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
- LiftoffRegister lhs,
- LiftoffRegister rhs) {
- bailout(kSimd, "i8x16_shr_u");
-}
-
-void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
- LiftoffRegister lhs, int32_t rhs) {
- bailout(kSimd, "i8x16_shri_u");
-}
-
void LiftoffAssembler::emit_i8x16_add_sat_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
@@ -3134,14 +3041,40 @@ void LiftoffAssembler::MaybeOSR() {}
void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
ValueKind kind) {
- UNIMPLEMENTED();
+ Label return_nan, done;
+ if (kind == kF32) {
+ cebr(src, src);
+ bunordered(&return_nan);
+ } else {
+ DCHECK_EQ(kind, kF64);
+ cdbr(src, src);
+ bunordered(&return_nan);
+ }
+ b(&done);
+ bind(&return_nan);
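+ // A 32-bit NaN store suffices here, assuming the flag slot is only
+ // tested for being nonzero.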
+ StoreF32LE(src, MemOperand(dst), r0);
+ bind(&done);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
- UNIMPLEMENTED();
+ Label return_nan, done;
+ if (lane_kind == kF32) {
+ vfce(tmp_s128.fp(), src.fp(), src.fp(), Condition(1), Condition(0),
+ Condition(2));
+ b(Condition(0x5), &return_nan); // If any or all are NaN.
+ } else {
+ DCHECK_EQ(lane_kind, kF64);
+ vfce(tmp_s128.fp(), src.fp(), src.fp(), Condition(1), Condition(0),
+ Condition(3));
+ b(Condition(0x5), &return_nan);
+ }
+ b(&done);
+ bind(&return_nan);
+ StoreF32LE(src.fp(), MemOperand(dst), r0);
+ bind(&done);
}
void LiftoffStackSlots::Construct(int param_slots) {
diff --git a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
index d5cda7b3c4..50032eac23 100644
--- a/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
+++ b/deps/v8/src/wasm/baseline/x64/liftoff-assembler-x64.h
@@ -1317,7 +1317,9 @@ void LiftoffAssembler::emit_i64_addi(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_i64_sub(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- if (dst.gp() == rhs.gp()) {
+ if (lhs.gp() == rhs.gp()) {
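+ // lhs - lhs is zero; the neg/add path below would be wrong when all
+ // three registers alias.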
+ xorq(dst.gp(), dst.gp());
+ } else if (dst.gp() == rhs.gp()) {
negq(dst.gp());
addq(dst.gp(), lhs.gp());
} else {
@@ -2335,29 +2337,6 @@ void EmitSimdShiftOpImm(LiftoffAssembler* assm, LiftoffRegister dst,
}
}
-template <bool is_signed>
-void EmitI8x16Shr(LiftoffAssembler* assm, LiftoffRegister dst,
- LiftoffRegister lhs, LiftoffRegister rhs) {
- // Same algorithm as the one in code-generator-x64.cc.
- assm->Punpckhbw(kScratchDoubleReg, lhs.fp());
- assm->Punpcklbw(dst.fp(), lhs.fp());
- // Prepare shift value
- assm->movq(kScratchRegister, rhs.gp());
- // Take shift value modulo 8.
- assm->andq(kScratchRegister, Immediate(7));
- assm->addq(kScratchRegister, Immediate(8));
- assm->Movq(liftoff::kScratchDoubleReg2, kScratchRegister);
- if (is_signed) {
- assm->Psraw(kScratchDoubleReg, liftoff::kScratchDoubleReg2);
- assm->Psraw(dst.fp(), liftoff::kScratchDoubleReg2);
- assm->Packsswb(dst.fp(), kScratchDoubleReg);
- } else {
- assm->Psrlw(kScratchDoubleReg, liftoff::kScratchDoubleReg2);
- assm->Psrlw(dst.fp(), liftoff::kScratchDoubleReg2);
- assm->Packuswb(dst.fp(), kScratchDoubleReg);
- }
-}
-
inline void EmitAnyTrue(LiftoffAssembler* assm, LiftoffRegister dst,
LiftoffRegister src) {
assm->xorq(dst.gp(), dst.gp());
@@ -2414,21 +2393,11 @@ void LiftoffAssembler::LoadTransform(LiftoffRegister dst, Register src_addr,
} else {
DCHECK_EQ(LoadTransformationKind::kSplat, transform);
if (memtype == MachineType::Int8()) {
- Pinsrb(dst.fp(), dst.fp(), src_op, 0);
- Pxor(kScratchDoubleReg, kScratchDoubleReg);
- Pshufb(dst.fp(), kScratchDoubleReg);
+ S128Load8Splat(dst.fp(), src_op, kScratchDoubleReg);
} else if (memtype == MachineType::Int16()) {
- Pinsrw(dst.fp(), dst.fp(), src_op, 0);
- Pshuflw(dst.fp(), dst.fp(), uint8_t{0});
- Punpcklqdq(dst.fp(), dst.fp());
+ S128Load16Splat(dst.fp(), src_op, kScratchDoubleReg);
} else if (memtype == MachineType::Int32()) {
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope avx_scope(this, AVX);
- vbroadcastss(dst.fp(), src_op);
- } else {
- movss(dst.fp(), src_op);
- shufps(dst.fp(), dst.fp(), byte{0});
- }
+ S128Load32Splat(dst.fp(), src_op);
} else if (memtype == MachineType::Int64()) {
Movddup(dst.fp(), src_op);
}
@@ -2440,18 +2409,17 @@ void LiftoffAssembler::LoadLane(LiftoffRegister dst, LiftoffRegister src,
uintptr_t offset_imm, LoadType type,
uint8_t laneidx, uint32_t* protected_load_pc) {
Operand src_op = liftoff::GetMemOp(this, addr, offset_reg, offset_imm);
- *protected_load_pc = pc_offset();
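+ // Let the Pinsr* macro-assembler helpers record protected_load_pc at
+ // the actual memory access instruction.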
MachineType mem_type = type.mem_type();
if (mem_type == MachineType::Int8()) {
- Pinsrb(dst.fp(), src.fp(), src_op, laneidx);
+ Pinsrb(dst.fp(), src.fp(), src_op, laneidx, protected_load_pc);
} else if (mem_type == MachineType::Int16()) {
- Pinsrw(dst.fp(), src.fp(), src_op, laneidx);
+ Pinsrw(dst.fp(), src.fp(), src_op, laneidx, protected_load_pc);
} else if (mem_type == MachineType::Int32()) {
- Pinsrd(dst.fp(), src.fp(), src_op, laneidx);
+ Pinsrd(dst.fp(), src.fp(), src_op, laneidx, protected_load_pc);
} else {
DCHECK_EQ(MachineType::Int64(), mem_type);
- Pinsrq(dst.fp(), src.fp(), src_op, laneidx);
+ Pinsrq(dst.fp(), src.fp(), src_op, laneidx, protected_load_pc);
}
}
@@ -2515,26 +2483,24 @@ void LiftoffAssembler::emit_i8x16_shuffle(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_swizzle(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- I8x16Swizzle(dst.fp(), lhs.fp(), rhs.fp());
+ I8x16Swizzle(dst.fp(), lhs.fp(), rhs.fp(), kScratchDoubleReg,
+ kScratchRegister);
}
void LiftoffAssembler::emit_i8x16_popcnt(LiftoffRegister dst,
LiftoffRegister src) {
- I8x16Popcnt(dst.fp(), src.fp(), liftoff::kScratchDoubleReg2);
+ I8x16Popcnt(dst.fp(), src.fp(), kScratchDoubleReg,
+ liftoff::kScratchDoubleReg2, kScratchRegister);
}
void LiftoffAssembler::emit_i8x16_splat(LiftoffRegister dst,
LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pxor(kScratchDoubleReg, kScratchDoubleReg);
- Pshufb(dst.fp(), kScratchDoubleReg);
+ I8x16Splat(dst.fp(), src.gp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_i16x8_splat(LiftoffRegister dst,
LiftoffRegister src) {
- Movd(dst.fp(), src.gp());
- Pshuflw(dst.fp(), dst.fp(), static_cast<uint8_t>(0));
- Pshufd(dst.fp(), dst.fp(), static_cast<uint8_t>(0));
+ I16x8Splat(dst.fp(), src.gp());
}
void LiftoffAssembler::emit_i32x4_splat(LiftoffRegister dst,
@@ -2927,89 +2893,37 @@ void LiftoffAssembler::emit_i8x16_bitmask(LiftoffRegister dst,
void LiftoffAssembler::emit_i8x16_shl(LiftoffRegister dst, LiftoffRegister lhs,
LiftoffRegister rhs) {
- static constexpr RegClass tmp_simd_rc = reg_class_for(kS128);
- LiftoffRegister tmp_simd =
- GetUnusedRegister(tmp_simd_rc, LiftoffRegList::ForRegs(dst, lhs));
- // Mask off the unwanted bits before word-shifting.
- Pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
- movq(kScratchRegister, rhs.gp());
- andq(kScratchRegister, Immediate(7));
- addq(kScratchRegister, Immediate(8));
- Movq(tmp_simd.fp(), kScratchRegister);
- Psrlw(kScratchDoubleReg, tmp_simd.fp());
- Packuswb(kScratchDoubleReg, kScratchDoubleReg);
-
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpand(dst.fp(), lhs.fp(), kScratchDoubleReg);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- andps(dst.fp(), kScratchDoubleReg);
- }
- subq(kScratchRegister, Immediate(8));
- Movq(tmp_simd.fp(), kScratchRegister);
- Psllw(dst.fp(), tmp_simd.fp());
+ I8x16Shl(dst.fp(), lhs.fp(), rhs.gp(), kScratchRegister, kScratchDoubleReg,
+ liftoff::kScratchDoubleReg2);
}
void LiftoffAssembler::emit_i8x16_shli(LiftoffRegister dst, LiftoffRegister lhs,
int32_t rhs) {
- byte shift = static_cast<byte>(rhs & 0x7);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsllw(dst.fp(), lhs.fp(), shift);
- } else {
- if (dst.fp() != lhs.fp()) movaps(dst.fp(), lhs.fp());
- psllw(dst.fp(), shift);
- }
-
- uint8_t bmask = static_cast<uint8_t>(0xff << shift);
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- movl(kScratchRegister, Immediate(mask));
- Movd(kScratchDoubleReg, kScratchRegister);
- Pshufd(kScratchDoubleReg, kScratchDoubleReg, uint8_t{0});
- Pand(dst.fp(), kScratchDoubleReg);
+ I8x16Shl(dst.fp(), lhs.fp(), rhs, kScratchRegister, kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_shr_s(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitI8x16Shr</*is_signed=*/true>(this, dst, lhs, rhs);
+ I8x16ShrS(dst.fp(), lhs.fp(), rhs.gp(), kScratchRegister, kScratchDoubleReg,
+ liftoff::kScratchDoubleReg2);
}
void LiftoffAssembler::emit_i8x16_shri_s(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
- Punpckhbw(kScratchDoubleReg, lhs.fp());
- Punpcklbw(dst.fp(), lhs.fp());
- uint8_t shift = (rhs & 7) + 8;
- Psraw(kScratchDoubleReg, shift);
- Psraw(dst.fp(), shift);
- Packsswb(dst.fp(), kScratchDoubleReg);
+ I8x16ShrS(dst.fp(), lhs.fp(), rhs, kScratchDoubleReg);
}
void LiftoffAssembler::emit_i8x16_shr_u(LiftoffRegister dst,
LiftoffRegister lhs,
LiftoffRegister rhs) {
- liftoff::EmitI8x16Shr</*is_signed=*/false>(this, dst, lhs, rhs);
+ I8x16ShrU(dst.fp(), lhs.fp(), rhs.gp(), kScratchRegister, kScratchDoubleReg,
+ liftoff::kScratchDoubleReg2);
}
void LiftoffAssembler::emit_i8x16_shri_u(LiftoffRegister dst,
LiftoffRegister lhs, int32_t rhs) {
- // Perform 16-bit shift, then mask away high bits.
- uint8_t shift = rhs & 7; // i.InputInt3(1);
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vpsrlw(dst.fp(), lhs.fp(), byte{shift});
- } else if (dst != lhs) {
- Movaps(dst.fp(), lhs.fp());
- psrlw(dst.fp(), byte{shift});
- }
-
- uint8_t bmask = 0xff >> shift;
- uint32_t mask = bmask << 24 | bmask << 16 | bmask << 8 | bmask;
- movl(kScratchRegister, Immediate(mask));
- Movd(kScratchDoubleReg, kScratchRegister);
- Pshufd(kScratchDoubleReg, kScratchDoubleReg, byte{0});
- Pand(dst.fp(), kScratchDoubleReg);
+ I8x16ShrU(dst.fp(), lhs.fp(), rhs, kScratchRegister, kScratchDoubleReg);
}
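
A note on the replaced sequences above: x64 has no 8-bit SIMD shift, so the shared I8x16Shl/I8x16ShrS/I8x16ShrU helpers (like the inline code they replace) shift byte lanes as 16-bit words and mask off the bits that leak across the lane boundary. A minimal scalar sketch of that masking, with two lanes packed into one word (illustrative only; ShlTwoLanes is a made-up name, not V8 code):

#include <cassert>
#include <cstdint>

// Shift two packed i8 lanes left via a single 16-bit shift, as psllw would,
// then clear the bits that leaked from the low lane into the high lane.
void ShlTwoLanes(uint8_t hi, uint8_t lo, int shift,
                 uint8_t* hi_out, uint8_t* lo_out) {
  shift &= 7;  // wasm shift counts are taken modulo the lane width
  uint16_t word = static_cast<uint16_t>(hi) << 8 | lo;
  uint16_t shifted = static_cast<uint16_t>(word << shift);
  uint8_t mask = static_cast<uint8_t>(0xFF << shift);
  *hi_out = static_cast<uint8_t>(shifted >> 8) & mask;  // drop leaked bits
  *lo_out = static_cast<uint8_t>(shifted) & mask;       // no-op for low lane
}

int main() {
  uint8_t hi, lo;
  ShlTwoLanes(0x01, 0x81, 1, &hi, &lo);
  assert(hi == 0x02 && lo == 0x02);  // per-lane: 0x01 << 1 and 0x81 << 1
}

The immediate variants use the same mask with a compile-time shift count, which is why they still take a scalar scratch register to materialize it.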
void LiftoffAssembler::emit_i8x16_add(LiftoffRegister dst, LiftoffRegister lhs,
@@ -3220,14 +3134,13 @@ void LiftoffAssembler::emit_i16x8_max_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_s(LiftoffRegister dst,
LiftoffRegister src) {
- I16x8ExtAddPairwiseI8x16S(dst.fp(), src.fp());
+ I16x8ExtAddPairwiseI8x16S(dst.fp(), src.fp(), kScratchDoubleReg,
+ kScratchRegister);
}
void LiftoffAssembler::emit_i16x8_extadd_pairwise_i8x16_u(LiftoffRegister dst,
LiftoffRegister src) {
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i8x16_splat_0x01());
- Pmaddubsw(dst.fp(), src.fp(), op);
+ I16x8ExtAddPairwiseI8x16U(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_i16x8_extmul_low_i8x16_s(LiftoffRegister dst,
@@ -3259,7 +3172,7 @@ void LiftoffAssembler::emit_i16x8_extmul_high_i8x16_u(LiftoffRegister dst,
void LiftoffAssembler::emit_i16x8_q15mulr_sat_s(LiftoffRegister dst,
LiftoffRegister src1,
LiftoffRegister src2) {
- I16x8Q15MulRSatS(dst.fp(), src1.fp(), src2.fp());
+ I16x8Q15MulRSatS(dst.fp(), src1.fp(), src2.fp(), kScratchDoubleReg);
}
void LiftoffAssembler::emit_i32x4_neg(LiftoffRegister dst,
@@ -3376,14 +3289,12 @@ void LiftoffAssembler::emit_i32x4_dot_i16x8_s(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_s(LiftoffRegister dst,
LiftoffRegister src) {
- Operand op = ExternalReferenceAsOperand(
- ExternalReference::address_of_wasm_i16x8_splat_0x0001());
- Pmaddwd(dst.fp(), src.fp(), op);
+ I32x4ExtAddPairwiseI16x8S(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_i32x4_extadd_pairwise_i16x8_u(LiftoffRegister dst,
LiftoffRegister src) {
- I32x4ExtAddPairwiseI16x8U(dst.fp(), src.fp());
+ I32x4ExtAddPairwiseI16x8U(dst.fp(), src.fp(), kScratchDoubleReg);
}
namespace liftoff {
@@ -3574,28 +3485,12 @@ void LiftoffAssembler::emit_i64x2_uconvert_i32x4_high(LiftoffRegister dst,
void LiftoffAssembler::emit_f32x4_abs(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Psrld(kScratchDoubleReg, static_cast<byte>(1));
- Andps(dst.fp(), kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Psrld(dst.fp(), static_cast<byte>(1));
- Andps(dst.fp(), src.fp());
- }
+ Absps(dst.fp(), src.fp());
}
void LiftoffAssembler::emit_f32x4_neg(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Pslld(kScratchDoubleReg, byte{31});
- Xorps(dst.fp(), kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Pslld(dst.fp(), byte{31});
- Xorps(dst.fp(), src.fp());
- }
+ Negps(dst.fp(), src.fp());
}
void LiftoffAssembler::emit_f32x4_sqrt(LiftoffRegister dst,
@@ -3730,28 +3625,12 @@ void LiftoffAssembler::emit_f32x4_pmax(LiftoffRegister dst, LiftoffRegister lhs,
void LiftoffAssembler::emit_f64x2_abs(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Psrlq(kScratchDoubleReg, byte{1});
- Andpd(dst.fp(), kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Psrlq(dst.fp(), byte{1});
- Andpd(dst.fp(), src.fp());
- }
+ Abspd(dst.fp(), src.fp());
}
void LiftoffAssembler::emit_f64x2_neg(LiftoffRegister dst,
LiftoffRegister src) {
- if (dst.fp() == src.fp()) {
- Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
- Psllq(kScratchDoubleReg, static_cast<byte>(63));
- Xorpd(dst.fp(), kScratchDoubleReg);
- } else {
- Pcmpeqd(dst.fp(), dst.fp());
- Psllq(dst.fp(), static_cast<byte>(63));
- Xorpd(dst.fp(), src.fp());
- }
+ Negpd(dst.fp(), src.fp());
}
void LiftoffAssembler::emit_f64x2_sqrt(LiftoffRegister dst,
@@ -3842,7 +3721,7 @@ void LiftoffAssembler::emit_f64x2_convert_low_i32x4_s(LiftoffRegister dst,
void LiftoffAssembler::emit_f64x2_convert_low_i32x4_u(LiftoffRegister dst,
LiftoffRegister src) {
- F64x2ConvertLowI32x4U(dst.fp(), src.fp());
+ F64x2ConvertLowI32x4U(dst.fp(), src.fp(), kScratchRegister);
}
void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
@@ -3852,26 +3731,7 @@ void LiftoffAssembler::emit_f64x2_promote_low_f32x4(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_sconvert_f32x4(LiftoffRegister dst,
LiftoffRegister src) {
- // NAN->0
- if (CpuFeatures::IsSupported(AVX)) {
- CpuFeatureScope scope(this, AVX);
- vcmpeqps(kScratchDoubleReg, src.fp(), src.fp());
- vpand(dst.fp(), src.fp(), kScratchDoubleReg);
- } else {
- movaps(kScratchDoubleReg, src.fp());
- cmpeqps(kScratchDoubleReg, kScratchDoubleReg);
- if (dst.fp() != src.fp()) movaps(dst.fp(), src.fp());
- andps(dst.fp(), kScratchDoubleReg);
- }
- // Set top bit if >= 0 (but not -0.0!).
- Pxor(kScratchDoubleReg, dst.fp());
- // Convert to int.
- Cvttps2dq(dst.fp(), dst.fp());
- // Set top bit if >=0 is now < 0.
- Pand(kScratchDoubleReg, dst.fp());
- Psrad(kScratchDoubleReg, byte{31});
- // Set positive overflow lanes to 0x7FFFFFFF.
- Pxor(dst.fp(), kScratchDoubleReg);
+ I32x4SConvertF32x4(dst.fp(), src.fp(), kScratchDoubleReg, kScratchRegister);
}
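
For context on the block above: cvttps2dq returns 0x80000000 for NaN and out-of-range inputs, so the removed sequence (now the shared I32x4SConvertF32x4 helper) zeroes NaN lanes first and patches positive overflow to INT32_MAX afterwards. A scalar model of the resulting i32x4.trunc_sat_f32x4_s lane semantics (a sketch of the semantics, not the helper itself):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <limits>

int32_t TruncSatS(float x) {
  if (std::isnan(x)) return 0;  // NaN lanes become 0
  if (x >= 2147483648.0f) {     // 2^31 is not representable in int32
    return std::numeric_limits<int32_t>::max();
  }
  if (x < -2147483648.0f) {     // -2^31 itself converts exactly
    return std::numeric_limits<int32_t>::min();
  }
  return static_cast<int32_t>(x);  // in range: truncate toward zero
}

int main() {
  assert(TruncSatS(NAN) == 0);
  assert(TruncSatS(3e9f) == std::numeric_limits<int32_t>::max());
  assert(TruncSatS(-3e9f) == std::numeric_limits<int32_t>::min());
  assert(TruncSatS(-1.9f) == -1);
}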
void LiftoffAssembler::emit_i32x4_uconvert_f32x4(LiftoffRegister dst,
@@ -4012,12 +3872,14 @@ void LiftoffAssembler::emit_i32x4_uconvert_i16x8_high(LiftoffRegister dst,
void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_s_zero(LiftoffRegister dst,
LiftoffRegister src) {
- I32x4TruncSatF64x2SZero(dst.fp(), src.fp());
+ I32x4TruncSatF64x2SZero(dst.fp(), src.fp(), kScratchDoubleReg,
+ kScratchRegister);
}
void LiftoffAssembler::emit_i32x4_trunc_sat_f64x2_u_zero(LiftoffRegister dst,
LiftoffRegister src) {
- I32x4TruncSatF64x2UZero(dst.fp(), src.fp());
+ I32x4TruncSatF64x2UZero(dst.fp(), src.fp(), kScratchDoubleReg,
+ kScratchRegister);
}
void LiftoffAssembler::emit_s128_and_not(LiftoffRegister dst,
@@ -4322,11 +4184,7 @@ void LiftoffAssembler::CallIndirect(const ValueKindSig* sig,
popq(kScratchRegister);
target = kScratchRegister;
}
- if (FLAG_untrusted_code_mitigations) {
- RetpolineCall(target);
- } else {
- call(target);
- }
+ call(target);
}
void LiftoffAssembler::TailCallIndirect(Register target) {
@@ -4334,11 +4192,7 @@ void LiftoffAssembler::TailCallIndirect(Register target) {
popq(kScratchRegister);
target = kScratchRegister;
}
- if (FLAG_untrusted_code_mitigations) {
- RetpolineJump(target);
- } else {
- jmp(target);
- }
+ jmp(target);
}
void LiftoffAssembler::CallRuntimeStub(WasmCode::RuntimeStubId sid) {
@@ -4376,19 +4230,19 @@ void LiftoffAssembler::emit_set_if_nan(Register dst, DoubleRegister src,
bind(&ret);
}
-void LiftoffAssembler::emit_s128_set_if_nan(Register dst, DoubleRegister src,
+void LiftoffAssembler::emit_s128_set_if_nan(Register dst, LiftoffRegister src,
Register tmp_gp,
- DoubleRegister tmp_fp,
+ LiftoffRegister tmp_s128,
ValueKind lane_kind) {
if (lane_kind == kF32) {
- movaps(tmp_fp, src);
- cmpunordps(tmp_fp, tmp_fp);
+ movaps(tmp_s128.fp(), src.fp());
+ cmpunordps(tmp_s128.fp(), tmp_s128.fp());
} else {
DCHECK_EQ(lane_kind, kF64);
- movapd(tmp_fp, src);
- cmpunordpd(tmp_fp, tmp_fp);
+ movapd(tmp_s128.fp(), src.fp());
+ cmpunordpd(tmp_s128.fp(), tmp_s128.fp());
}
- pmovmskb(tmp_gp, tmp_fp);
+ pmovmskb(tmp_gp, tmp_s128.fp());
orl(Operand(dst, 0), tmp_gp);
}
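
The NaN scan above relies on a value comparing unordered with itself: a lane is all-ones after cmpunordps(x, x) exactly when it holds NaN, and pmovmskb collapses the lanes into a scalar mask. A scalar sketch of that predicate (AnyLaneNaN is a made-up name, not V8 code):

#include <cstdint>

// Returns a non-zero byte mask iff some f32 lane is NaN, mirroring
// cmpunordps followed by pmovmskb.
uint32_t AnyLaneNaN(const float lanes[4]) {
  uint32_t mask = 0;
  for (int i = 0; i < 4; ++i) {
    // x != x holds exactly for NaN, the unordered self-comparison.
    if (lanes[i] != lanes[i]) mask |= 0xFu << (4 * i);  // 4 byte bits per lane
  }
  return mask;
}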
diff --git a/deps/v8/src/wasm/c-api.cc b/deps/v8/src/wasm/c-api.cc
index 5a1ab579e7..a2b026eff3 100644
--- a/deps/v8/src/wasm/c-api.cc
+++ b/deps/v8/src/wasm/c-api.cc
@@ -26,6 +26,7 @@
#include <iostream>
#include "include/libplatform/libplatform.h"
+#include "include/v8-initialization.h"
#include "src/api/api-inl.h"
#include "src/base/platform/wrappers.h"
#include "src/builtins/builtins.h"
@@ -396,6 +397,11 @@ auto Engine::make(own<Config>&& config) -> own<Engine> {
if (!engine) return own<Engine>();
engine->platform = v8::platform::NewDefaultPlatform();
v8::V8::InitializePlatform(engine->platform.get());
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ if (!v8::V8::InitializeVirtualMemoryCage()) {
+ FATAL("Could not initialize the virtual memory cage");
+ }
+#endif
v8::V8::Initialize();
return make_own(seal<Engine>(engine));
}
diff --git a/deps/v8/src/wasm/c-api.h b/deps/v8/src/wasm/c-api.h
index 0dba237d30..97a8d2d5f6 100644
--- a/deps/v8/src/wasm/c-api.h
+++ b/deps/v8/src/wasm/c-api.h
@@ -9,7 +9,8 @@
#ifndef V8_WASM_C_API_H_
#define V8_WASM_C_API_H_
-#include "include/v8.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
#include "src/common/globals.h"
#include "src/handles/handles.h"
#include "third_party/wasm-api/wasm.hh"
diff --git a/deps/v8/src/wasm/code-space-access.cc b/deps/v8/src/wasm/code-space-access.cc
index 0f71c9a224..83cb5ddea1 100644
--- a/deps/v8/src/wasm/code-space-access.cc
+++ b/deps/v8/src/wasm/code-space-access.cc
@@ -12,6 +12,12 @@ namespace internal {
namespace wasm {
thread_local int CodeSpaceWriteScope::code_space_write_nesting_level_ = 0;
+// The thread-local counter (above) is only valid if each thread works on a
+// single module at a time. This second thread-local checks that.
+#if defined(DEBUG) && !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
+thread_local NativeModule* CodeSpaceWriteScope::current_native_module_ =
+ nullptr;
+#endif
// TODO(jkummerow): Background threads could permanently stay in
// writable mode; only the main thread has to switch back and forth.
@@ -20,6 +26,12 @@ CodeSpaceWriteScope::CodeSpaceWriteScope(NativeModule*) {
#else // !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
CodeSpaceWriteScope::CodeSpaceWriteScope(NativeModule* native_module)
: native_module_(native_module) {
+#ifdef DEBUG
+ if (code_space_write_nesting_level_ == 0) {
+ current_native_module_ = native_module;
+ }
+ DCHECK_EQ(native_module, current_native_module_);
+#endif // DEBUG
#endif // !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
if (code_space_write_nesting_level_ == 0) SetWritable();
code_space_write_nesting_level_++;
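
The debug check added here pins down the invariant that makes a plain nesting counter sound: within one thread, write scopes for different modules must not interleave. A simplified sketch of the RAII pattern, with placeholder permission hooks (WriteScopeSketch and the hook names are assumptions, not the real class):

#include <cassert>

class WriteScopeSketch {
 public:
  explicit WriteScopeSketch(const void* module) {
    if (nesting_ == 0) current_module_ = module;  // outermost scope records it
    assert(module == current_module_);  // nested scopes must match the module
    if (nesting_ == 0) MakeWritable();  // flip W^X only on the outermost entry
    ++nesting_;
  }
  ~WriteScopeSketch() {
    if (--nesting_ == 0) MakeExecutable();  // restore on the outermost exit
  }

 private:
  static void MakeWritable() {}    // placeholder for the permission switch
  static void MakeExecutable() {}  // placeholder
  static thread_local int nesting_;
  static thread_local const void* current_module_;
};

thread_local int WriteScopeSketch::nesting_ = 0;
thread_local const void* WriteScopeSketch::current_module_ = nullptr;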
diff --git a/deps/v8/src/wasm/code-space-access.h b/deps/v8/src/wasm/code-space-access.h
index 96f852e63b..788bb8eca3 100644
--- a/deps/v8/src/wasm/code-space-access.h
+++ b/deps/v8/src/wasm/code-space-access.h
@@ -55,6 +55,9 @@ class V8_NODISCARD CodeSpaceWriteScope final {
private:
static thread_local int code_space_write_nesting_level_;
+#if defined(DEBUG) && !V8_HAS_PTHREAD_JIT_WRITE_PROTECT
+ static thread_local NativeModule* current_native_module_;
+#endif
void SetWritable() const;
void SetExecutable() const;
diff --git a/deps/v8/src/wasm/function-body-decoder-impl.h b/deps/v8/src/wasm/function-body-decoder-impl.h
index 20c6b30ffc..618e8f013c 100644
--- a/deps/v8/src/wasm/function-body-decoder-impl.h
+++ b/deps/v8/src/wasm/function-body-decoder-impl.h
@@ -2224,6 +2224,7 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
int non_defaultable = 0;
for (uint32_t index = params_count; index < this->num_locals(); index++) {
if (!VALIDATE(this->enabled_.has_nn_locals() ||
+ this->enabled_.has_unsafe_nn_locals() ||
this->local_type(index).is_defaultable())) {
this->DecodeError(
"Cannot define function-level local of non-defaultable type %s",
@@ -2634,19 +2635,15 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return 0;
}
// +1 because the current try block is not included in the count.
- Control* target = control_at(imm.depth + 1);
- if (imm.depth + 1 < control_depth() - 1 && !target->is_try()) {
- this->DecodeError(
- "delegate target must be a try block or the function block");
- return 0;
- }
- if (target->is_try_catch() || target->is_try_catchall()) {
- this->DecodeError(
- "cannot delegate inside the catch handler of the target");
- return 0;
+ uint32_t target_depth = imm.depth + 1;
+ while (target_depth < control_depth() - 1 &&
+ (!control_at(target_depth)->is_try() ||
+ control_at(target_depth)->is_try_catch() ||
+ control_at(target_depth)->is_try_catchall())) {
+ target_depth++;
}
FallThrough();
- CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(Delegate, imm.depth + 1, c);
+ CALL_INTERFACE_IF_OK_AND_PARENT_REACHABLE(Delegate, target_depth, c);
current_catch_ = c->previous_catch;
EndControl();
PopControl();
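
The rewritten delegate validation no longer rejects targets that are not try blocks; it walks outward from the delegated depth, past blocks, loops, and already-entered catch handlers, until it reaches a plain try or the function-level block. A standalone sketch of that search (the enum and function names are made up for illustration):

#include <cstdint>
#include <vector>

enum class Kind { kBlock, kLoop, kTry, kTryCatch, kTryCatchAll };

// control[0] is the innermost entry; control.back() is the function block.
uint32_t ResolveDelegateTarget(const std::vector<Kind>& control,
                               uint32_t delegated_depth) {
  uint32_t depth = delegated_depth + 1;  // the current try is not counted
  while (depth < control.size() - 1 && control[depth] != Kind::kTry) {
    ++depth;  // skip non-try entries and catch handlers of entered trys
  }
  return depth;  // a plain try, or the function block as the final target
}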
@@ -4264,7 +4261,6 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
}
case kExprArrayCopy: {
NON_CONST_ONLY
- CHECK_PROTOTYPE_OPCODE(gc_experiments);
ArrayIndexImmediate<validate> dst_imm(this, this->pc_ + opcode_length);
if (!this->Validate(this->pc_ + opcode_length, dst_imm)) return 0;
if (!VALIDATE(dst_imm.array_type->mutability())) {
@@ -4299,7 +4295,6 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return opcode_length + dst_imm.length + src_imm.length;
}
case kExprArrayInit: {
- CHECK_PROTOTYPE_OPCODE(gc_experiments);
if (decoding_mode != kInitExpression) {
this->DecodeError("array.init is only allowed in init. expressions");
return 0;
@@ -4368,8 +4363,6 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
return opcode_length + imm.length;
}
case kExprRttFreshSub:
- CHECK_PROTOTYPE_OPCODE(gc_experiments);
- V8_FALLTHROUGH;
case kExprRttSub: {
IndexImmediate<validate> imm(this, this->pc_ + opcode_length,
"type index");
@@ -4426,6 +4419,8 @@ class WasmFullDecoder : public WasmDecoder<validate, decoding_mode> {
if (V8_LIKELY(ObjectRelatedWithRtt(obj, rtt))) {
CALL_INTERFACE(RefTest, obj, rtt, &value);
} else {
+ CALL_INTERFACE(Drop);
+ CALL_INTERFACE(Drop);
// Unrelated types. Will always fail.
CALL_INTERFACE(I32Const, &value, 0);
}
diff --git a/deps/v8/src/wasm/function-compiler.cc b/deps/v8/src/wasm/function-compiler.cc
index cd9d941a00..e520a7d680 100644
--- a/deps/v8/src/wasm/function-compiler.cc
+++ b/deps/v8/src/wasm/function-compiler.cc
@@ -134,7 +134,7 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
case ExecutionTier::kTurbofan:
result = compiler::ExecuteTurbofanWasmCompilation(
- env, func_body, func_index_, counters, detected);
+ env, wire_bytes_storage, func_body, func_index_, counters, detected);
result.for_debugging = for_debugging_;
break;
}
@@ -142,30 +142,6 @@ WasmCompilationResult WasmCompilationUnit::ExecuteFunctionCompilation(
return result;
}
-namespace {
-bool must_record_function_compilation(Isolate* isolate) {
- return isolate->logger()->is_listening_to_code_events() ||
- isolate->is_profiling();
-}
-
-PRINTF_FORMAT(3, 4)
-void RecordWasmHeapStubCompilation(Isolate* isolate, Handle<Code> code,
- const char* format, ...) {
- DCHECK(must_record_function_compilation(isolate));
-
- base::ScopedVector<char> buffer(128);
- va_list arguments;
- va_start(arguments, format);
- int len = base::VSNPrintF(buffer, format, arguments);
- CHECK_LT(0, len);
- va_end(arguments);
- Handle<String> name_str =
- isolate->factory()->NewStringFromAsciiChecked(buffer.begin());
- PROFILE(isolate, CodeCreateEvent(CodeEventListener::STUB_TAG,
- Handle<AbstractCode>::cast(code), name_str));
-}
-} // namespace
-
// static
void WasmCompilationUnit::CompileWasmFunction(Isolate* isolate,
NativeModule* native_module,
@@ -243,17 +219,19 @@ void JSToWasmWrapperCompilationUnit::Execute() {
}
Handle<Code> JSToWasmWrapperCompilationUnit::Finalize() {
- Handle<Code> code;
if (use_generic_wrapper_) {
- code = isolate_->builtins()->code_handle(Builtin::kGenericJSToWasmWrapper);
- } else {
- CompilationJob::Status status = job_->FinalizeJob(isolate_);
- CHECK_EQ(status, CompilationJob::SUCCEEDED);
- code = job_->compilation_info()->code();
+ return isolate_->builtins()->code_handle(Builtin::kGenericJSToWasmWrapper);
}
- if (!use_generic_wrapper_ && must_record_function_compilation(isolate_)) {
- RecordWasmHeapStubCompilation(
- isolate_, code, "%s", job_->compilation_info()->GetDebugName().get());
+
+ CompilationJob::Status status = job_->FinalizeJob(isolate_);
+ CHECK_EQ(status, CompilationJob::SUCCEEDED);
+ Handle<Code> code = job_->compilation_info()->code();
+ if (isolate_->logger()->is_listening_to_code_events() ||
+ isolate_->is_profiling()) {
+ Handle<String> name = isolate_->factory()->NewStringFromAsciiChecked(
+ job_->compilation_info()->GetDebugName().get());
+ PROFILE(isolate_, CodeCreateEvent(CodeEventListener::STUB_TAG,
+ Handle<AbstractCode>::cast(code), name));
}
return code;
}
diff --git a/deps/v8/src/wasm/graph-builder-interface.cc b/deps/v8/src/wasm/graph-builder-interface.cc
index 84f34cc0ed..b8eb6b7050 100644
--- a/deps/v8/src/wasm/graph-builder-interface.cc
+++ b/deps/v8/src/wasm/graph-builder-interface.cc
@@ -109,9 +109,12 @@ class WasmGraphBuildingInterface {
: ControlBase(std::forward<Args>(args)...) {}
};
- explicit WasmGraphBuildingInterface(compiler::WasmGraphBuilder* builder,
- int func_index)
- : builder_(builder), func_index_(func_index) {}
+ WasmGraphBuildingInterface(compiler::WasmGraphBuilder* builder,
+ int func_index,
+ EndpointInstrumentationMode instrumentation)
+ : builder_(builder),
+ func_index_(func_index),
+ instrumentation_(instrumentation) {}
void StartFunction(FullDecoder* decoder) {
// Get the branch hints map for this function (if available)
@@ -138,7 +141,9 @@ class WasmGraphBuildingInterface {
while (index < num_locals) {
ValueType type = decoder->local_type(index);
TFNode* node;
- if (decoder->enabled_.has_nn_locals() && !type.is_defaultable()) {
+ if ((decoder->enabled_.has_nn_locals() ||
+ decoder->enabled_.has_unsafe_nn_locals()) &&
+ !type.is_defaultable()) {
DCHECK(type.is_reference());
// TODO(jkummerow): Consider using "the hole" instead, to make any
// illegal uses more obvious.
@@ -153,7 +158,9 @@ class WasmGraphBuildingInterface {
}
LoadContextIntoSsa(ssa_env);
- if (FLAG_trace_wasm) builder_->TraceFunctionEntry(decoder->position());
+ if (FLAG_trace_wasm && instrumentation_ == kInstrumentEndpoints) {
+ builder_->TraceFunctionEntry(decoder->position());
+ }
}
// Reload the instance cache entries into the Ssa Environment.
@@ -163,7 +170,11 @@ class WasmGraphBuildingInterface {
void StartFunctionBody(FullDecoder* decoder, Control* block) {}
- void FinishFunction(FullDecoder*) { builder_->PatchInStackCheckIfNeeded(); }
+ void FinishFunction(FullDecoder*) {
+ if (instrumentation_ == kInstrumentEndpoints) {
+ builder_->PatchInStackCheckIfNeeded();
+ }
+ }
void OnFirstError(FullDecoder*) {}
@@ -475,7 +486,7 @@ class WasmGraphBuildingInterface {
: decoder->stack_value(ret_count + drop_values);
GetNodes(values.begin(), stack_base, ret_count);
}
- if (FLAG_trace_wasm) {
+ if (FLAG_trace_wasm && instrumentation_ == kInstrumentEndpoints) {
builder_->TraceFunctionExit(base::VectorOf(values), decoder->position());
}
builder_->Return(base::VectorOf(values));
@@ -649,21 +660,15 @@ class WasmGraphBuildingInterface {
void CallRef(FullDecoder* decoder, const Value& func_ref,
const FunctionSig* sig, uint32_t sig_index, const Value args[],
Value returns[]) {
- CheckForNull null_check = func_ref.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
- DoCall(decoder, kCallRef, 0, null_check, func_ref.node, sig, sig_index,
- args, returns);
+ DoCall(decoder, kCallRef, 0, NullCheckFor(func_ref.type), func_ref.node,
+ sig, sig_index, args, returns);
}
void ReturnCallRef(FullDecoder* decoder, const Value& func_ref,
const FunctionSig* sig, uint32_t sig_index,
const Value args[]) {
- CheckForNull null_check = func_ref.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
- DoReturnCall(decoder, kCallRef, 0, null_check, func_ref, sig, sig_index,
- args);
+ DoReturnCall(decoder, kCallRef, 0, NullCheckFor(func_ref.type), func_ref,
+ sig, sig_index, args);
}
void BrOnNull(FullDecoder* decoder, const Value& ref_object, uint32_t depth) {
@@ -922,23 +927,17 @@ class WasmGraphBuildingInterface {
void StructGet(FullDecoder* decoder, const Value& struct_object,
const FieldImmediate<validate>& field, bool is_signed,
Value* result) {
- CheckForNull null_check = struct_object.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
result->node = builder_->StructGet(
struct_object.node, field.struct_imm.struct_type, field.field_imm.index,
- null_check, is_signed, decoder->position());
+ NullCheckFor(struct_object.type), is_signed, decoder->position());
}
void StructSet(FullDecoder* decoder, const Value& struct_object,
const FieldImmediate<validate>& field,
const Value& field_value) {
- CheckForNull null_check = struct_object.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
builder_->StructSet(struct_object.node, field.struct_imm.struct_type,
- field.field_imm.index, field_value.node, null_check,
- decoder->position());
+ field.field_imm.index, field_value.node,
+ NullCheckFor(struct_object.type), decoder->position());
}
void ArrayNewWithRtt(FullDecoder* decoder,
@@ -967,36 +966,28 @@ class WasmGraphBuildingInterface {
void ArrayGet(FullDecoder* decoder, const Value& array_obj,
const ArrayIndexImmediate<validate>& imm, const Value& index,
bool is_signed, Value* result) {
- CheckForNull null_check = array_obj.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
- result->node =
- builder_->ArrayGet(array_obj.node, imm.array_type, index.node,
- null_check, is_signed, decoder->position());
+ result->node = builder_->ArrayGet(array_obj.node, imm.array_type,
+ index.node, NullCheckFor(array_obj.type),
+ is_signed, decoder->position());
}
void ArraySet(FullDecoder* decoder, const Value& array_obj,
const ArrayIndexImmediate<validate>& imm, const Value& index,
const Value& value) {
- CheckForNull null_check = array_obj.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
builder_->ArraySet(array_obj.node, imm.array_type, index.node, value.node,
- null_check, decoder->position());
+ NullCheckFor(array_obj.type), decoder->position());
}
void ArrayLen(FullDecoder* decoder, const Value& array_obj, Value* result) {
- CheckForNull null_check = array_obj.type.is_nullable()
- ? CheckForNull::kWithNullCheck
- : CheckForNull::kWithoutNullCheck;
- result->node =
- builder_->ArrayLen(array_obj.node, null_check, decoder->position());
+ result->node = builder_->ArrayLen(
+ array_obj.node, NullCheckFor(array_obj.type), decoder->position());
}
void ArrayCopy(FullDecoder* decoder, const Value& dst, const Value& dst_index,
const Value& src, const Value& src_index,
const Value& length) {
- builder_->ArrayCopy(dst.node, dst_index.node, src.node, src_index.node,
+ builder_->ArrayCopy(dst.node, dst_index.node, NullCheckFor(dst.type),
+ src.node, src_index.node, NullCheckFor(src.type),
length.node, decoder->position());
}
@@ -1177,6 +1168,7 @@ class WasmGraphBuildingInterface {
const BranchHintMap* branch_hints_ = nullptr;
// Tracks loop data for loop unrolling.
std::vector<compiler::WasmLoopInfo> loop_infos_;
+ EndpointInstrumentationMode instrumentation_;
TFNode* effect() { return builder_->effect(); }
@@ -1547,7 +1539,6 @@ class WasmGraphBuildingInterface {
WRAP_CACHE_FIELD(mem_start);
WRAP_CACHE_FIELD(mem_size);
- WRAP_CACHE_FIELD(mem_mask);
#undef WRAP_CACHE_FIELD
}
}
@@ -1597,6 +1588,12 @@ class WasmGraphBuildingInterface {
builder_->TerminateThrow(effect(), control());
}
}
+
+ CheckForNull NullCheckFor(ValueType type) {
+ DCHECK(type.is_object_reference());
+ return type.is_nullable() ? CheckForNull::kWithNullCheck
+ : CheckForNull::kWithoutNullCheck;
+ }
};
} // namespace
@@ -1607,10 +1604,12 @@ DecodeResult BuildTFGraph(AccountingAllocator* allocator,
WasmFeatures* detected, const FunctionBody& body,
std::vector<compiler::WasmLoopInfo>* loop_infos,
compiler::NodeOriginTable* node_origins,
- int func_index) {
+ int func_index,
+ EndpointInstrumentationMode instrumentation) {
Zone zone(allocator, ZONE_NAME);
WasmFullDecoder<Decoder::kFullValidation, WasmGraphBuildingInterface> decoder(
- &zone, module, enabled, detected, body, builder, func_index);
+ &zone, module, enabled, detected, body, builder, func_index,
+ instrumentation);
if (node_origins) {
builder->AddBytecodePositionDecorator(node_origins, &decoder);
}
diff --git a/deps/v8/src/wasm/graph-builder-interface.h b/deps/v8/src/wasm/graph-builder-interface.h
index 6c668e2b0a..c264bc8330 100644
--- a/deps/v8/src/wasm/graph-builder-interface.h
+++ b/deps/v8/src/wasm/graph-builder-interface.h
@@ -27,12 +27,18 @@ struct FunctionBody;
class WasmFeatures;
struct WasmModule;
+enum EndpointInstrumentationMode {
+ kDoNotInstrumentEndpoints,
+ kInstrumentEndpoints
+};
+
V8_EXPORT_PRIVATE DecodeResult
BuildTFGraph(AccountingAllocator* allocator, const WasmFeatures& enabled,
const WasmModule* module, compiler::WasmGraphBuilder* builder,
WasmFeatures* detected, const FunctionBody& body,
std::vector<compiler::WasmLoopInfo>* loop_infos,
- compiler::NodeOriginTable* node_origins, int func_index);
+ compiler::NodeOriginTable* node_origins, int func_index,
+ EndpointInstrumentationMode instrumentation);
} // namespace wasm
} // namespace internal
diff --git a/deps/v8/src/wasm/jump-table-assembler.cc b/deps/v8/src/wasm/jump-table-assembler.cc
index db2514791b..4dc808fe33 100644
--- a/deps/v8/src/wasm/jump-table-assembler.cc
+++ b/deps/v8/src/wasm/jump-table-assembler.cc
@@ -268,6 +268,36 @@ void JumpTableAssembler::NopBytes(int bytes) {
}
}
+#elif V8_TARGET_ARCH_LOONG64
+void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
+ Address lazy_compile_target) {
+ DCHECK(is_int32(func_index));
+ int start = pc_offset();
+ li(kWasmCompileLazyFuncIndexRegister, static_cast<int32_t>(func_index)); // max. 2 instr
+ // Jump produces max 4 instructions.
+ Jump(lazy_compile_target, RelocInfo::NONE);
+ int nop_bytes = start + kLazyCompileTableSlotSize - pc_offset();
+ DCHECK_EQ(nop_bytes % kInstrSize, 0);
+ for (int i = 0; i < nop_bytes; i += kInstrSize) nop();
+}
+bool JumpTableAssembler::EmitJumpSlot(Address target) {
+ PatchAndJump(target);
+ return true;
+}
+void JumpTableAssembler::EmitFarJumpSlot(Address target) {
+ JumpToInstructionStream(target);
+}
+void JumpTableAssembler::PatchFarJumpSlot(Address slot, Address target) {
+ UNREACHABLE();
+}
+void JumpTableAssembler::NopBytes(int bytes) {
+ DCHECK_LE(0, bytes);
+ DCHECK_EQ(0, bytes % kInstrSize);
+ for (; bytes > 0; bytes -= kInstrSize) {
+ nop();
+ }
+}
+
#elif V8_TARGET_ARCH_PPC64
void JumpTableAssembler::EmitLazyCompileJumpSlot(uint32_t func_index,
Address lazy_compile_target) {
diff --git a/deps/v8/src/wasm/jump-table-assembler.h b/deps/v8/src/wasm/jump-table-assembler.h
index 3963de9824..433608decb 100644
--- a/deps/v8/src/wasm/jump-table-assembler.h
+++ b/deps/v8/src/wasm/jump-table-assembler.h
@@ -224,6 +224,11 @@ class V8_EXPORT_PRIVATE JumpTableAssembler : public MacroAssembler {
static constexpr int kJumpTableSlotSize = 6 * kInstrSize;
static constexpr int kFarJumpTableSlotSize = 6 * kInstrSize;
static constexpr int kLazyCompileTableSlotSize = 10 * kInstrSize;
+#elif V8_TARGET_ARCH_LOONG64
+ static constexpr int kJumpTableLineSize = 8 * kInstrSize;
+ static constexpr int kJumpTableSlotSize = 8 * kInstrSize;
+ static constexpr int kFarJumpTableSlotSize = 4 * kInstrSize;
+ static constexpr int kLazyCompileTableSlotSize = 8 * kInstrSize;
#else
#error Unknown architecture.
#endif
diff --git a/deps/v8/src/wasm/module-compiler.cc b/deps/v8/src/wasm/module-compiler.cc
index ea714cbe4c..2d66102c1f 100644
--- a/deps/v8/src/wasm/module-compiler.cc
+++ b/deps/v8/src/wasm/module-compiler.cc
@@ -1646,12 +1646,8 @@ void CompileNativeModule(Isolate* isolate,
return;
}
- if (!FLAG_predictable) {
- // For predictable mode, do not finalize wrappers yet to make sure we catch
- // validation errors first.
- compilation_state->FinalizeJSToWasmWrappers(
- isolate, native_module->module(), export_wrappers_out);
- }
+ compilation_state->FinalizeJSToWasmWrappers(isolate, native_module->module(),
+ export_wrappers_out);
compilation_state->WaitForCompilationEvent(
CompilationEvent::kFinishedBaselineCompilation);
@@ -1663,9 +1659,6 @@ void CompileNativeModule(Isolate* isolate,
ValidateSequentially(wasm_module, native_module.get(), isolate->counters(),
isolate->allocator(), thrower, lazy_module);
CHECK(thrower->error());
- } else if (FLAG_predictable) {
- compilation_state->FinalizeJSToWasmWrappers(
- isolate, native_module->module(), export_wrappers_out);
}
}
@@ -3052,13 +3045,13 @@ void CompilationStateImpl::InitializeCompilationProgressAfterDeserialization(
}
compilation_progress_.assign(module->num_declared_functions,
kProgressAfterDeserialization);
- uint32_t num_imported_functions = module->num_imported_functions;
for (auto func_index : missing_functions) {
if (FLAG_wasm_lazy_compilation) {
- native_module_->UseLazyStub(num_imported_functions + func_index);
+ native_module_->UseLazyStub(func_index);
}
- compilation_progress_[func_index] = SetupCompilationProgressForFunction(
- lazy_module, module, enabled_features, func_index);
+ compilation_progress_[declared_function_index(module, func_index)] =
+ SetupCompilationProgressForFunction(lazy_module, module,
+ enabled_features, func_index);
}
}
auto builder = std::make_unique<CompilationUnitBuilder>(native_module_);
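
The indexing fix above hinges on two function-index spaces: missing_functions now carries module-level indices (imports included), while compilation_progress_ has one slot per declared function. A tiny sketch of the mapping that declared_function_index performs, as used here (assumed minimal struct; not the real WasmModule):

#include <cassert>
#include <cstdint>

struct Module {
  uint32_t num_imported_functions;
};

// Module-level function indices count imports first; per-declared-function
// tables such as compilation_progress_ skip the imports.
uint32_t DeclaredFunctionIndex(const Module& m, uint32_t func_index) {
  assert(func_index >= m.num_imported_functions);
  return func_index - m.num_imported_functions;
}

int main() {
  Module m{3};                               // 3 imported functions
  assert(DeclaredFunctionIndex(m, 3) == 0);  // first declared function
  assert(DeclaredFunctionIndex(m, 7) == 4);
}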
@@ -3665,13 +3658,17 @@ WasmCode* CompileImportWrapper(
CompilationEnv env = native_module->CreateCompilationEnv();
WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
&env, kind, sig, source_positions, expected_arity);
- std::unique_ptr<WasmCode> wasm_code = native_module->AddCode(
- result.func_index, result.code_desc, result.frame_slot_count,
- result.tagged_parameter_slots,
- result.protected_instructions_data.as_vector(),
- result.source_positions.as_vector(), GetCodeKind(result),
- ExecutionTier::kNone, kNoDebugging);
- WasmCode* published_code = native_module->PublishCode(std::move(wasm_code));
+ WasmCode* published_code;
+ {
+ CodeSpaceWriteScope code_space_write_scope(native_module);
+ std::unique_ptr<WasmCode> wasm_code = native_module->AddCode(
+ result.func_index, result.code_desc, result.frame_slot_count,
+ result.tagged_parameter_slots,
+ result.protected_instructions_data.as_vector(),
+ result.source_positions.as_vector(), GetCodeKind(result),
+ ExecutionTier::kNone, kNoDebugging);
+ published_code = native_module->PublishCode(std::move(wasm_code));
+ }
(*cache_scope)[key] = published_code;
published_code->IncRef();
counters->wasm_generated_code_size()->Increment(
diff --git a/deps/v8/src/wasm/module-compiler.h b/deps/v8/src/wasm/module-compiler.h
index e8bd2597bc..16ac753547 100644
--- a/deps/v8/src/wasm/module-compiler.h
+++ b/deps/v8/src/wasm/module-compiler.h
@@ -44,9 +44,11 @@ class CompilationResultResolver;
class ErrorThrower;
class ModuleCompiler;
class NativeModule;
+class StreamingDecoder;
class WasmCode;
struct WasmModule;
+V8_EXPORT_PRIVATE
std::shared_ptr<NativeModule> CompileToNativeModule(
Isolate* isolate, const WasmFeatures& enabled, ErrorThrower* thrower,
std::shared_ptr<const WasmModule> module, const ModuleWireBytes& wire_bytes,
diff --git a/deps/v8/src/wasm/module-decoder.cc b/deps/v8/src/wasm/module-decoder.cc
index b014f8a8c7..d2c78f0da5 100644
--- a/deps/v8/src/wasm/module-decoder.cc
+++ b/deps/v8/src/wasm/module-decoder.cc
@@ -563,9 +563,10 @@ class ModuleDecoderImpl : public Decoder {
break;
}
case kWasmFunctionExtendingTypeCode: {
- if (!enabled_features_.has_gc_experiments()) {
+ if (!enabled_features_.has_gc()) {
errorf(pc(),
- "nominal types need --experimental-wasm-gc-experiments");
+ "invalid function type definition, enable with "
+ "--experimental-wasm-gc");
break;
}
const FunctionSig* s = consume_sig(module_->signature_zone.get());
@@ -591,9 +592,10 @@ class ModuleDecoderImpl : public Decoder {
break;
}
case kWasmStructExtendingTypeCode: {
- if (!enabled_features_.has_gc_experiments()) {
+ if (!enabled_features_.has_gc()) {
errorf(pc(),
- "nominal types need --experimental-wasm-gc-experiments");
+ "invalid struct type definition, enable with "
+ "--experimental-wasm-gc");
break;
}
const StructType* s = consume_struct(module_->signature_zone.get());
@@ -617,9 +619,10 @@ class ModuleDecoderImpl : public Decoder {
break;
}
case kWasmArrayExtendingTypeCode: {
- if (!enabled_features_.has_gc_experiments()) {
+ if (!enabled_features_.has_gc()) {
errorf(pc(),
- "nominal types need --experimental-wasm-gc-experiments");
+ "invalid array type definition, enable with "
+ "--experimental-wasm-gc");
break;
}
const ArrayType* type = consume_array(module_->signature_zone.get());
diff --git a/deps/v8/src/wasm/module-instantiate.cc b/deps/v8/src/wasm/module-instantiate.cc
index f56ab55cd7..1040f77ecd 100644
--- a/deps/v8/src/wasm/module-instantiate.cc
+++ b/deps/v8/src/wasm/module-instantiate.cc
@@ -65,9 +65,10 @@ class CompileImportWrapperJob final : public JobTask {
}
void Run(JobDelegate* delegate) override {
- CodeSpaceWriteScope code_space_write_scope(native_module_);
while (base::Optional<WasmImportWrapperCache::CacheKey> key =
queue_->pop()) {
+ // TODO(wasm): Batch code publishing, to avoid repeated locking and
+ // permission switching.
CompileImportWrapper(native_module_, counters_, key->kind, key->signature,
key->expected_arity, cache_scope_);
if (delegate->ShouldYield()) return;
@@ -162,6 +163,7 @@ Handle<Map> CreateStructMap(Isolate* isolate, const WasmModule* module,
map->SetInstanceDescriptors(isolate, *descriptors,
descriptors->number_of_descriptors());
map->set_is_extensible(false);
+ WasmStruct::EncodeInstanceSizeInMap(real_instance_size, *map);
return map;
}
@@ -187,6 +189,8 @@ Handle<Map> CreateArrayMap(Isolate* isolate, const WasmModule* module,
map->SetInstanceDescriptors(isolate, *descriptors,
descriptors->number_of_descriptors());
map->set_is_extensible(false);
+ WasmArray::EncodeElementSizeInMap(type->element_type().element_size_bytes(),
+ *map);
return map;
}
@@ -1035,7 +1039,8 @@ bool InstanceBuilder::ProcessImportedFunction(
if (kind == compiler::WasmImportCallKind::kJSFunctionArityMismatch) {
Handle<JSFunction> function = Handle<JSFunction>::cast(js_receiver);
SharedFunctionInfo shared = function->shared();
- expected_arity = shared.internal_formal_parameter_count();
+ expected_arity =
+ shared.internal_formal_parameter_count_without_receiver();
}
NativeModule* native_module = instance->module_object().native_module();
@@ -1439,7 +1444,8 @@ void InstanceBuilder::CompileImportWrappers(
compiler::WasmImportCallKind::kJSFunctionArityMismatch) {
Handle<JSFunction> function = Handle<JSFunction>::cast(resolved.second);
SharedFunctionInfo shared = function->shared();
- expected_arity = shared.internal_formal_parameter_count();
+ expected_arity =
+ shared.internal_formal_parameter_count_without_receiver();
}
WasmImportWrapperCache::CacheKey key(kind, sig, expected_arity);
diff --git a/deps/v8/src/wasm/wasm-code-manager.cc b/deps/v8/src/wasm/wasm-code-manager.cc
index d080d1285e..0c8a570c71 100644
--- a/deps/v8/src/wasm/wasm-code-manager.cc
+++ b/deps/v8/src/wasm/wasm-code-manager.cc
@@ -267,14 +267,17 @@ void WasmCode::LogCode(Isolate* isolate, const char* source_url,
"wasm-function[%d]", index()));
name = base::VectorOf(name_buffer);
}
- int code_offset = module->functions[index_].code.offset();
- PROFILE(isolate, CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this, name,
- source_url, code_offset, script_id));
+ // Record source positions before adding code; otherwise the added code has
+ // no source positions to associate with it.
if (!source_positions().empty()) {
- LOG_CODE_EVENT(isolate, CodeLinePosInfoRecordEvent(instruction_start(),
- source_positions()));
+ LOG_CODE_EVENT(isolate, WasmCodeLinePosInfoRecordEvent(instruction_start(),
+ source_positions()));
}
+
+ int code_offset = module->functions[index_].code.offset();
+ PROFILE(isolate, CodeCreateEvent(CodeEventListener::FUNCTION_TAG, this, name,
+ source_url, code_offset, script_id));
}
void WasmCode::Validate() const {
@@ -664,12 +667,13 @@ class CheckWritableMemoryRegions {
DCHECK(std::none_of(writable_memory_.begin(), writable_memory_.end(),
[](auto region) { return region.is_empty(); }));
- // Regions are sorted and disjoint.
- std::accumulate(writable_memory_.begin(), writable_memory_.end(),
- Address{0}, [](Address previous_end, auto region) {
- DCHECK_LT(previous_end, region.begin());
- return region.end();
- });
+ // Regions are sorted and disjoint. (std::accumulate is marked nodiscard on
+ // MSVC, so USE is required to prevent build failures in debug builds.)
+ USE(std::accumulate(writable_memory_.begin(), writable_memory_.end(),
+ Address{0}, [](Address previous_end, auto region) {
+ DCHECK_LT(previous_end, region.begin());
+ return region.end();
+ }));
}
private:
@@ -1032,12 +1036,9 @@ void NativeModule::LogWasmCodes(Isolate* isolate, Script script) {
// Log all owned code, not just the current entries in the code table. This
// will also include import wrappers.
- base::RecursiveMutexGuard lock(&allocation_mutex_);
- for (auto& owned_entry : owned_code_) {
- owned_entry.second->LogCode(isolate, source_url.get(), script.id());
- }
- for (auto& owned_entry : new_owned_code_) {
- owned_entry->LogCode(isolate, source_url.get(), script.id());
+ WasmCodeRefScope code_ref_scope;
+ for (auto& code : SnapshotAllOwnedCode()) {
+ code->LogCode(isolate, source_url.get(), script.id());
}
}
@@ -1179,7 +1180,6 @@ std::unique_ptr<WasmCode> NativeModule::AddCode(
ExecutionTier tier, ForDebugging for_debugging) {
base::Vector<byte> code_space;
NativeModule::JumpTablesRef jump_table_ref;
- CodeSpaceWriteScope code_space_write_scope(this);
{
base::RecursiveMutexGuard guard{&allocation_mutex_};
code_space = code_allocator_.AllocateForCode(this, desc.instr_size);
@@ -1429,6 +1429,17 @@ std::vector<WasmCode*> NativeModule::SnapshotCodeTable() const {
return std::vector<WasmCode*>{start, end};
}
+std::vector<WasmCode*> NativeModule::SnapshotAllOwnedCode() const {
+ base::RecursiveMutexGuard lock(&allocation_mutex_);
+ if (!new_owned_code_.empty()) TransferNewOwnedCodeLocked();
+
+ std::vector<WasmCode*> all_code(owned_code_.size());
+ std::transform(owned_code_.begin(), owned_code_.end(), all_code.begin(),
+ [](auto& entry) { return entry.second.get(); });
+ std::for_each(all_code.begin(), all_code.end(), WasmCodeRefScope::AddRef);
+ return all_code;
+}
+
WasmCode* NativeModule::GetCode(uint32_t index) const {
base::RecursiveMutexGuard guard(&allocation_mutex_);
WasmCode* code = code_table_[declared_function_index(module(), index)];
@@ -2113,6 +2124,12 @@ bool WasmCodeManager::HasMemoryProtectionKeySupport() const {
return memory_protection_key_ != kNoMemoryProtectionKey;
}
+void WasmCodeManager::InitializeMemoryProtectionKeyForTesting() {
+ if (memory_protection_key_ == kNoMemoryProtectionKey) {
+ memory_protection_key_ = AllocateMemoryProtectionKey();
+ }
+}
+
std::shared_ptr<NativeModule> WasmCodeManager::NewNativeModule(
Isolate* isolate, const WasmFeatures& enabled, size_t code_size_estimate,
std::shared_ptr<const WasmModule> module) {
diff --git a/deps/v8/src/wasm/wasm-code-manager.h b/deps/v8/src/wasm/wasm-code-manager.h
index 2baf46e888..70ef6d75a9 100644
--- a/deps/v8/src/wasm/wasm-code-manager.h
+++ b/deps/v8/src/wasm/wasm-code-manager.h
@@ -102,6 +102,14 @@ struct WasmModule;
IF_TSAN(V, TSANRelaxedStore32SaveFP) \
IF_TSAN(V, TSANRelaxedStore64IgnoreFP) \
IF_TSAN(V, TSANRelaxedStore64SaveFP) \
+ IF_TSAN(V, TSANSeqCstStore8IgnoreFP) \
+ IF_TSAN(V, TSANSeqCstStore8SaveFP) \
+ IF_TSAN(V, TSANSeqCstStore16IgnoreFP) \
+ IF_TSAN(V, TSANSeqCstStore16SaveFP) \
+ IF_TSAN(V, TSANSeqCstStore32IgnoreFP) \
+ IF_TSAN(V, TSANSeqCstStore32SaveFP) \
+ IF_TSAN(V, TSANSeqCstStore64IgnoreFP) \
+ IF_TSAN(V, TSANSeqCstStore64SaveFP) \
IF_TSAN(V, TSANRelaxedLoad32IgnoreFP) \
IF_TSAN(V, TSANRelaxedLoad32SaveFP) \
IF_TSAN(V, TSANRelaxedLoad64IgnoreFP) \
@@ -109,7 +117,6 @@ struct WasmModule;
V(WasmAllocateArray_Uninitialized) \
V(WasmAllocateArray_InitNull) \
V(WasmAllocateArray_InitZero) \
- V(WasmArrayCopy) \
V(WasmArrayCopyWithChecks) \
V(WasmAllocateRtt) \
V(WasmAllocateFreshRtt) \
@@ -188,25 +195,47 @@ class V8_EXPORT_PRIVATE WasmCode final {
}
#ifdef V8_IS_TSAN
- static RuntimeStubId GetTSANRelaxedStoreStub(SaveFPRegsMode fp_mode,
- int size) {
- if (size == kInt8Size) {
- return fp_mode == SaveFPRegsMode::kIgnore
- ? RuntimeStubId::kTSANRelaxedStore8IgnoreFP
- : RuntimeStubId::kTSANRelaxedStore8SaveFP;
- } else if (size == kInt16Size) {
- return fp_mode == SaveFPRegsMode::kIgnore
- ? RuntimeStubId::kTSANRelaxedStore16IgnoreFP
- : RuntimeStubId::kTSANRelaxedStore16SaveFP;
- } else if (size == kInt32Size) {
- return fp_mode == SaveFPRegsMode::kIgnore
- ? RuntimeStubId::kTSANRelaxedStore32IgnoreFP
- : RuntimeStubId::kTSANRelaxedStore32SaveFP;
+ static RuntimeStubId GetTSANStoreStub(SaveFPRegsMode fp_mode, int size,
+ std::memory_order order) {
+ if (order == std::memory_order_relaxed) {
+ if (size == kInt8Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANRelaxedStore8IgnoreFP
+ : RuntimeStubId::kTSANRelaxedStore8SaveFP;
+ } else if (size == kInt16Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANRelaxedStore16IgnoreFP
+ : RuntimeStubId::kTSANRelaxedStore16SaveFP;
+ } else if (size == kInt32Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANRelaxedStore32IgnoreFP
+ : RuntimeStubId::kTSANRelaxedStore32SaveFP;
+ } else {
+ CHECK_EQ(size, kInt64Size);
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANRelaxedStore64IgnoreFP
+ : RuntimeStubId::kTSANRelaxedStore64SaveFP;
+ }
} else {
- CHECK_EQ(size, kInt64Size);
- return fp_mode == SaveFPRegsMode::kIgnore
- ? RuntimeStubId::kTSANRelaxedStore64IgnoreFP
- : RuntimeStubId::kTSANRelaxedStore64SaveFP;
+ DCHECK_EQ(order, std::memory_order_seq_cst);
+ if (size == kInt8Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANSeqCstStore8IgnoreFP
+ : RuntimeStubId::kTSANSeqCstStore8SaveFP;
+ } else if (size == kInt16Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANSeqCstStore16IgnoreFP
+ : RuntimeStubId::kTSANSeqCstStore16SaveFP;
+ } else if (size == kInt32Size) {
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANSeqCstStore32IgnoreFP
+ : RuntimeStubId::kTSANSeqCstStore32SaveFP;
+ } else {
+ CHECK_EQ(size, kInt64Size);
+ return fp_mode == SaveFPRegsMode::kIgnore
+ ? RuntimeStubId::kTSANSeqCstStore64IgnoreFP
+ : RuntimeStubId::kTSANSeqCstStore64SaveFP;
+ }
}
}
@@ -520,7 +549,7 @@ class WasmCodeAllocator {
// Make a code region writable. Only allowed if there is at least one writer
// (see above).
// Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
- void MakeWritable(base::AddressRegion);
+ V8_EXPORT_PRIVATE void MakeWritable(base::AddressRegion);
// Free memory pages of all given code objects. Used for wasm code GC.
// Hold the {NativeModule}'s {allocation_mutex_} when calling this method.
@@ -637,6 +666,9 @@ class V8_EXPORT_PRIVATE NativeModule final {
// Creates a snapshot of the current state of the code table. This is useful
// to get a consistent view of the table (e.g. used by the serializer).
std::vector<WasmCode*> SnapshotCodeTable() const;
+ // Creates a snapshot of all {owned_code_}; transfers any new code into
+ // {owned_code_} first.
+ std::vector<WasmCode*> SnapshotAllOwnedCode() const;
WasmCode* GetCode(uint32_t index) const;
bool HasCode(uint32_t index) const;
@@ -1006,6 +1038,10 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// Returns true if there is PKU support, false otherwise.
bool HasMemoryProtectionKeySupport() const;
+ // This allocates a memory protection key (if none was allocated before),
+ // independent of the --wasm-memory-protection-keys flag.
+ void InitializeMemoryProtectionKeyForTesting();
+
private:
friend class WasmCodeAllocator;
friend class WasmEngine;
@@ -1033,7 +1069,7 @@ class V8_EXPORT_PRIVATE WasmCodeManager final {
// and updated after each GC.
std::atomic<size_t> critical_committed_code_space_;
- const int memory_protection_key_;
+ int memory_protection_key_;
mutable base::Mutex native_modules_mutex_;
diff --git a/deps/v8/src/wasm/wasm-engine.h b/deps/v8/src/wasm/wasm-engine.h
index 7209096911..5cf61ef543 100644
--- a/deps/v8/src/wasm/wasm-engine.h
+++ b/deps/v8/src/wasm/wasm-engine.h
@@ -45,6 +45,7 @@ class GdbServer;
class AsyncCompileJob;
class ErrorThrower;
struct ModuleWireBytes;
+class StreamingDecoder;
class WasmFeatures;
class V8_EXPORT_PRIVATE CompilationResultResolver {
diff --git a/deps/v8/src/wasm/wasm-external-refs.cc b/deps/v8/src/wasm/wasm-external-refs.cc
index 101d563876..6fc3278141 100644
--- a/deps/v8/src/wasm/wasm-external-refs.cc
+++ b/deps/v8/src/wasm/wasm-external-refs.cc
@@ -451,7 +451,6 @@ class V8_NODISCARD ThreadNotInWasmScope {
#endif
};
-#ifdef DISABLE_UNTRUSTED_CODE_MITIGATIONS
inline byte* EffectiveAddress(WasmInstanceObject instance, uint32_t index) {
return instance.memory_start() + index;
}
@@ -460,19 +459,6 @@ inline byte* EffectiveAddress(byte* base, size_t size, uint32_t index) {
return base + index;
}
-#else
-inline byte* EffectiveAddress(WasmInstanceObject instance, uint32_t index) {
- // Compute the effective address of the access, making sure to condition
- // the index even in the in-bounds case.
- return instance.memory_start() + (index & instance.memory_mask());
-}
-
-inline byte* EffectiveAddress(byte* base, size_t size, uint32_t index) {
- size_t mem_mask = base::bits::RoundUpToPowerOfTwo(size) - 1;
- return base + (index & mem_mask);
-}
-#endif
-
template <typename V>
V ReadAndIncrementOffset(Address data, size_t* offset) {
V result = ReadUnalignedValue<V>(data + *offset);
@@ -551,6 +537,53 @@ int32_t memory_fill_wrapper(Address data) {
return kSuccess;
}
+namespace {
+inline void* ArrayElementAddress(WasmArray array, uint32_t index,
+ int element_size_bytes) {
+ return reinterpret_cast<void*>(array.ptr() + WasmArray::kHeaderSize -
+ kHeapObjectTag + index * element_size_bytes);
+}
+} // namespace
+
+void array_copy_wrapper(Address raw_instance, Address raw_dst_array,
+ uint32_t dst_index, Address raw_src_array,
+ uint32_t src_index, uint32_t length) {
+ ThreadNotInWasmScope thread_not_in_wasm_scope;
+ DisallowGarbageCollection no_gc;
+ WasmArray dst_array = WasmArray::cast(Object(raw_dst_array));
+ WasmArray src_array = WasmArray::cast(Object(raw_src_array));
+
+ bool overlapping_ranges =
+ dst_array.ptr() == src_array.ptr() &&
+ (dst_index < src_index ? dst_index + length > src_index
+ : src_index + length > dst_index);
+ wasm::ValueType element_type = src_array.type()->element_type();
+ if (element_type.is_reference()) {
+ WasmInstanceObject instance =
+ WasmInstanceObject::cast(Object(raw_instance));
+ Isolate* isolate = Isolate::FromRootAddress(instance.isolate_root());
+ ObjectSlot dst_slot = dst_array.ElementSlot(dst_index);
+ ObjectSlot src_slot = src_array.ElementSlot(src_index);
+ if (overlapping_ranges) {
+ isolate->heap()->MoveRange(dst_array, dst_slot, src_slot, length,
+ UPDATE_WRITE_BARRIER);
+ } else {
+ isolate->heap()->CopyRange(dst_array, dst_slot, src_slot, length,
+ UPDATE_WRITE_BARRIER);
+ }
+ } else {
+ int element_size_bytes = element_type.element_size_bytes();
+ void* dst = ArrayElementAddress(dst_array, dst_index, element_size_bytes);
+ void* src = ArrayElementAddress(src_array, src_index, element_size_bytes);
+ size_t copy_size = length * element_size_bytes;
+ if (overlapping_ranges) {
+ MemMove(dst, src, copy_size);
+ } else {
+ MemCopy(dst, src, copy_size);
+ }
+ }
+}
+
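
The overlap test in array_copy_wrapper is symmetric: two index ranges overlap iff the lower-starting one reaches past the start of the other, and only then is a move-style (memmove) copy required. A scalar sketch of the test and its consequence (illustrative; the wrapper additionally requires dst and src to be the same array object):

#include <cassert>
#include <cstdint>
#include <cstring>

bool RangesOverlap(uint32_t dst_index, uint32_t src_index, uint32_t length) {
  return dst_index < src_index ? dst_index + length > src_index
                               : src_index + length > dst_index;
}

int main() {
  int a[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  uint32_t dst = 1, src = 0, len = 4;  // [1, 5) overlaps [0, 4)
  assert(RangesOverlap(dst, src, len));
  std::memmove(a + dst, a + src, len * sizeof(int));  // overlap-safe copy
  assert(a[1] == 0 && a[4] == 3 && a[5] == 5);  // a is now 0 0 1 2 3 5 6 7
}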
static WasmTrapCallbackForTesting wasm_trap_callback_for_testing = nullptr;
void set_trap_callback_for_testing(WasmTrapCallbackForTesting callback) {
diff --git a/deps/v8/src/wasm/wasm-external-refs.h b/deps/v8/src/wasm/wasm-external-refs.h
index e8363d5936..3365e109fb 100644
--- a/deps/v8/src/wasm/wasm-external-refs.h
+++ b/deps/v8/src/wasm/wasm-external-refs.h
@@ -111,6 +111,10 @@ int32_t memory_copy_wrapper(Address data);
// zero-extend the result in the return register.
int32_t memory_fill_wrapper(Address data);
+void array_copy_wrapper(Address raw_instance, Address raw_dst_array,
+ uint32_t dst_index, Address raw_src_array,
+ uint32_t src_index, uint32_t length);
+
using WasmTrapCallbackForTesting = void (*)();
V8_EXPORT_PRIVATE void set_trap_callback_for_testing(
diff --git a/deps/v8/src/wasm/wasm-feature-flags.h b/deps/v8/src/wasm/wasm-feature-flags.h
index 1c4c2acaec..ac8e8e16d7 100644
--- a/deps/v8/src/wasm/wasm-feature-flags.h
+++ b/deps/v8/src/wasm/wasm-feature-flags.h
@@ -26,8 +26,12 @@
\
/* Non-specified, V8-only experimental additions to the GC proposal */ \
/* V8 side owner: jkummerow */ \
- V(gc_experiments, "garbage collection V8-only experimental features", false) \
- V(nn_locals, "allow non-defaultable/non-nullable locals", false) \
+ V(nn_locals, \
+ "allow non-defaultable/non-nullable locals, validated with 'until end of " \
+ "block' semantics", \
+ false) \
+ V(unsafe_nn_locals, \
+ "allow non-defaultable/non-nullable locals, no validation", false) \
\
/* Typed function references proposal. */ \
/* Official proposal: https://github.com/WebAssembly/function-references */ \
@@ -47,7 +51,12 @@
/* Branch Hinting proposal. */ \
/* https://github.com/WebAssembly/branch-hinting */ \
/* V8 side owner: jkummerow */ \
- V(branch_hinting, "branch hinting", false)
+ V(branch_hinting, "branch hinting", false) \
+ \
+ /* Stack Switching proposal. */ \
+ /* https://github.com/WebAssembly/stack-switching */ \
+ /* V8 side owner: thibaudm & fgm */ \
+ V(stack_switching, "stack switching", false)
// #############################################################################
// Staged features (disabled by default, but enabled via --wasm-staging (also
@@ -58,12 +67,6 @@
// be shipped with enough lead time to the next branch to allow for
// stabilization.
#define FOREACH_WASM_STAGING_FEATURE_FLAG(V) /* (force 80 columns) */ \
- /* Exception handling proposal. */ \
- /* https://github.com/WebAssembly/exception-handling */ \
- /* V8 side owner: thibaudm */ \
- /* Staged in v8.9 */ \
- V(eh, "exception handling opcodes", false) \
- \
/* Reference Types, a.k.a. reftypes proposal. */ \
/* https://github.com/WebAssembly/reference-types */ \
/* V8 side owner: ahaas */ \
@@ -104,6 +107,13 @@
/* V8 side owner: gdeepti */ \
V(threads, "thread opcodes", true) \
\
+ /* Exception handling proposal. */ \
+ /* https://github.com/WebAssembly/exception-handling */ \
+ /* V8 side owner: thibaudm */ \
+ /* Staged in v8.9 */ \
+ /* Shipped in v9.5 */ \
+ V(eh, "exception handling opcodes", true) \
+ \
// Combination of all available wasm feature flags.
#define FOREACH_WASM_FEATURE_FLAG(V) \
FOREACH_WASM_EXPERIMENTAL_FEATURE_FLAG(V) \
diff --git a/deps/v8/src/wasm/wasm-js.cc b/deps/v8/src/wasm/wasm-js.cc
index b65db60154..ef514c3b4c 100644
--- a/deps/v8/src/wasm/wasm-js.cc
+++ b/deps/v8/src/wasm/wasm-js.cc
@@ -7,6 +7,8 @@
#include <cinttypes>
#include <cstring>
+#include "include/v8-function.h"
+#include "include/v8-wasm.h"
#include "src/api/api-inl.h"
#include "src/api/api-natives.h"
#include "src/ast/ast.h"
@@ -1115,12 +1117,25 @@ void WebAssemblyTable(const v8::FunctionCallbackInfo<v8::Value>& args) {
}
i::Handle<i::FixedArray> fixed_array;
- i::Handle<i::JSObject> table_obj =
+ i::Handle<i::WasmTableObject> table_obj =
i::WasmTableObject::New(i_isolate, i::Handle<i::WasmInstanceObject>(),
type, static_cast<uint32_t>(initial), has_maximum,
static_cast<uint32_t>(maximum), &fixed_array);
+
+ if (initial > 0 && args.Length() >= 2 && !args[1]->IsUndefined()) {
+ i::Handle<i::Object> element = Utils::OpenHandle(*args[1]);
+ if (!i::WasmTableObject::IsValidElement(i_isolate, table_obj, element)) {
+ thrower.TypeError(
+ "Argument 2 must be undefined, null, or a value of type compatible "
+ "with the type of the new table.");
+ return;
+ }
+ for (uint32_t index = 0; index < static_cast<uint32_t>(initial); ++index) {
+ i::WasmTableObject::Set(i_isolate, table_obj, index, element);
+ }
+ }
v8::ReturnValue<v8::Value> return_value = args.GetReturnValue();
- return_value.Set(Utils::ToLocal(table_obj));
+ return_value.Set(Utils::ToLocal(i::Handle<i::JSObject>::cast(table_obj)));
}
void WebAssemblyMemory(const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -1578,7 +1593,6 @@ void EncodeExceptionValues(v8::Isolate* isolate,
case i::wasm::kBottom:
case i::wasm::kS128:
UNREACHABLE();
- break;
}
}
}
diff --git a/deps/v8/src/wasm/wasm-limits.h b/deps/v8/src/wasm/wasm-limits.h
index b7806af797..fcafb69395 100644
--- a/deps/v8/src/wasm/wasm-limits.h
+++ b/deps/v8/src/wasm/wasm-limits.h
@@ -58,9 +58,6 @@ constexpr size_t kV8MaxWasmMemories = 1;
// GC proposal. These limits are not standardized yet.
constexpr size_t kV8MaxWasmStructFields = 999;
constexpr uint32_t kV8MaxRttSubtypingDepth = 31;
-// Maximum supported by implementation: ((1<<27)-3).
-// Reason: total object size in bytes must fit into a Smi, for filler objects.
-constexpr size_t kV8MaxWasmArrayLength = 1u << 26;
constexpr size_t kV8MaxWasmArrayInitLength = 999;
static_assert(kV8MaxWasmTableSize <= 4294967295, // 2^32 - 1
diff --git a/deps/v8/src/wasm/wasm-linkage.h b/deps/v8/src/wasm/wasm-linkage.h
index 2d98055519..ecf59f9ed5 100644
--- a/deps/v8/src/wasm/wasm-linkage.h
+++ b/deps/v8/src/wasm/wasm-linkage.h
@@ -80,6 +80,15 @@ constexpr Register kGpReturnRegisters[] = {v0, v1};
constexpr DoubleRegister kFpParamRegisters[] = {f2, f4, f6, f8, f10, f12, f14};
constexpr DoubleRegister kFpReturnRegisters[] = {f2, f4};
+#elif V8_TARGET_ARCH_LOONG64
+// ===========================================================================
+// == LOONG64 ================================================================
+// ===========================================================================
+constexpr Register kGpParamRegisters[] = {a0, a2, a3, a4, a5, a6, a7};
+constexpr Register kGpReturnRegisters[] = {a0, a1};
+constexpr DoubleRegister kFpParamRegisters[] = {f0, f1, f2, f3, f4, f5, f6, f7};
+constexpr DoubleRegister kFpReturnRegisters[] = {f0, f1};
+
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
// ===========================================================================
// == ppc & ppc64 ============================================================
diff --git a/deps/v8/src/wasm/wasm-module-builder.cc b/deps/v8/src/wasm/wasm-module-builder.cc
index 2bf20ea3ec..756900c160 100644
--- a/deps/v8/src/wasm/wasm-module-builder.cc
+++ b/deps/v8/src/wasm/wasm-module-builder.cc
@@ -264,7 +264,7 @@ WasmModuleBuilder::WasmModuleBuilder(Zone* zone)
functions_(zone),
tables_(zone),
data_segments_(zone),
- indirect_functions_(zone),
+ element_segments_(zone),
globals_(zone),
exceptions_(zone),
signature_map_(zone),
@@ -323,75 +323,52 @@ uint32_t WasmModuleBuilder::AddArrayType(ArrayType* type) {
const uint32_t WasmModuleBuilder::kNullIndex =
std::numeric_limits<uint32_t>::max();
-// TODO(9495): Add support for typed function tables and more init. expressions.
-uint32_t WasmModuleBuilder::AllocateIndirectFunctions(uint32_t count) {
- DCHECK(allocating_indirect_functions_allowed_);
- uint32_t index = static_cast<uint32_t>(indirect_functions_.size());
- DCHECK_GE(FLAG_wasm_max_table_size, index);
- if (count > FLAG_wasm_max_table_size - index) {
+uint32_t WasmModuleBuilder::IncreaseTableMinSize(uint32_t table_index,
+ uint32_t count) {
+ DCHECK_LT(table_index, tables_.size());
+ uint32_t old_min_size = tables_[table_index].min_size;
+ if (count > FLAG_wasm_max_table_size - old_min_size) {
return std::numeric_limits<uint32_t>::max();
}
- uint32_t new_size = static_cast<uint32_t>(indirect_functions_.size()) + count;
- DCHECK(max_table_size_ == 0 || new_size <= max_table_size_);
- indirect_functions_.resize(new_size, kNullIndex);
- uint32_t max = max_table_size_ > 0 ? max_table_size_ : new_size;
- if (tables_.empty()) {
- // This cannot use {AddTable} because that would flip the
- // {allocating_indirect_functions_allowed_} flag.
- tables_.push_back({kWasmFuncRef, new_size, max, true, {}});
- } else {
- // There can only be the indirect function table so far, otherwise the
- // {allocating_indirect_functions_allowed_} flag would have been false.
- DCHECK_EQ(1u, tables_.size());
- DCHECK_EQ(kWasmFuncRef, tables_[0].type);
- DCHECK(tables_[0].has_maximum);
- tables_[0].min_size = new_size;
- tables_[0].max_size = max;
- }
- return index;
-}
-
-void WasmModuleBuilder::SetIndirectFunction(uint32_t indirect,
- uint32_t direct) {
- indirect_functions_[indirect] = direct;
-}
-
-void WasmModuleBuilder::SetMaxTableSize(uint32_t max) {
- DCHECK_GE(FLAG_wasm_max_table_size, max);
- DCHECK_GE(max, indirect_functions_.size());
- max_table_size_ = max;
- DCHECK(allocating_indirect_functions_allowed_);
- if (!tables_.empty()) {
- tables_[0].max_size = max;
- }
+ tables_[table_index].min_size = old_min_size + count;
+ tables_[table_index].max_size =
+ std::max(old_min_size + count, tables_[table_index].max_size);
+ return old_min_size;
}
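
Note the order of operations in the bound check above: comparing count against FLAG_wasm_max_table_size - old_min_size cannot wrap around, whereas old_min_size + count > max could overflow uint32_t and pass. The same check in isolation (kMaxTableSize stands in for the flag; GrowMinSize is a made-up name):

#include <cstdint>
#include <limits>

constexpr uint32_t kMaxTableSize = 10000000;  // stand-in for the flag value

// Returns the old size on success, or uint32_t max as a failure sentinel;
// assumes *min_size <= kMaxTableSize on entry.
uint32_t GrowMinSize(uint32_t* min_size, uint32_t count) {
  uint32_t old_min_size = *min_size;
  if (count > kMaxTableSize - old_min_size) {  // overflow-safe bound check
    return std::numeric_limits<uint32_t>::max();
  }
  *min_size = old_min_size + count;
  return old_min_size;
}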
uint32_t WasmModuleBuilder::AddTable(ValueType type, uint32_t min_size) {
-#if DEBUG
- allocating_indirect_functions_allowed_ = false;
-#endif
tables_.push_back({type, min_size, 0, false, {}});
return static_cast<uint32_t>(tables_.size() - 1);
}
uint32_t WasmModuleBuilder::AddTable(ValueType type, uint32_t min_size,
uint32_t max_size) {
-#if DEBUG
- allocating_indirect_functions_allowed_ = false;
-#endif
tables_.push_back({type, min_size, max_size, true, {}});
return static_cast<uint32_t>(tables_.size() - 1);
}
uint32_t WasmModuleBuilder::AddTable(ValueType type, uint32_t min_size,
uint32_t max_size, WasmInitExpr init) {
-#if DEBUG
- allocating_indirect_functions_allowed_ = false;
-#endif
tables_.push_back({type, min_size, max_size, true, std::move(init)});
return static_cast<uint32_t>(tables_.size() - 1);
}
+void WasmModuleBuilder::AddElementSegment(WasmElemSegment segment) {
+ element_segments_.push_back(std::move(segment));
+}
+
+void WasmModuleBuilder::SetIndirectFunction(
+ uint32_t table_index, uint32_t index_in_table,
+ uint32_t direct_function_index,
+ WasmElemSegment::FunctionIndexingMode indexing_mode) {
+ WasmElemSegment segment(zone_, kWasmFuncRef, table_index,
+ WasmInitExpr(static_cast<int>(index_in_table)));
+ segment.indexing_mode = indexing_mode;
+ segment.entries.emplace_back(WasmElemSegment::Entry::kRefFuncEntry,
+ direct_function_index);
+ AddElementSegment(std::move(segment));
+}
+
uint32_t WasmModuleBuilder::AddImport(base::Vector<const char> name,
FunctionSig* sig,
base::Vector<const char> module) {
@@ -454,8 +431,9 @@ void WasmModuleBuilder::SetMaxMemorySize(uint32_t value) {
void WasmModuleBuilder::SetHasSharedMemory() { has_shared_memory_ = true; }
namespace {
-void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
- ValueType type) {
+void WriteInitializerExpressionWithEnd(ZoneBuffer* buffer,
+ const WasmInitExpr& init,
+ ValueType type) {
switch (init.kind()) {
case WasmInitExpr::kI32Const:
buffer->write_u8(kExprI32Const);
@@ -534,7 +512,7 @@ void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
case WasmInitExpr::kStructNewWithRtt:
STATIC_ASSERT((kExprStructNewWithRtt >> 8) == kGCPrefix);
for (const WasmInitExpr& operand : init.operands()) {
- WriteInitializerExpression(buffer, operand, kWasmBottom);
+ WriteInitializerExpressionWithEnd(buffer, operand, kWasmBottom);
}
buffer->write_u8(kGCPrefix);
buffer->write_u8(static_cast<uint8_t>(kExprStructNewWithRtt));
@@ -543,7 +521,7 @@ void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
case WasmInitExpr::kArrayInit:
STATIC_ASSERT((kExprArrayInit >> 8) == kGCPrefix);
for (const WasmInitExpr& operand : init.operands()) {
- WriteInitializerExpression(buffer, operand, kWasmBottom);
+ WriteInitializerExpressionWithEnd(buffer, operand, kWasmBottom);
}
buffer->write_u8(kGCPrefix);
buffer->write_u8(static_cast<uint8_t>(kExprArrayInit));
@@ -559,7 +537,8 @@ void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
case WasmInitExpr::kRttSub:
case WasmInitExpr::kRttFreshSub:
// The operand to rtt.sub must be emitted first.
- WriteInitializerExpression(buffer, init.operands()[0], kWasmBottom);
+ WriteInitializerExpressionWithEnd(buffer, init.operands()[0],
+ kWasmBottom);
STATIC_ASSERT((kExprRttSub >> 8) == kGCPrefix);
STATIC_ASSERT((kExprRttFreshSub >> 8) == kGCPrefix);
buffer->write_u8(kGCPrefix);
@@ -571,6 +550,11 @@ void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
}
}
+void WriteInitializerExpression(ZoneBuffer* buffer, const WasmInitExpr& init,
+ ValueType type) {
+ WriteInitializerExpressionWithEnd(buffer, init, type);
+ buffer->write_u8(kExprEnd);
+}
} // namespace
void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
@@ -705,7 +689,6 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
WriteValueType(buffer, global.type);
buffer->write_u8(global.mutability ? 1 : 0);
WriteInitializerExpression(buffer, global.init, global.type);
- buffer->write_u8(kExprEnd);
}
FixupSection(buffer, start);
}
@@ -744,31 +727,67 @@ void WasmModuleBuilder::WriteTo(ZoneBuffer* buffer) const {
FixupSection(buffer, start);
}
- // == emit function table elements ===========================================
- if (indirect_functions_.size() > 0) {
+ // == emit element segments ==================================================
+ if (element_segments_.size() > 0) {
size_t start = EmitSection(kElementSectionCode, buffer);
- buffer->write_u8(1); // count of entries
- buffer->write_u8(0); // table index
- uint32_t first_element = 0;
- while (first_element < indirect_functions_.size() &&
- indirect_functions_[first_element] == kNullIndex) {
- first_element++;
- }
- uint32_t last_element =
- static_cast<uint32_t>(indirect_functions_.size() - 1);
- while (last_element >= first_element &&
- indirect_functions_[last_element] == kNullIndex) {
- last_element--;
- }
- buffer->write_u8(kExprI32Const); // offset
- buffer->write_u32v(first_element);
- buffer->write_u8(kExprEnd);
- uint32_t element_count = last_element - first_element + 1;
- buffer->write_size(element_count);
- for (uint32_t i = first_element; i <= last_element; i++) {
- buffer->write_size(indirect_functions_[i] + function_imports_.size());
+ buffer->write_size(element_segments_.size());
+ for (const WasmElemSegment& segment : element_segments_) {
+ bool is_active = segment.status == WasmElemSegment::kStatusActive;
+ // If this segment is expressible in the backwards-compatible syntax
+ // (before reftypes proposal), we should emit it in that syntax.
+ // This is the case if the segment is active and all entries are function
+ // references. Note that this is currently the only path that allows
+ // kRelativeToDeclaredFunctions function indexing mode.
+ // TODO(manoskouk): Remove this logic once reftypes has shipped.
+ bool backwards_compatible =
+ is_active && segment.table_index == 0 &&
+ std::all_of(
+ segment.entries.begin(), segment.entries.end(), [](auto& entry) {
+ return entry.kind ==
+ WasmModuleBuilder::WasmElemSegment::Entry::kRefFuncEntry;
+ });
+ if (backwards_compatible) {
+ buffer->write_u8(0);
+ WriteInitializerExpression(buffer, segment.offset, segment.type);
+ buffer->write_size(segment.entries.size());
+ for (const WasmElemSegment::Entry entry : segment.entries) {
+ buffer->write_u32v(
+ segment.indexing_mode == WasmElemSegment::kRelativeToImports
+ ? entry.index
+ : entry.index +
+ static_cast<uint32_t>(function_imports_.size()));
+ }
+ } else {
+ DCHECK_EQ(segment.indexing_mode, WasmElemSegment::kRelativeToImports);
+ // If we pick the general syntax, we always explicitly emit the table
+ // index and the type, and use the expressions-as-elements syntax. I.e.
+ // the initial byte is one of 0x05, 0x06, and 0x07.
+ uint8_t kind_mask =
+ segment.status == WasmElemSegment::kStatusActive
+ ? 0b10
+ : segment.status == WasmElemSegment::kStatusDeclarative ? 0b11
+ : 0b01;
+ uint8_t expressions_as_elements_mask = 0b100;
+ buffer->write_u8(kind_mask | expressions_as_elements_mask);
+ if (is_active) {
+ buffer->write_u32v(segment.table_index);
+ WriteInitializerExpression(buffer, segment.offset, segment.type);
+ }
+ WriteValueType(buffer, segment.type);
+ buffer->write_size(segment.entries.size());
+ for (const WasmElemSegment::Entry entry : segment.entries) {
+ uint8_t opcode =
+ entry.kind == WasmElemSegment::Entry::kGlobalGetEntry
+ ? kExprGlobalGet
+ : entry.kind == WasmElemSegment::Entry::kRefFuncEntry
+ ? kExprRefFunc
+ : kExprRefNull;
+ buffer->write_u8(opcode);
+ buffer->write_u32v(entry.index);
+ buffer->write_u8(kExprEnd);
+ }
+ }
}
-
FixupSection(buffer, start);
}
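// A minimal standalone sketch (not part of the patch) of how the general
// element-segment encoding above derives its initial flag byte: the low two
// bits encode the segment status and bit 2 selects the
// expressions-as-elements syntax, so this emitter only ever produces 0x05
// (passive), 0x06 (active with explicit table index), or 0x07 (declarative).
// The enum below is an illustrative stand-in, not the builder's own type.
#include <cassert>
#include <cstdint>

enum class SegmentStatus { kActive, kPassive, kDeclarative };

uint8_t ElementSegmentFlagByte(SegmentStatus status) {
  uint8_t kind_mask = status == SegmentStatus::kActive        ? 0b10
                      : status == SegmentStatus::kDeclarative ? 0b11
                                                              : 0b01;
  constexpr uint8_t kExpressionsAsElementsMask = 0b100;
  return kind_mask | kExpressionsAsElementsMask;
}

int main() {
  assert(ElementSegmentFlagByte(SegmentStatus::kPassive) == 0x05);
  assert(ElementSegmentFlagByte(SegmentStatus::kActive) == 0x06);
  assert(ElementSegmentFlagByte(SegmentStatus::kDeclarative) == 0x07);
}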
diff --git a/deps/v8/src/wasm/wasm-module-builder.h b/deps/v8/src/wasm/wasm-module-builder.h
index db2091cdba..8eeac56afd 100644
--- a/deps/v8/src/wasm/wasm-module-builder.h
+++ b/deps/v8/src/wasm/wasm-module-builder.h
@@ -207,6 +207,7 @@ class V8_EXPORT_PRIVATE WasmFunctionBuilder : public ZoneObject {
WasmModuleBuilder* builder() const { return builder_; }
uint32_t func_index() { return func_index_; }
+ uint32_t sig_index() { return signature_index_; }
inline FunctionSig* signature();
private:
@@ -245,6 +246,68 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
WasmModuleBuilder(const WasmModuleBuilder&) = delete;
WasmModuleBuilder& operator=(const WasmModuleBuilder&) = delete;
+ // Static representation of wasm element segment (table initializer). This is
+ // different from the version in wasm-module.h.
+ class WasmElemSegment {
+ public:
+ // asm.js gives function indices starting with the first non-imported
+ // function.
+ enum FunctionIndexingMode {
+ kRelativeToImports,
+ kRelativeToDeclaredFunctions
+ };
+ enum Status {
+ kStatusActive, // copied automatically during instantiation.
+ kStatusPassive, // copied explicitly after instantiation.
+ kStatusDeclarative // purely declarative and never copied.
+ };
+ struct Entry {
+ enum Kind { kGlobalGetEntry, kRefFuncEntry, kRefNullEntry } kind;
+ uint32_t index;
+ Entry(Kind kind, uint32_t index) : kind(kind), index(index) {}
+ Entry() : kind(kRefNullEntry), index(0) {}
+ };
+
+ // Construct an active segment.
+ WasmElemSegment(Zone* zone, ValueType type, uint32_t table_index,
+ WasmInitExpr offset)
+ : type(type),
+ table_index(table_index),
+ offset(std::move(offset)),
+ entries(zone),
+ status(kStatusActive) {
+ DCHECK(IsValidOffsetKind(offset.kind()));
+ }
+
+ // Construct a passive or declarative segment, which has no table
+ // index or offset.
+ WasmElemSegment(Zone* zone, ValueType type, bool declarative)
+ : type(type),
+ table_index(0),
+ entries(zone),
+ status(declarative ? kStatusDeclarative : kStatusPassive) {}
+
+ MOVE_ONLY_NO_DEFAULT_CONSTRUCTOR(WasmElemSegment);
+
+ ValueType type;
+ uint32_t table_index;
+ WasmInitExpr offset;
+ FunctionIndexingMode indexing_mode = kRelativeToImports;
+ ZoneVector<Entry> entries;
+ Status status;
+
+ private:
+ // This ensures no {WasmInitExpr} with subexpressions is used, which would
+ // cause a memory leak because those are stored in an std::vector. Such
+ // offset would also be mistyped.
+ bool IsValidOffsetKind(WasmInitExpr::Operator kind) {
+ return kind == WasmInitExpr::kI32Const ||
+ kind == WasmInitExpr::kGlobalGet;
+ }
+ };
+
// Building methods.
uint32_t AddImport(base::Vector<const char> name, FunctionSig* sig,
base::Vector<const char> module = {});
@@ -255,16 +318,23 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
bool mutability,
base::Vector<const char> module = {});
void AddDataSegment(const byte* data, uint32_t size, uint32_t dest);
+ // Add an element segment to this {WasmModuleBuilder}. {segment}'s entries
+ // have to be initialized.
+ void AddElementSegment(WasmElemSegment segment);
+ // Helper method to create an active segment with one function. Assumes that
+ // table segment at {table_index} is typed as funcref.
+ void SetIndirectFunction(uint32_t table_index, uint32_t index_in_table,
+ uint32_t direct_function_index,
+ WasmElemSegment::FunctionIndexingMode indexing_mode);
+ // Increase the starting size of the table at {table_index} by {count}. Also
+ // increases the maximum table size if needed. Returns the former starting
+ // size, or the maximum uint32_t value if the maximum table size has been
+ // exceeded.
+ uint32_t IncreaseTableMinSize(uint32_t table_index, uint32_t count);
uint32_t AddSignature(FunctionSig* sig);
uint32_t AddException(FunctionSig* type);
uint32_t AddStructType(StructType* type);
uint32_t AddArrayType(ArrayType* type);
- // In the current implementation, it's supported to have uninitialized slots
- // at the beginning and/or end of the indirect function table, as long as
- // the filled slots form a contiguous block in the middle.
- uint32_t AllocateIndirectFunctions(uint32_t count);
- void SetIndirectFunction(uint32_t indirect, uint32_t direct);
- void SetMaxTableSize(uint32_t max);
uint32_t AddTable(ValueType type, uint32_t min_size);
uint32_t AddTable(ValueType type, uint32_t min_size, uint32_t max_size);
uint32_t AddTable(ValueType type, uint32_t min_size, uint32_t max_size,
@@ -288,10 +358,17 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
Zone* zone() { return zone_; }
+ ValueType GetTableType(uint32_t index) { return tables_[index].type; }
+
+ bool IsSignature(uint32_t index) {
+ return types_[index].kind == Type::kFunctionSig;
+ }
+
FunctionSig* GetSignature(uint32_t index) {
DCHECK(types_[index].kind == Type::kFunctionSig);
return types_[index].sig;
}
+
bool IsStructType(uint32_t index) {
return types_[index].kind == Type::kStructType;
}
@@ -304,10 +381,15 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
}
ArrayType* GetArrayType(uint32_t index) { return types_[index].array_type; }
+ WasmFunctionBuilder* GetFunction(uint32_t index) { return functions_[index]; }
int NumExceptions() { return static_cast<int>(exceptions_.size()); }
int NumTypes() { return static_cast<int>(types_.size()); }
+ int NumTables() { return static_cast<int>(tables_.size()); }
+
+ int NumFunctions() { return static_cast<int>(functions_.size()); }
+
FunctionSig* GetExceptionType(int index) {
return types_[exceptions_[index]].sig;
}
@@ -380,12 +462,11 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
ZoneVector<WasmFunctionBuilder*> functions_;
ZoneVector<WasmTable> tables_;
ZoneVector<WasmDataSegment> data_segments_;
- ZoneVector<uint32_t> indirect_functions_;
+ ZoneVector<WasmElemSegment> element_segments_;
ZoneVector<WasmGlobal> globals_;
ZoneVector<int> exceptions_;
ZoneUnorderedMap<FunctionSig, uint32_t> signature_map_;
int start_function_index_;
- uint32_t max_table_size_ = 0;
uint32_t min_memory_size_;
uint32_t max_memory_size_;
bool has_max_memory_size_;
@@ -393,8 +474,6 @@ class V8_EXPORT_PRIVATE WasmModuleBuilder : public ZoneObject {
#if DEBUG
// Once AddExportedImport is called, no more imports can be added.
bool adding_imports_allowed_ = true;
- // Indirect functions must be allocated before adding extra tables.
- bool allocating_indirect_functions_allowed_ = true;
#endif
};
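// A hedged usage sketch (not part of the patch) of the reworked table API
// above, replacing the old AllocateIndirectFunctions/SetMaxTableSize pair:
// grow the table's minimum size to reserve a slot, then install a function
// through an active element segment. Assumes the v8::internal::wasm
// namespace; {builder} and {func_index} come from surrounding test code.
void InstallFuncRef(WasmModuleBuilder* builder, uint32_t func_index) {
  // One funcref table with an initial minimum size of 0.
  uint32_t table = builder->AddTable(kWasmFuncRef, 0);
  // Returns the former minimum size, i.e. the index of the slot just added.
  uint32_t slot = builder->IncreaseTableMinSize(table, 1);
  // Emits an active element segment placing {func_index} at {slot}.
  builder->SetIndirectFunction(
      table, slot, func_index,
      WasmModuleBuilder::WasmElemSegment::kRelativeToImports);
}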
diff --git a/deps/v8/src/wasm/wasm-module-sourcemap.cc b/deps/v8/src/wasm/wasm-module-sourcemap.cc
index 85a171e5ac..ea03dae8e2 100644
--- a/deps/v8/src/wasm/wasm-module-sourcemap.cc
+++ b/deps/v8/src/wasm/wasm-module-sourcemap.cc
@@ -6,11 +6,18 @@
#include <algorithm>
-#include "include/v8.h"
+#include "include/v8-context.h"
+#include "include/v8-json.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-object.h"
+#include "include/v8-primitive.h"
#include "src/api/api.h"
#include "src/base/vlq-base64.h"
namespace v8 {
+
+class String;
+
namespace internal {
namespace wasm {
diff --git a/deps/v8/src/wasm/wasm-module-sourcemap.h b/deps/v8/src/wasm/wasm-module-sourcemap.h
index fd8c1117fa..38c0358f90 100644
--- a/deps/v8/src/wasm/wasm-module-sourcemap.h
+++ b/deps/v8/src/wasm/wasm-module-sourcemap.h
@@ -12,10 +12,13 @@
#include <string>
#include <vector>
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
#include "src/base/macros.h"
namespace v8 {
+
+class String;
+
namespace internal {
namespace wasm {
// The class is for decoding and managing source map generated by a WebAssembly
diff --git a/deps/v8/src/wasm/wasm-objects-inl.h b/deps/v8/src/wasm/wasm-objects-inl.h
index a75d83df02..f7e9f2a975 100644
--- a/deps/v8/src/wasm/wasm-objects-inl.h
+++ b/deps/v8/src/wasm/wasm-objects-inl.h
@@ -186,7 +186,6 @@ bool WasmGlobalObject::SetFuncRef(Isolate* isolate, Handle<Object> value) {
// WasmInstanceObject
PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_start, byte*, kMemoryStartOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_size, size_t, kMemorySizeOffset)
-PRIMITIVE_ACCESSORS(WasmInstanceObject, memory_mask, size_t, kMemoryMaskOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, isolate_root, Address,
kIsolateRootOffset)
PRIMITIVE_ACCESSORS(WasmInstanceObject, stack_limit_address, Address,
@@ -559,11 +558,26 @@ int WasmStruct::Size(const wasm::StructType* type) {
Heap::kMinObjectSizeInTaggedWords * kTaggedSize);
}
-int WasmStruct::GcSafeSize(Map map) {
- wasm::StructType* type = GcSafeType(map);
- return Size(type);
+// static
+void WasmStruct::EncodeInstanceSizeInMap(int instance_size, Map map) {
+ // WasmStructs can be bigger than the {map.instance_size_in_words} field
+ // can describe; yet we have to store the instance size somewhere on the
+ // map so that the GC can read it without relying on any other objects
+ // still being around. To solve this problem, we store the instance size
+ // in two other fields that are otherwise unused for WasmStructs.
+ STATIC_ASSERT(0xFFFF - kHeaderSize >
+ wasm::kMaxValueTypeSize * wasm::kV8MaxWasmStructFields);
+ map.SetWasmByte1(instance_size & 0xFF);
+ map.SetWasmByte2(instance_size >> 8);
+}
+
+// static
+int WasmStruct::DecodeInstanceSizeFromMap(Map map) {
+ return (map.WasmByte2() << 8) | map.WasmByte1();
}
+int WasmStruct::GcSafeSize(Map map) { return DecodeInstanceSizeFromMap(map); }
+
wasm::StructType* WasmStruct::type() const { return type(map()); }
Address WasmStruct::RawFieldAddress(int raw_offset) {
@@ -614,12 +628,7 @@ wasm::ArrayType* WasmArray::GcSafeType(Map map) {
wasm::ArrayType* WasmArray::type() const { return type(map()); }
int WasmArray::SizeFor(Map map, int length) {
- int element_size = type(map)->element_type().element_size_bytes();
- return kHeaderSize + RoundUp(element_size * length, kTaggedSize);
-}
-
-int WasmArray::GcSafeSizeFor(Map map, int length) {
- int element_size = GcSafeType(map)->element_type().element_size_bytes();
+ int element_size = DecodeElementSizeFromMap(map);
return kHeaderSize + RoundUp(element_size * length, kTaggedSize);
}
@@ -635,6 +644,14 @@ Handle<Object> WasmArray::GetElement(Isolate* isolate, Handle<WasmArray> array,
return ReadValueAt(isolate, array, element_type, offset);
}
+// static
+void WasmArray::EncodeElementSizeInMap(int element_size, Map map) {
+ map.SetWasmByte1(element_size);
+}
+
+// static
+int WasmArray::DecodeElementSizeFromMap(Map map) { return map.WasmByte1(); }
+
void WasmTypeInfo::clear_foreign_address(Isolate* isolate) {
#ifdef V8_HEAP_SANDBOX
// Due to the type-specific pointer tags for external pointers, we need to
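// A minimal standalone sketch (not V8 code) of the two-byte instance-size
// encoding used by WasmStruct::{Encode,Decode}InstanceSizeInMap above: the
// size is split into a low and a high byte so the GC can recover it from the
// map alone. The struct below stands in for the map's WasmByte1/WasmByte2.
#include <cassert>
#include <cstdint>

struct FakeMapBytes {
  uint8_t byte1;  // stands in for Map::WasmByte1()
  uint8_t byte2;  // stands in for Map::WasmByte2()
};

void EncodeInstanceSize(int instance_size, FakeMapBytes* map) {
  map->byte1 = instance_size & 0xFF;
  map->byte2 = instance_size >> 8;
}

int DecodeInstanceSize(const FakeMapBytes& map) {
  return (map.byte2 << 8) | map.byte1;
}

int main() {
  FakeMapBytes map;
  EncodeInstanceSize(0x1234, &map);
  assert(DecodeInstanceSize(map) == 0x1234);
}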
diff --git a/deps/v8/src/wasm/wasm-objects.cc b/deps/v8/src/wasm/wasm-objects.cc
index a6ff80f624..a52dd7fbc5 100644
--- a/deps/v8/src/wasm/wasm-objects.cc
+++ b/deps/v8/src/wasm/wasm-objects.cc
@@ -17,6 +17,7 @@
#include "src/objects/struct-inl.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils/utils.h"
+#include "src/wasm/code-space-access.h"
#include "src/wasm/jump-table-assembler.h"
#include "src/wasm/module-compiler.h"
#include "src/wasm/module-decoder.h"
@@ -1242,21 +1243,13 @@ bool WasmInstanceObject::EnsureIndirectFunctionTableWithMinimumSize(
void WasmInstanceObject::SetRawMemory(byte* mem_start, size_t mem_size) {
CHECK_LE(mem_size, wasm::max_mem_bytes());
#if V8_HOST_ARCH_64_BIT
- uint64_t mem_mask64 = base::bits::RoundUpToPowerOfTwo64(mem_size) - 1;
set_memory_start(mem_start);
set_memory_size(mem_size);
- set_memory_mask(mem_mask64);
#else
// Must handle memory > 2GiB specially.
CHECK_LE(mem_size, size_t{kMaxUInt32});
- uint32_t mem_mask32 =
- (mem_size > 2 * size_t{GB})
- ? 0xFFFFFFFFu
- : base::bits::RoundUpToPowerOfTwo32(static_cast<uint32_t>(mem_size)) -
- 1;
set_memory_start(mem_start);
set_memory_size(mem_size);
- set_memory_mask(mem_mask32);
#endif
}
@@ -1540,7 +1533,8 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
if (sig_id >= 0) {
wasm::NativeModule* native_module =
instance->module_object().native_module();
- // TODO(wasm): Cache and reuse wrapper code.
+ // TODO(wasm): Cache and reuse wrapper code, to avoid repeated compilation
+ // and permissions switching.
const wasm::WasmFeatures enabled = native_module->enabled_features();
auto resolved = compiler::ResolveWasmImportCall(
callable, sig, instance->module(), enabled);
@@ -1553,10 +1547,11 @@ void WasmInstanceObject::ImportWasmJSFunctionIntoTable(
if (kind == compiler::WasmImportCallKind::kJSFunctionArityMismatch) {
expected_arity = Handle<JSFunction>::cast(callable)
->shared()
- .internal_formal_parameter_count();
+ .internal_formal_parameter_count_without_receiver();
}
wasm::WasmCompilationResult result = compiler::CompileWasmImportCallWrapper(
&env, kind, sig, false, expected_arity);
+ wasm::CodeSpaceWriteScope write_scope(native_module);
std::unique_ptr<wasm::WasmCode> wasm_code = native_module->AddCode(
result.func_index, result.code_desc, result.frame_slot_count,
result.tagged_parameter_slots,
@@ -2030,7 +2025,7 @@ Handle<WasmExportedFunction> WasmExportedFunction::New(
// method. This does not apply to functions exported from asm.js however.
DCHECK_EQ(is_asm_js_module, js_function->IsConstructor());
shared->set_length(arity);
- shared->set_internal_formal_parameter_count(arity);
+ shared->set_internal_formal_parameter_count(JSParameterCount(arity));
shared->set_script(instance->module_object().script());
return Handle<WasmExportedFunction>::cast(js_function);
}
@@ -2115,7 +2110,8 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
CK kind = compiler::kDefaultImportCallKind;
if (callable->IsJSFunction()) {
SharedFunctionInfo shared = Handle<JSFunction>::cast(callable)->shared();
- expected_arity = shared.internal_formal_parameter_count();
+ expected_arity =
+ shared.internal_formal_parameter_count_without_receiver();
if (expected_arity != parameter_count) {
kind = CK::kJSFunctionArityMismatch;
}
@@ -2143,7 +2139,8 @@ Handle<WasmJSFunction> WasmJSFunction::New(Isolate* isolate,
Factory::JSFunctionBuilder{isolate, shared, context}
.set_map(function_map)
.Build();
- js_function->shared().set_internal_formal_parameter_count(parameter_count);
+ js_function->shared().set_internal_formal_parameter_count(
+ JSParameterCount(parameter_count));
return Handle<WasmJSFunction>::cast(js_function);
}
@@ -2217,10 +2214,6 @@ Handle<AsmWasmData> AsmWasmData::New(
return result;
}
-static_assert(wasm::kV8MaxWasmArrayLength <=
- (Smi::kMaxValue - WasmArray::kHeaderSize) / kDoubleSize,
- "max Wasm array size must fit into max object size");
-
namespace wasm {
bool TypecheckJSObject(Isolate* isolate, const WasmModule* module,
diff --git a/deps/v8/src/wasm/wasm-objects.h b/deps/v8/src/wasm/wasm-objects.h
index 11d5c265ed..d34818109b 100644
--- a/deps/v8/src/wasm/wasm-objects.h
+++ b/deps/v8/src/wasm/wasm-objects.h
@@ -356,7 +356,6 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
DECL_ACCESSORS(managed_object_maps, FixedArray)
DECL_PRIMITIVE_ACCESSORS(memory_start, byte*)
DECL_PRIMITIVE_ACCESSORS(memory_size, size_t)
- DECL_PRIMITIVE_ACCESSORS(memory_mask, size_t)
DECL_PRIMITIVE_ACCESSORS(isolate_root, Address)
DECL_PRIMITIVE_ACCESSORS(stack_limit_address, Address)
DECL_PRIMITIVE_ACCESSORS(real_stack_limit_address, Address)
@@ -397,7 +396,6 @@ class V8_EXPORT_PRIVATE WasmInstanceObject : public JSObject {
V(kOptionalPaddingOffset, POINTER_SIZE_PADDING(kOptionalPaddingOffset)) \
V(kMemoryStartOffset, kSystemPointerSize) \
V(kMemorySizeOffset, kSizetSize) \
- V(kMemoryMaskOffset, kSizetSize) \
V(kStackLimitAddressOffset, kSystemPointerSize) \
V(kImportedFunctionTargetsOffset, kSystemPointerSize) \
V(kIndirectFunctionTableTargetsOffset, kSystemPointerSize) \
@@ -903,6 +901,8 @@ class WasmStruct : public TorqueGeneratedWasmStruct<WasmStruct, WasmObject> {
static inline wasm::StructType* GcSafeType(Map map);
static inline int Size(const wasm::StructType* type);
static inline int GcSafeSize(Map map);
+ static inline void EncodeInstanceSizeInMap(int instance_size, Map map);
+ static inline int DecodeInstanceSizeFromMap(Map map);
// Returns the address of the field at given offset.
inline Address RawFieldAddress(int raw_offset);
@@ -939,7 +939,6 @@ class WasmArray : public TorqueGeneratedWasmArray<WasmArray, WasmObject> {
wasm::WasmValue GetElement(uint32_t index);
static inline int SizeFor(Map map, int length);
- static inline int GcSafeSizeFor(Map map, int length);
// Returns boxed value of the array's element.
static inline Handle<Object> GetElement(Isolate* isolate,
@@ -949,6 +948,17 @@ class WasmArray : public TorqueGeneratedWasmArray<WasmArray, WasmObject> {
// Returns the Address of the element at {index}.
Address ElementAddress(uint32_t index);
+ static int MaxLength(const wasm::ArrayType* type) {
+ // The total object size must fit into a Smi, for filler objects. To make
+ // the behavior of Wasm programs independent from the Smi configuration,
+ // we hard-code the smaller of the two supported ranges.
+ int element_shift = type->element_type().element_size_log2();
+ return (SmiTagging<4>::kSmiMaxValue - kHeaderSize) >> element_shift;
+ }
+
+ static inline void EncodeElementSizeInMap(int element_size, Map map);
+ static inline int DecodeElementSizeFromMap(Map map);
+
DECL_PRINTER(WasmArray)
class BodyDescriptor;
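// A hedged worked example (not part of the patch) of WasmArray::MaxLength
// above. With the 32-bit Smi range hard-coded, kSmiMaxValue is 2^30 - 1;
// assuming a header of, say, 16 bytes, an i64 array (element_shift == 3) may
// hold roughly (2^30 - 1 - 16) >> 3, about 134 million elements. The header
// size below is an illustrative stand-in for the real kHeaderSize.
#include <cstdint>

constexpr int32_t kSmiMaxValue31 = (int32_t{1} << 30) - 1;  // 2^30 - 1
constexpr int32_t kIllustrativeHeaderSize = 16;             // assumption

constexpr int32_t MaxArrayLength(int element_shift) {
  return (kSmiMaxValue31 - kIllustrativeHeaderSize) >> element_shift;
}

static_assert(MaxArrayLength(3) == 134217725, "i64 arrays: ~2^27 elements");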
diff --git a/deps/v8/src/web-snapshot/web-snapshot.cc b/deps/v8/src/web-snapshot/web-snapshot.cc
index 5e8ae15c0b..3e2aa43067 100644
--- a/deps/v8/src/web-snapshot/web-snapshot.cc
+++ b/deps/v8/src/web-snapshot/web-snapshot.cc
@@ -6,7 +6,11 @@
#include <limits>
-#include "include/v8.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-object.h"
+#include "include/v8-primitive.h"
+#include "include/v8-script.h"
#include "src/api/api-inl.h"
#include "src/base/platform/wrappers.h"
#include "src/handles/handles.h"
@@ -1513,15 +1517,14 @@ void WebSnapshotDeserializer::ReadValue(
case ValueType::REGEXP: {
Handle<String> pattern = ReadString(false);
Handle<String> flags_string = ReadString(false);
- bool success = false;
- JSRegExp::Flags flags =
- JSRegExp::FlagsFromString(isolate_, flags_string, &success);
- if (!success) {
+ base::Optional<JSRegExp::Flags> flags =
+ JSRegExp::FlagsFromString(isolate_, flags_string);
+ if (!flags.has_value()) {
Throw("Web snapshot: Malformed flags in regular expression");
return;
}
MaybeHandle<JSRegExp> maybe_regexp =
- JSRegExp::New(isolate_, pattern, flags);
+ JSRegExp::New(isolate_, pattern, flags.value());
if (!maybe_regexp.ToHandle(&value)) {
Throw("Web snapshot: Malformed RegExp");
return;
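// A minimal sketch (not V8 code) of the API shape used above: instead of a
// {bool* success} out-parameter, FlagsFromString now reports malformed input
// through an empty optional. The Flags type and the accepted letters here
// are hypothetical stand-ins.
#include <optional>
#include <string>

using Flags = int;

std::optional<Flags> ParseFlags(const std::string& s) {
  Flags flags = 0;
  for (char c : s) {
    if (c == 'g') flags |= 1;       // global
    else if (c == 'i') flags |= 2;  // ignore case
    else return std::nullopt;       // malformed flag string
  }
  return flags;
}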
diff --git a/deps/v8/test/bigint/bigint-shell.cc b/deps/v8/test/bigint/bigint-shell.cc
index 43692d69c5..9cbab4d244 100644
--- a/deps/v8/test/bigint/bigint-shell.cc
+++ b/deps/v8/test/bigint/bigint-shell.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <cmath>
#include <memory>
#include <string>
@@ -28,12 +29,14 @@ int PrintHelp(char** argv) {
return 1;
}
-#define TESTS(V) \
- V(kBarrett, "barrett") \
- V(kBurnikel, "burnikel") \
- V(kFFT, "fft") \
- V(kKaratsuba, "karatsuba") \
- V(kToom, "toom") \
+#define TESTS(V) \
+ V(kBarrett, "barrett") \
+ V(kBurnikel, "burnikel") \
+ V(kFFT, "fft") \
+ V(kFromString, "fromstring") \
+ V(kFromStringBase2, "fromstring2") \
+ V(kKaratsuba, "karatsuba") \
+ V(kToom, "toom") \
V(kToString, "tostring")
enum Operation { kNoOp, kList, kTest };
@@ -86,7 +89,7 @@ class RNG {
static constexpr int kCharsPerDigit = kDigitBits / 4;
-static const char kConversionChars[] = "0123456789abcdef";
+static const char kConversionChars[] = "0123456789abcdefghijklmnopqrstuvwxyz";
std::string FormatHex(Digits X) {
X.Normalize();
@@ -173,6 +176,16 @@ class Runner {
error_ = true;
}
+ void AssertEquals(const char* input, int input_length, int radix,
+ Digits expected, Digits actual) {
+ if (Compare(expected, actual) == 0) return;
+ std::cerr << "Input: " << std::string(input, input_length) << "\n";
+ std::cerr << "Radix: " << radix << "\n";
+ std::cerr << "Expected: " << FormatHex(expected) << "\n";
+ std::cerr << "Actual: " << FormatHex(actual) << "\n";
+ error_ = true;
+ }
+
int RunTest() {
int count = 0;
if (test_ == kBarrett) {
@@ -199,6 +212,14 @@ class Runner {
for (int i = 0; i < runs_; i++) {
TestToString(&count);
}
+ } else if (test_ == kFromString) {
+ for (int i = 0; i < runs_; i++) {
+ TestFromString(&count);
+ }
+ } else if (test_ == kFromStringBase2) {
+ for (int i = 0; i < runs_; i++) {
+ TestFromStringBaseTwo(&count);
+ }
} else {
DCHECK(false); // Unreachable.
}
@@ -391,6 +412,75 @@ class Runner {
}
}
+ void TestFromString(int* count) {
+ constexpr int kMaxDigits = 1 << 20; // Any large-enough value will do.
+ constexpr int kMin = kFromStringLargeThreshold / 2;
+ constexpr int kMax = kFromStringLargeThreshold * 2;
+ for (int size = kMin; size < kMax; size++) {
+ // To keep test execution times low, test one random radix every time.
+ // Generally, radixes 2 through 36 (inclusive) are supported; however
+ // the functions {FromStringLarge} and {FromStringClassic} can't deal
+ // with the data format that {Parse} creates for power-of-two radixes,
+ // so we skip power-of-two radixes here (and test them separately below).
+ // We round up the number of radixes in the list to 32 by padding with
+ // 10, giving decimal numbers extra test coverage, and making it easy
+ // to evenly map a random number into the index space.
+ constexpr uint8_t radixes[] = {3, 5, 6, 7, 9, 10, 11, 12, 13, 14, 15,
+ 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
+ 28, 29, 30, 31, 33, 34, 35, 36, 10, 10};
+ int radix_index = (rng_.NextUint64() & 31);
+ int radix = radixes[radix_index];
+ int num_chars = std::round(size * kDigitBits / std::log2(radix));
+ std::unique_ptr<char[]> chars(new char[num_chars]);
+ GenerateRandomString(chars.get(), num_chars, radix);
+ FromStringAccumulator accumulator(kMaxDigits);
+ FromStringAccumulator ref_accumulator(kMaxDigits);
+ const char* start = chars.get();
+ const char* end = chars.get() + num_chars;
+ accumulator.Parse(start, end, radix);
+ ref_accumulator.Parse(start, end, radix);
+ ScratchDigits result(accumulator.ResultLength());
+ ScratchDigits reference(ref_accumulator.ResultLength());
+ processor()->FromStringLarge(result, &accumulator);
+ processor()->FromStringClassic(reference, &ref_accumulator);
+ AssertEquals(start, num_chars, radix, result, reference);
+ if (error_) return;
+ (*count)++;
+ }
+ }
+
+ void TestFromStringBaseTwo(int* count) {
+ constexpr int kMaxDigits = 1 << 20; // Any large-enough value will do.
+ constexpr int kMin = 1;
+ constexpr int kMax = 100;
+ for (int size = kMin; size < kMax; size++) {
+ ScratchDigits X(size);
+ GenerateRandom(X);
+ for (int bits = 1; bits <= 5; bits++) {
+ int radix = 1 << bits;
+ int chars_required = ToStringResultLength(X, radix, false);
+ int string_len = chars_required;
+ std::unique_ptr<char[]> chars(new char[string_len]);
+ processor()->ToStringImpl(chars.get(), &string_len, X, radix, false,
+ true);
+ // Fill any remaining allocated characters with garbage to test that
+ // too.
+ for (int i = string_len; i < chars_required; i++) {
+ chars[i] = '?';
+ }
+ const char* start = chars.get();
+ const char* end = start + chars_required;
+ FromStringAccumulator accumulator(kMaxDigits);
+ accumulator.Parse(start, end, radix);
+ ScratchDigits result(accumulator.ResultLength());
+ processor()->FromString(result, &accumulator);
+ AssertEquals(start, chars_required, radix, X, result);
+ if (error_) return;
+ (*count)++;
+ }
+ }
+ }
+
int ParseOptions(int argc, char** argv) {
for (int i = 1; i < argc; i++) {
if (strcmp(argv[i], "--list") == 0) {
@@ -425,25 +515,82 @@ class Runner {
}
private:
- // TODO(jkummerow): Also generate "non-random-looking" inputs, i.e. long
- // strings of zeros and ones in the binary representation, such as
- // ((1 << random) ± 1).
void GenerateRandom(RWDigits Z) {
if (Z.len() == 0) return;
- if (sizeof(digit_t) == 8) {
- for (int i = 0; i < Z.len(); i++) {
- Z[i] = static_cast<digit_t>(rng_.NextUint64());
+ int mode = static_cast<int>(rng_.NextUint64() & 3);
+ if (mode == 0) {
+ // Generate random bits.
+ if (sizeof(digit_t) == 8) {
+ for (int i = 0; i < Z.len(); i++) {
+ Z[i] = static_cast<digit_t>(rng_.NextUint64());
+ }
+ } else {
+ for (int i = 0; i < Z.len(); i += 2) {
+ uint64_t random = rng_.NextUint64();
+ Z[i] = static_cast<digit_t>(random);
+ if (i + 1 < Z.len()) Z[i + 1] = static_cast<digit_t>(random >> 32);
+ }
+ }
+ // Special case: we don't want the MSD to be zero.
+ while (Z.msd() == 0) {
+ Z[Z.len() - 1] = static_cast<digit_t>(rng_.NextUint64());
}
+ return;
+ }
+ if (mode == 1) {
+ // Generate a power of 2, with the lone 1-bit somewhere in the MSD.
+ int bit_in_msd = static_cast<int>(rng_.NextUint64() % kDigitBits);
+ Z[Z.len() - 1] = digit_t{1} << bit_in_msd;
+ for (int i = 0; i < Z.len() - 1; i++) Z[i] = 0;
+ return;
+ }
+ // For mode == 2 and mode == 3, generate a random number of 1-bits in the
+ // MSD, aligned to the least-significant end.
+ int bits_in_msd = static_cast<int>(rng_.NextUint64() % kDigitBits);
+ digit_t msd = (digit_t{1} << bits_in_msd) - 1;
+ if (msd == 0) msd = ~digit_t{0};
+ Z[Z.len() - 1] = msd;
+ if (mode == 2) {
+ // The non-MSD digits are all 1-bits.
+ for (int i = 0; i < Z.len() - 1; i++) Z[i] = ~digit_t{0};
} else {
- for (int i = 0; i < Z.len(); i += 2) {
- uint64_t random = rng_.NextUint64();
- Z[i] = static_cast<digit_t>(random);
- if (i + 1 < Z.len()) Z[i + 1] = static_cast<digit_t>(random >> 32);
+ // mode == 3
+ // Each non-MSD digit is either all ones or all zeros.
+ uint64_t random;
+ int random_bits = 0;
+ for (int i = 0; i < Z.len() - 1; i++) {
+ if (random_bits == 0) {
+ random = rng_.NextUint64();
+ random_bits = 64;
+ }
+ Z[i] = random & 1 ? ~digit_t{0} : digit_t{0};
+ random >>= 1;
+ random_bits--;
}
}
- // Special case: we don't want the MSD to be zero.
- while (Z.msd() == 0) {
- Z[Z.len() - 1] = static_cast<digit_t>(rng_.NextUint64());
+ }
+
+ void GenerateRandomString(char* str, int len, int radix) {
+ DCHECK(2 <= radix && radix <= 36);
+ if (len == 0) return;
+ uint64_t random;
+ int available_bits = 0;
+ const int char_bits = BitLength(radix - 1);
+ const uint64_t char_mask = (1u << char_bits) - 1u;
+ for (int i = 0; i < len; i++) {
+ while (true) {
+ if (available_bits < char_bits) {
+ random = rng_.NextUint64();
+ available_bits = 64;
+ }
+ int next_char = static_cast<int>(random & char_mask);
+ random = random >> char_bits;
+ available_bits -= char_bits;
+ if (next_char >= radix) continue;
+ *str = kConversionChars[next_char];
+ str++;
+ break;
+ }
}
}
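// A standalone sketch (not part of the patch) of the rejection sampling that
// GenerateRandomString above performs: draw BitLength(radix - 1) bits, and
// redraw whenever the candidate is >= radix, which keeps each emitted digit
// uniform over [0, radix). Unlike the test helper, this version draws a fresh
// 64-bit value per candidate instead of buffering bits; the xorshift64
// generator is an illustrative stand-in for the test RNG (seed must be
// nonzero).
#include <cstdint>

uint64_t NextUint64(uint64_t* state) {  // xorshift64, illustrative only
  *state ^= *state << 13;
  *state ^= *state >> 7;
  *state ^= *state << 17;
  return *state;
}

char RandomDigit(int radix, uint64_t* state) {
  static const char kChars[] = "0123456789abcdefghijklmnopqrstuvwxyz";
  int char_bits = 0;  // computes BitLength(radix - 1)
  for (int v = radix - 1; v != 0; v >>= 1) char_bits++;
  const uint64_t char_mask = (uint64_t{1} << char_bits) - 1;
  while (true) {
    int candidate = static_cast<int>(NextUint64(state) & char_mask);
    if (candidate < radix) return kChars[candidate];  // accept
    // Reject and redraw: truncating instead would bias low digits.
  }
}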
diff --git a/deps/v8/test/cctest/BUILD.gn b/deps/v8/test/cctest/BUILD.gn
index e7f011df74..351292ba28 100644
--- a/deps/v8/test/cctest/BUILD.gn
+++ b/deps/v8/test/cctest/BUILD.gn
@@ -89,6 +89,7 @@ v8_source_set("cctest_sources") {
"compiler/function-tester.cc",
"compiler/function-tester.h",
"compiler/node-observer-tester.h",
+ "compiler/test-atomic-load-store-codegen.cc",
"compiler/test-basic-block-profiler.cc",
"compiler/test-branch-combine.cc",
"compiler/test-calls-with-arraylike-or-spread.cc",
@@ -119,7 +120,6 @@ v8_source_set("cctest_sources") {
"compiler/test-run-jsops.cc",
"compiler/test-run-load-store.cc",
"compiler/test-run-machops.cc",
- "compiler/test-run-retpoline.cc",
"compiler/test-run-stackcheck.cc",
"compiler/test-run-tail-calls.cc",
"compiler/test-run-unwinding-info.cc",
@@ -290,6 +290,7 @@ v8_source_set("cctest_sources") {
"test-utils.cc",
"test-verifiers.cc",
"test-version.cc",
+ "test-virtual-memory-cage.cc",
"test-weakmaps.cc",
"test-weaksets.cc",
"test-web-snapshots.cc",
@@ -307,7 +308,6 @@ v8_source_set("cctest_sources") {
"test-assembler-arm.cc",
"test-disasm-arm.cc",
"test-macro-assembler-arm.cc",
- "test-poison-disasm-arm.cc",
"test-sync-primitives-arm.cc",
]
} else if (v8_current_cpu == "arm64") {
@@ -319,7 +319,6 @@ v8_source_set("cctest_sources") {
"test-js-arm64-variables.cc",
"test-macro-assembler-arm64.cc",
"test-pointer-auth-arm64.cc",
- "test-poison-disasm-arm64.cc",
"test-sync-primitives-arm64.cc",
"test-utils-arm64.cc",
"test-utils-arm64.h",
@@ -385,6 +384,12 @@ v8_source_set("cctest_sources") {
"test-macro-assembler-riscv64.cc",
"test-simple-riscv64.cc",
]
+ } else if (v8_current_cpu == "loong64") {
+ sources += [ ### gcmole(arch:loong64) ###
+ "test-assembler-loong64.cc",
+ "test-disasm-loong64.cc",
+ "test-macro-assembler-loong64.cc",
+ ]
}
if (v8_use_perfetto) {
@@ -482,7 +487,7 @@ v8_source_set("cctest_sources") {
v8_current_cpu == "s390" || v8_current_cpu == "s390x" ||
v8_current_cpu == "mips" || v8_current_cpu == "mips64" ||
v8_current_cpu == "mipsel" || v8_current_cpu == "mipsel64" ||
- v8_current_cpu == "riscv64") {
+ v8_current_cpu == "riscv64" || v8_current_cpu == "loong64") {
# Disable fmadd/fmsub so that expected results match generated code in
# RunFloat64MulAndFloat64Add1 and friends.
if (!is_win) {
diff --git a/deps/v8/test/cctest/cctest-utils.h b/deps/v8/test/cctest/cctest-utils.h
index 8701b412e0..6d3c027e6d 100644
--- a/deps/v8/test/cctest/cctest-utils.h
+++ b/deps/v8/test/cctest/cctest-utils.h
@@ -46,6 +46,9 @@ namespace internal {
#elif V8_TARGET_ARCH_RISCV64
#define GET_STACK_POINTER_TO(sp_addr) \
__asm__ __volatile__("add %0, sp, x0" : "=r"(sp_addr))
+#elif V8_HOST_ARCH_LOONG64
+#define GET_STACK_POINTER_TO(sp_addr) \
+ __asm__ __volatile__("st.d $sp, %0" : "=m"(sp_addr))
#else
#error Host architecture was not detected as supported by v8
#endif
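// A minimal usage sketch (not part of the patch) for the macro family above,
// including the new LOONG64 branch: each branch stores the current stack
// pointer into the given lvalue via one piece of inline assembly, so tests
// can compare stack addresses across calls. Illustrative only.
uintptr_t ReadStackPointer() {
  uintptr_t sp_addr = 0;
  GET_STACK_POINTER_TO(sp_addr);
  return sp_addr;
}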
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index fa42921b77..7f04173489 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -29,7 +29,12 @@
#include "include/cppgc/platform.h"
#include "include/libplatform/libplatform.h"
-#include "include/v8.h"
+#include "include/v8-array-buffer.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-locker.h"
#include "src/base/strings.h"
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
@@ -336,6 +341,9 @@ int main(int argc, char* argv[]) {
v8::V8::InitializeICUDefaultLocation(argv[0]);
std::unique_ptr<v8::Platform> platform(v8::platform::NewDefaultPlatform());
v8::V8::InitializePlatform(platform.get());
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ CHECK(v8::V8::InitializeVirtualMemoryCage());
+#endif
cppgc::InitializeProcess(platform->GetPageAllocator());
using HelpOptions = v8::internal::FlagList::HelpOptions;
v8::internal::FlagList::SetFlagsFromCommandLine(
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 9b36904475..9c28520ed5 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -136,6 +136,9 @@
'test-strings/Traverse': [PASS, HEAVY],
'test-swiss-name-dictionary-csa/DeleteAtBoundaries': [PASS, HEAVY],
'test-swiss-name-dictionary-csa/SameH2': [PASS, HEAVY],
+
+ # TODO(v8:11382): Reenable once irregexp is reentrant.
+ 'test-regexp/RegExpInterruptReentrantExecution': [FAIL],
}], # ALWAYS
##############################################################################
@@ -175,9 +178,6 @@
# BUG(v8:3434).
'test-api/LoadICFastApi_DirectCall_GCMoveStubWithProfiler': [SKIP],
-
- # TODO(11856): Port nondeterminism detection.
- 'test-liftoff-for-fuzzing/*': [SKIP],
}], # 'arch == arm64'
['arch == arm64 and simulator_run', {
@@ -336,9 +336,6 @@
'test-serialize/StartupSerializerTwice': [SKIP],
'test-serialize/StartupSerializerOnceRunScript': [SKIP],
'test-serialize/StartupSerializerTwiceRunScript': [SKIP],
-
- # TODO(11856): Port nondeterminism detection.
- 'test-liftoff-for-fuzzing/*': [SKIP],
}], # 'arch == arm'
##############################################################################
@@ -459,6 +456,15 @@
}], # 'arch == riscv64 and simulator_run'
##############################################################################
+['arch == loong64', {
+ # The instruction scheduler is disabled on loong64.
+ 'test-instruction-scheduler/DeoptInMiddleOfBasicBlock': [SKIP],
+ # The uint32 values are sign-extended on loong64.
+ 'test-run-load-store/RunLoadStoreZeroExtend64': [SKIP],
+ 'test-run-load-store/RunUnalignedLoadStoreZeroExtend64': [SKIP],
+}], # 'arch == loong64'
+
+##############################################################################
['system == android', {
# Uses too much memory.
'test-api/NewStringRangeError': [SKIP],
@@ -580,6 +586,7 @@
'test-api/TurboAsmDisablesDetach': [SKIP],
'test-api/WasmI32AtomicWaitCallback': [SKIP],
'test-api/WasmI64AtomicWaitCallback': [SKIP],
+ 'test-api/WasmSetJitCodeEventHandler': [SKIP],
'test-api-wasm/WasmStreaming*': [SKIP],
'test-backing-store/Run_WasmModule_Buffer_Externalized_Regression_UseAfterFree': [SKIP],
'test-c-wasm-entry/*': [SKIP],
@@ -613,6 +620,7 @@
'codegen-tester/*': [SKIP],
'test-accessor-assembler/*': [SKIP],
'test-assembler-*': [SKIP],
+ 'test-atomic-load-store-codegen/*': [SKIP],
'test-basic-block-profiler/*': [SKIP],
'test-branch-combine/*': [SKIP],
'test-calls-with-arraylike-or-spread/*': [SKIP],
@@ -649,7 +657,6 @@
'test-run-load-store/*': [SKIP],
'test-run-machops/*': [SKIP],
'test-run-native-calls/*': [SKIP],
- 'test-run-retpoline/*': [SKIP],
'test-run-stackcheck/*': [SKIP],
'test-run-tail-calls/*': [SKIP],
'test-run-unwinding-info/*': [SKIP],
@@ -666,6 +673,9 @@
# Instruction cache flushing is disabled in jitless mode.
'test-icache/*': [SKIP],
+
+ # Tests generated irregexp code.
+ 'test-regexp/RegExpInterruptReentrantExecution': [SKIP],
}], # lite_mode or variant == jitless
##############################################################################
diff --git a/deps/v8/test/cctest/compiler/code-assembler-tester.h b/deps/v8/test/cctest/compiler/code-assembler-tester.h
index 6dc343fa08..d1e904a6a8 100644
--- a/deps/v8/test/cctest/compiler/code-assembler-tester.h
+++ b/deps/v8/test/cctest/compiler/code-assembler-tester.h
@@ -24,7 +24,7 @@ class CodeAssemblerTester {
: zone_(isolate->allocator(), ZONE_NAME, kCompressGraphZone),
scope_(isolate),
state_(isolate, &zone_, descriptor, CodeKind::FOR_TESTING, name,
- PoisoningMitigationLevel::kDontPoison, Builtin::kNoBuiltinId) {}
+ Builtin::kNoBuiltinId) {}
// Test generating code for a stub. Assumes VoidDescriptor call interface.
explicit CodeAssemblerTester(Isolate* isolate, const char* name = "test")
@@ -36,8 +36,7 @@ class CodeAssemblerTester {
const char* name = "test")
: zone_(isolate->allocator(), ZONE_NAME, kCompressGraphZone),
scope_(isolate),
- state_(isolate, &zone_, parameter_count, kind, name,
- PoisoningMitigationLevel::kDontPoison) {}
+ state_(isolate, &zone_, parameter_count, kind, name) {}
CodeAssemblerTester(Isolate* isolate, CodeKind kind,
const char* name = "test")
@@ -48,7 +47,7 @@ class CodeAssemblerTester {
: zone_(isolate->allocator(), ZONE_NAME, kCompressGraphZone),
scope_(isolate),
state_(isolate, &zone_, call_descriptor, CodeKind::FOR_TESTING, name,
- PoisoningMitigationLevel::kDontPoison, Builtin::kNoBuiltinId) {}
+ Builtin::kNoBuiltinId) {}
CodeAssemblerState* state() { return &state_; }
diff --git a/deps/v8/test/cctest/compiler/function-tester.cc b/deps/v8/test/cctest/compiler/function-tester.cc
index 13141bbd60..63df42cfb2 100644
--- a/deps/v8/test/cctest/compiler/function-tester.cc
+++ b/deps/v8/test/cctest/compiler/function-tester.cc
@@ -4,6 +4,7 @@
#include "test/cctest/compiler/function-tester.h"
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/codegen/assembler.h"
#include "src/codegen/optimized-compilation-info.h"
diff --git a/deps/v8/test/cctest/compiler/test-atomic-load-store-codegen.cc b/deps/v8/test/cctest/compiler/test-atomic-load-store-codegen.cc
new file mode 100644
index 0000000000..5e8372d538
--- /dev/null
+++ b/deps/v8/test/cctest/compiler/test-atomic-load-store-codegen.cc
@@ -0,0 +1,398 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bits.h"
+#include "src/objects/objects-inl.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/codegen-tester.h"
+#include "test/cctest/compiler/value-helper.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#if V8_TARGET_LITTLE_ENDIAN
+#define LSB(addr, bytes) addr
+#elif V8_TARGET_BIG_ENDIAN
+#define LSB(addr, bytes) reinterpret_cast<byte*>(addr + 1) - (bytes)
+#else
+#error "Unknown Architecture"
+#endif
+
+#define TEST_ATOMIC_LOAD_INTEGER(ctype, itype, mach_type, order) \
+ do { \
+ ctype buffer[1]; \
+ \
+ RawMachineAssemblerTester<ctype> m; \
+ Node* base = m.PointerConstant(&buffer[0]); \
+ Node* index = m.Int32Constant(0); \
+ AtomicLoadParameters params(mach_type, order); \
+ if (mach_type.MemSize() == 8) { \
+ m.Return(m.AtomicLoad64(params, base, index)); \
+ } else { \
+ m.Return(m.AtomicLoad(params, base, index)); \
+ } \
+ \
+ FOR_INPUTS(ctype, itype, i) { \
+ buffer[0] = i; \
+ CHECK_EQ(i, m.Call()); \
+ } \
+ } while (false)
+
+TEST(AcquireLoadInteger) {
+ TEST_ATOMIC_LOAD_INTEGER(int8_t, int8, MachineType::Int8(),
+ AtomicMemoryOrder::kAcqRel);
+ TEST_ATOMIC_LOAD_INTEGER(uint8_t, uint8, MachineType::Uint8(),
+ AtomicMemoryOrder::kAcqRel);
+ TEST_ATOMIC_LOAD_INTEGER(int16_t, int16, MachineType::Int16(),
+ AtomicMemoryOrder::kAcqRel);
+ TEST_ATOMIC_LOAD_INTEGER(uint16_t, uint16, MachineType::Uint16(),
+ AtomicMemoryOrder::kAcqRel);
+ TEST_ATOMIC_LOAD_INTEGER(int32_t, int32, MachineType::Int32(),
+ AtomicMemoryOrder::kAcqRel);
+ TEST_ATOMIC_LOAD_INTEGER(uint32_t, uint32, MachineType::Uint32(),
+ AtomicMemoryOrder::kAcqRel);
+#if V8_TARGET_ARCH_64_BIT
+ TEST_ATOMIC_LOAD_INTEGER(uint64_t, uint64, MachineType::Uint64(),
+ AtomicMemoryOrder::kAcqRel);
+#endif
+}
+
+TEST(SeqCstLoadInteger) {
+ TEST_ATOMIC_LOAD_INTEGER(int8_t, int8, MachineType::Int8(),
+ AtomicMemoryOrder::kSeqCst);
+ TEST_ATOMIC_LOAD_INTEGER(uint8_t, uint8, MachineType::Uint8(),
+ AtomicMemoryOrder::kSeqCst);
+ TEST_ATOMIC_LOAD_INTEGER(int16_t, int16, MachineType::Int16(),
+ AtomicMemoryOrder::kSeqCst);
+ TEST_ATOMIC_LOAD_INTEGER(uint16_t, uint16, MachineType::Uint16(),
+ AtomicMemoryOrder::kSeqCst);
+ TEST_ATOMIC_LOAD_INTEGER(int32_t, int32, MachineType::Int32(),
+ AtomicMemoryOrder::kSeqCst);
+ TEST_ATOMIC_LOAD_INTEGER(uint32_t, uint32, MachineType::Uint32(),
+ AtomicMemoryOrder::kSeqCst);
+#if V8_TARGET_ARCH_64_BIT
+ TEST_ATOMIC_LOAD_INTEGER(uint64_t, uint64, MachineType::Uint64(),
+ AtomicMemoryOrder::kSeqCst);
+#endif
+}
+
+namespace {
+// Mostly same as CHECK_EQ() but customized for compressed tagged values.
+template <typename CType>
+void CheckEq(CType in_value, CType out_value) {
+ CHECK_EQ(in_value, out_value);
+}
+
+#ifdef V8_COMPRESS_POINTERS
+// Specializations for checking the result of compressing store.
+template <>
+void CheckEq<Object>(Object in_value, Object out_value) {
+ // Compare only lower 32-bits of the value because tagged load/stores are
+ // 32-bit operations anyway.
+ CHECK_EQ(static_cast<Tagged_t>(in_value.ptr()),
+ static_cast<Tagged_t>(out_value.ptr()));
+}
+
+template <>
+void CheckEq<HeapObject>(HeapObject in_value, HeapObject out_value) {
+ return CheckEq<Object>(in_value, out_value);
+}
+
+template <>
+void CheckEq<Smi>(Smi in_value, Smi out_value) {
+ return CheckEq<Object>(in_value, out_value);
+}
+#endif
+
+template <typename TaggedT>
+void InitBuffer(TaggedT* buffer, size_t length, MachineType type) {
+ const size_t kBufferSize = sizeof(TaggedT) * length;
+
+ // Tagged field loads require values to be properly tagged because of
+ // pointer decompression that may be happening during load.
+ Isolate* isolate = CcTest::InitIsolateOnce();
+ Smi* smi_view = reinterpret_cast<Smi*>(&buffer[0]);
+ if (type.IsTaggedSigned()) {
+ for (size_t i = 0; i < length; i++) {
+ smi_view[i] = Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
+ }
+ } else {
+ memcpy(&buffer[0], &isolate->roots_table(), kBufferSize);
+ if (!type.IsTaggedPointer()) {
+ // Also add some Smis if we are checking AnyTagged case.
+ for (size_t i = 0; i < length / 2; i++) {
+ smi_view[i] =
+ Smi::FromInt(static_cast<int>(i + kBufferSize) ^ 0xABCDEF0);
+ }
+ }
+ }
+}
+
+template <typename TaggedT>
+void AtomicLoadTagged(MachineType type, AtomicMemoryOrder order) {
+ const int kNumElems = 16;
+ TaggedT buffer[kNumElems];
+
+ InitBuffer(buffer, kNumElems, type);
+
+ for (int i = 0; i < kNumElems; i++) {
+ BufferedRawMachineAssemblerTester<TaggedT> m;
+ TaggedT* base_pointer = &buffer[0];
+ if (COMPRESS_POINTERS_BOOL) {
+ base_pointer = reinterpret_cast<TaggedT*>(LSB(base_pointer, kTaggedSize));
+ }
+ Node* base = m.PointerConstant(base_pointer);
+ Node* index = m.Int32Constant(i * sizeof(buffer[0]));
+ AtomicLoadParameters params(type, order);
+ Node* load;
+ if (kTaggedSize == 8) {
+ load = m.AtomicLoad64(params, base, index);
+ } else {
+ load = m.AtomicLoad(params, base, index);
+ }
+ m.Return(load);
+ CheckEq<TaggedT>(buffer[i], m.Call());
+ }
+}
+} // namespace
+
+TEST(AcquireLoadTagged) {
+ AtomicLoadTagged<Smi>(MachineType::TaggedSigned(),
+ AtomicMemoryOrder::kAcqRel);
+ AtomicLoadTagged<HeapObject>(MachineType::TaggedPointer(),
+ AtomicMemoryOrder::kAcqRel);
+ AtomicLoadTagged<Object>(MachineType::AnyTagged(),
+ AtomicMemoryOrder::kAcqRel);
+}
+
+TEST(SeqCstLoadTagged) {
+ AtomicLoadTagged<Smi>(MachineType::TaggedSigned(),
+ AtomicMemoryOrder::kSeqCst);
+ AtomicLoadTagged<HeapObject>(MachineType::TaggedPointer(),
+ AtomicMemoryOrder::kSeqCst);
+ AtomicLoadTagged<Object>(MachineType::AnyTagged(),
+ AtomicMemoryOrder::kSeqCst);
+}
+
+#define TEST_ATOMIC_STORE_INTEGER(ctype, itype, mach_type, order) \
+ do { \
+ ctype buffer[1]; \
+ buffer[0] = static_cast<ctype>(-1); \
+ \
+ BufferedRawMachineAssemblerTester<int32_t> m(mach_type); \
+ Node* value = m.Parameter(0); \
+ Node* base = m.PointerConstant(&buffer[0]); \
+ Node* index = m.Int32Constant(0); \
+ AtomicStoreParameters params(mach_type.representation(), kNoWriteBarrier, \
+ order); \
+ if (mach_type.MemSize() == 8) { \
+ m.AtomicStore64(params, base, index, value, nullptr); \
+ } else { \
+ m.AtomicStore(params, base, index, value); \
+ } \
+ \
+ int32_t OK = 0x29000; \
+ m.Return(m.Int32Constant(OK)); \
+ \
+ FOR_INPUTS(ctype, itype, i) { \
+ CHECK_EQ(OK, m.Call(i)); \
+ CHECK_EQ(i, buffer[0]); \
+ } \
+ } while (false)
+
+TEST(ReleaseStoreInteger) {
+ TEST_ATOMIC_STORE_INTEGER(int8_t, int8, MachineType::Int8(),
+ AtomicMemoryOrder::kAcqRel);
+ TEST_ATOMIC_STORE_INTEGER(uint8_t, uint8, MachineType::Uint8(),
+ AtomicMemoryOrder::kAcqRel);
+ TEST_ATOMIC_STORE_INTEGER(int16_t, int16, MachineType::Int16(),
+ AtomicMemoryOrder::kAcqRel);
+ TEST_ATOMIC_STORE_INTEGER(uint16_t, uint16, MachineType::Uint16(),
+ AtomicMemoryOrder::kAcqRel);
+ TEST_ATOMIC_STORE_INTEGER(int32_t, int32, MachineType::Int32(),
+ AtomicMemoryOrder::kAcqRel);
+ TEST_ATOMIC_STORE_INTEGER(uint32_t, uint32, MachineType::Uint32(),
+ AtomicMemoryOrder::kAcqRel);
+#if V8_TARGET_ARCH_64_BIT
+ TEST_ATOMIC_STORE_INTEGER(uint64_t, uint64, MachineType::Uint64(),
+ AtomicMemoryOrder::kAcqRel);
+#endif
+}
+
+TEST(SeqCstStoreInteger) {
+ TEST_ATOMIC_STORE_INTEGER(int8_t, int8, MachineType::Int8(),
+ AtomicMemoryOrder::kSeqCst);
+ TEST_ATOMIC_STORE_INTEGER(uint8_t, uint8, MachineType::Uint8(),
+ AtomicMemoryOrder::kSeqCst);
+ TEST_ATOMIC_STORE_INTEGER(int16_t, int16, MachineType::Int16(),
+ AtomicMemoryOrder::kSeqCst);
+ TEST_ATOMIC_STORE_INTEGER(uint16_t, uint16, MachineType::Uint16(),
+ AtomicMemoryOrder::kSeqCst);
+ TEST_ATOMIC_STORE_INTEGER(int32_t, int32, MachineType::Int32(),
+ AtomicMemoryOrder::kSeqCst);
+ TEST_ATOMIC_STORE_INTEGER(uint32_t, uint32, MachineType::Uint32(),
+ AtomicMemoryOrder::kSeqCst);
+#if V8_TARGET_ARCH_64_BIT
+ TEST_ATOMIC_STORE_INTEGER(uint64_t, uint64, MachineType::Uint64(),
+ AtomicMemoryOrder::kSeqCst);
+#endif
+}
+
+namespace {
+template <typename TaggedT>
+void AtomicStoreTagged(MachineType type, AtomicMemoryOrder order) {
+ // This tests that tagged values are correctly transferred by atomic loads and
+ // stores from in_buffer to out_buffer. For each particular element in
+ // in_buffer, it is copied to a different index in out_buffer, and all other
+ // indices are zapped, to test that instructions of the correct width are emitted.
+
+ const int kNumElems = 16;
+ TaggedT in_buffer[kNumElems];
+ TaggedT out_buffer[kNumElems];
+ uintptr_t zap_data[] = {kZapValue, kZapValue};
+ TaggedT zap_value;
+
+ STATIC_ASSERT(sizeof(TaggedT) <= sizeof(zap_data));
+ MemCopy(&zap_value, &zap_data, sizeof(TaggedT));
+ InitBuffer(in_buffer, kNumElems, type);
+
+#ifdef V8_TARGET_BIG_ENDIAN
+ int offset = sizeof(TaggedT) - ElementSizeInBytes(type.representation());
+#else
+ int offset = 0;
+#endif
+
+ for (int32_t x = 0; x < kNumElems; x++) {
+ int32_t y = kNumElems - x - 1;
+
+ RawMachineAssemblerTester<int32_t> m;
+ int32_t OK = 0x29000 + x;
+ Node* in_base = m.PointerConstant(in_buffer);
+ Node* in_index = m.IntPtrConstant(x * sizeof(TaggedT) + offset);
+ Node* out_base = m.PointerConstant(out_buffer);
+ Node* out_index = m.IntPtrConstant(y * sizeof(TaggedT) + offset);
+
+ Node* load;
+ AtomicLoadParameters load_params(type, order);
+ AtomicStoreParameters store_params(type.representation(), kNoWriteBarrier,
+ order);
+ if (kTaggedSize == 4) {
+ load = m.AtomicLoad(load_params, in_base, in_index);
+ m.AtomicStore(store_params, out_base, out_index, load);
+ } else {
+ DCHECK(m.machine()->Is64());
+ load = m.AtomicLoad64(load_params, in_base, in_index);
+ m.AtomicStore64(store_params, out_base, out_index, load, nullptr);
+ }
+
+ m.Return(m.Int32Constant(OK));
+
+ for (int32_t z = 0; z < kNumElems; z++) {
+ out_buffer[z] = zap_value;
+ }
+ CHECK_NE(in_buffer[x], out_buffer[y]);
+ CHECK_EQ(OK, m.Call());
+ // Mostly same as CHECK_EQ() but customized for compressed tagged values.
+ CheckEq<TaggedT>(in_buffer[x], out_buffer[y]);
+ for (int32_t z = 0; z < kNumElems; z++) {
+ if (z != y) CHECK_EQ(zap_value, out_buffer[z]);
+ }
+ }
+}
+} // namespace
+
+TEST(ReleaseStoreTagged) {
+ AtomicStoreTagged<Smi>(MachineType::TaggedSigned(),
+ AtomicMemoryOrder::kAcqRel);
+ AtomicStoreTagged<HeapObject>(MachineType::TaggedPointer(),
+ AtomicMemoryOrder::kAcqRel);
+ AtomicStoreTagged<Object>(MachineType::AnyTagged(),
+ AtomicMemoryOrder::kAcqRel);
+}
+
+TEST(SeqCstStoreTagged) {
+ AtomicStoreTagged<Smi>(MachineType::TaggedSigned(),
+ AtomicMemoryOrder::kSeqCst);
+ AtomicStoreTagged<HeapObject>(MachineType::TaggedPointer(),
+ AtomicMemoryOrder::kSeqCst);
+ AtomicStoreTagged<Object>(MachineType::AnyTagged(),
+ AtomicMemoryOrder::kSeqCst);
+}
+
+#if V8_TARGET_ARCH_32_BIT
+
+namespace {
+void TestAtomicPairLoadInteger(AtomicMemoryOrder order) {
+ uint64_t buffer[1];
+ uint32_t high;
+ uint32_t low;
+
+ BufferedRawMachineAssemblerTester<int32_t> m;
+ Node* base = m.PointerConstant(&buffer[0]);
+ Node* index = m.Int32Constant(0);
+
+ Node* pair_load = m.AtomicLoad64(
+ AtomicLoadParameters(MachineType::Uint64(), order), base, index);
+ m.StoreToPointer(&low, MachineRepresentation::kWord32,
+ m.Projection(0, pair_load));
+ m.StoreToPointer(&high, MachineRepresentation::kWord32,
+ m.Projection(1, pair_load));
+
+ int32_t OK = 0x29000;
+ m.Return(m.Int32Constant(OK));
+
+ FOR_UINT64_INPUTS(i) {
+ buffer[0] = i;
+ CHECK_EQ(OK, m.Call());
+ CHECK_EQ(i, make_uint64(high, low));
+ }
+}
+} // namespace
+
+TEST(AcquirePairLoadInteger) {
+ TestAtomicPairLoadInteger(AtomicMemoryOrder::kAcqRel);
+}
+
+TEST(SeqCstPairLoadInteger) {
+ TestAtomicPairLoadInteger(AtomicMemoryOrder::kSeqCst);
+}
+
+namespace {
+void TestAtomicPairStoreInteger(AtomicMemoryOrder order) {
+ uint64_t buffer[1];
+
+ BufferedRawMachineAssemblerTester<int32_t> m(MachineType::Uint32(),
+ MachineType::Uint32());
+ Node* base = m.PointerConstant(&buffer[0]);
+ Node* index = m.Int32Constant(0);
+
+ m.AtomicStore64(AtomicStoreParameters(MachineRepresentation::kWord64,
+ kNoWriteBarrier, order),
+ base, index, m.Parameter(0), m.Parameter(1));
+
+ int32_t OK = 0x29000;
+ m.Return(m.Int32Constant(OK));
+
+ FOR_UINT64_INPUTS(i) {
+ CHECK_EQ(OK, m.Call(static_cast<uint32_t>(i & 0xFFFFFFFF),
+ static_cast<uint32_t>(i >> 32)));
+ CHECK_EQ(i, buffer[0]);
+ }
+}
+} // namespace
+
+TEST(ReleasePairStoreInteger) {
+ TestAtomicPairStoreInteger(AtomicMemoryOrder::kAcqRel);
+}
+
+TEST(SeqCstPairStoreInteger) {
+ TestAtomicPairStoreInteger(AtomicMemoryOrder::kSeqCst);
+}
+
+#endif // V8_TARGET_ARCH_32_BIT
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
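// A small standalone sketch (not part of the patch) of the word-pair
// convention the 32-bit tests above rely on: AtomicLoad64 yields a low and a
// high 32-bit projection, and a make_uint64-style recombination must restore
// the original value. make_u64 here is an illustrative local helper.
#include <cassert>
#include <cstdint>

uint64_t make_u64(uint32_t high, uint32_t low) {
  return (uint64_t{high} << 32) | low;
}

int main() {
  uint64_t value = 0x0123456789ABCDEFull;
  uint32_t low = static_cast<uint32_t>(value & 0xFFFFFFFF);  // Projection(0)
  uint32_t high = static_cast<uint32_t>(value >> 32);        // Projection(1)
  assert(make_u64(high, low) == value);
}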
diff --git a/deps/v8/test/cctest/compiler/test-calls-with-arraylike-or-spread.cc b/deps/v8/test/cctest/compiler/test-calls-with-arraylike-or-spread.cc
index 27c6465bca..b885fc0392 100644
--- a/deps/v8/test/cctest/compiler/test-calls-with-arraylike-or-spread.cc
+++ b/deps/v8/test/cctest/compiler/test-calls-with-arraylike-or-spread.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "include/v8-function.h"
#include "src/flags/flags.h"
#include "test/cctest/compiler/node-observer-tester.h"
#include "test/cctest/test-api.h"
diff --git a/deps/v8/test/cctest/compiler/test-code-generator.cc b/deps/v8/test/cctest/compiler/test-code-generator.cc
index 997d7ade73..69823ef55a 100644
--- a/deps/v8/test/cctest/compiler/test-code-generator.cc
+++ b/deps/v8/test/cctest/compiler/test-code-generator.cc
@@ -1002,7 +1002,6 @@ class CodeGeneratorTester {
environment->main_zone(), &frame_, &linkage_,
environment->instructions(), &info_, environment->main_isolate(),
base::Optional<OsrHelper>(), kNoSourcePosition, nullptr,
- PoisoningMitigationLevel::kDontPoison,
AssemblerOptions::Default(environment->main_isolate()),
Builtin::kNoBuiltinId, kMaxUnoptimizedFrameHeight,
kMaxPushedArgumentCount);
@@ -1056,7 +1055,6 @@ class CodeGeneratorTester {
AllocatedOperand(LocationOperand::REGISTER,
MachineRepresentation::kTagged,
kReturnRegister0.code()),
- ImmediateOperand(ImmediateOperand::INLINE_INT32, -1), // poison index.
ImmediateOperand(ImmediateOperand::INLINE_INT32, optional_padding_slot),
ImmediateOperand(ImmediateOperand::INLINE_INT32, stack_slot_delta)};
Instruction* tail_call =
@@ -1079,8 +1077,7 @@ class CodeGeneratorTester {
defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64)
// Only folding register pushes is supported on ARM.
bool supported = ((push_type & CodeGenerator::kRegisterPush) == push_type);
-#elif defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32) || \
- defined(V8_TARGET_ARCH_X87)
+#elif defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_IA32)
bool supported = ((push_type & CodeGenerator::kScalarPush) == push_type);
#else
bool supported = false;
@@ -1145,7 +1142,6 @@ class CodeGeneratorTester {
AllocatedOperand(LocationOperand::REGISTER,
MachineRepresentation::kTagged,
kReturnRegister0.code()),
- ImmediateOperand(ImmediateOperand::INLINE_INT32, -1), // poison index.
ImmediateOperand(ImmediateOperand::INLINE_INT32, optional_padding_slot),
ImmediateOperand(ImmediateOperand::INLINE_INT32,
first_unused_stack_slot)};
diff --git a/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc b/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc
index 81c5e69e4a..b0cccd7696 100644
--- a/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc
+++ b/deps/v8/test/cctest/compiler/test-concurrent-shared-function-info.cc
@@ -4,6 +4,7 @@
#include <limits>
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/codegen/compiler.h"
#include "src/codegen/optimized-compilation-info.h"
@@ -34,11 +35,17 @@ void ExpectSharedFunctionInfoState(SharedFunctionInfo sfi,
HeapObject script_or_debug_info = sfi.script_or_debug_info(kAcquireLoad);
switch (expectedState) {
case SfiState::Compiled:
- CHECK(function_data.IsBytecodeArray() || function_data.IsBaselineData());
+ CHECK(
+ function_data.IsBytecodeArray() ||
+ (function_data.IsCodeT() &&
+ FromCodeT(CodeT::cast(function_data)).kind() == CodeKind::BASELINE));
CHECK(script_or_debug_info.IsScript());
break;
case SfiState::DebugInfo:
- CHECK(function_data.IsBytecodeArray() || function_data.IsBaselineData());
+ CHECK(
+ function_data.IsBytecodeArray() ||
+ (function_data.IsCodeT() &&
+ FromCodeT(CodeT::cast(function_data)).kind() == CodeKind::BASELINE));
CHECK(script_or_debug_info.IsDebugInfo());
{
DebugInfo debug_info = DebugInfo::cast(script_or_debug_info);
diff --git a/deps/v8/test/cctest/compiler/test-linkage.cc b/deps/v8/test/cctest/compiler/test-linkage.cc
index ec801a8af4..bfa4a02578 100644
--- a/deps/v8/test/cctest/compiler/test-linkage.cc
+++ b/deps/v8/test/cctest/compiler/test-linkage.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/codegen/code-factory.h"
#include "src/codegen/compiler.h"
@@ -35,7 +36,7 @@ static Handle<JSFunction> Compile(const char* source) {
.ToHandleChecked();
Handle<SharedFunctionInfo> shared =
Compiler::GetSharedFunctionInfoForScript(
- isolate, source_code, ScriptDetails(), nullptr, nullptr,
+ isolate, source_code, ScriptDetails(),
v8::ScriptCompiler::kNoCompileOptions,
ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE)
.ToHandleChecked();
diff --git a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
index 0be9306e29..69c8018fc7 100644
--- a/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
+++ b/deps/v8/test/cctest/compiler/test-run-bytecode-graph-builder.cc
@@ -4,6 +4,7 @@
#include <utility>
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/codegen/optimized-compilation-info.h"
#include "src/compiler/pipeline.h"
diff --git a/deps/v8/test/cctest/compiler/test-run-deopt.cc b/deps/v8/test/cctest/compiler/test-run-deopt.cc
index 049a8b3956..c23f61e047 100644
--- a/deps/v8/test/cctest/compiler/test-run-deopt.cc
+++ b/deps/v8/test/cctest/compiler/test-run-deopt.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "include/v8-function.h"
#include "src/execution/frames-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/function-tester.h"
diff --git a/deps/v8/test/cctest/compiler/test-run-machops.cc b/deps/v8/test/cctest/compiler/test-run-machops.cc
index d91bb005a1..d975cb31f2 100644
--- a/deps/v8/test/cctest/compiler/test-run-machops.cc
+++ b/deps/v8/test/cctest/compiler/test-run-machops.cc
@@ -4443,7 +4443,7 @@ TEST(RunTruncateFloat32ToInt32) {
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X || \
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
CHECK_EQ(std::numeric_limits<int32_t>::min(), m.Call(i));
-#elif V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
+#elif V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_LOONG64
CHECK_EQ(0, m.Call(i));
#elif V8_TARGET_ARCH_RISCV64
CHECK_EQ(std::numeric_limits<int32_t>::max(), m.Call(i));
@@ -4465,7 +4465,7 @@ TEST(RunTruncateFloat32ToInt32) {
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_S390X || \
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
CHECK_EQ(std::numeric_limits<int32_t>::min(), m.Call(i));
-#elif V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
+#elif V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_LOONG64
CHECK_EQ(0, m.Call(i));
#endif
}
@@ -4580,7 +4580,7 @@ uint64_t ToInt64(uint32_t low, uint32_t high) {
return (static_cast<uint64_t>(high) << 32) | static_cast<uint64_t>(low);
}
-#if V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_X87
+#if V8_TARGET_ARCH_32_BIT
TEST(RunInt32PairAdd) {
BufferedRawMachineAssemblerTester<int32_t> m(
MachineType::Uint32(), MachineType::Uint32(), MachineType::Uint32(),
diff --git a/deps/v8/test/cctest/compiler/test-run-retpoline.cc b/deps/v8/test/cctest/compiler/test-run-retpoline.cc
deleted file mode 100644
index 090351bc38..0000000000
--- a/deps/v8/test/cctest/compiler/test-run-retpoline.cc
+++ /dev/null
@@ -1,210 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/codegen/assembler-inl.h"
-#include "src/codegen/code-stub-assembler.h"
-#include "src/codegen/macro-assembler.h"
-
-#include "test/cctest/cctest.h"
-#include "test/cctest/compiler/code-assembler-tester.h"
-#include "test/cctest/compiler/function-tester.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-namespace test_run_retpoline {
-
-#define __ assembler.
-
-namespace {
-
-// Function that takes a number of pointer-sized integer arguments, calculates a
-// weighted sum of them and returns it.
-Handle<Code> BuildCallee(Isolate* isolate, CallDescriptor* call_descriptor) {
- CodeAssemblerTester tester(isolate, call_descriptor, "callee");
- CodeStubAssembler assembler(tester.state());
- int param_slots = static_cast<int>(call_descriptor->ParameterSlotCount());
- TNode<IntPtrT> sum = __ IntPtrConstant(0);
- for (int i = 0; i < param_slots; ++i) {
- TNode<IntPtrT> product = __ Signed(__ IntPtrMul(
- __ UncheckedParameter<IntPtrT>(i), __ IntPtrConstant(i + 1)));
- sum = __ IntPtrAdd(sum, product);
- }
- __ Return(sum);
- return tester.GenerateCodeCloseAndEscape();
-}
-
-// Function that tail-calls another function with a number of pointer-sized
-// integer arguments.
-Handle<Code> BuildCaller(Isolate* isolate, CallDescriptor* call_descriptor,
- CallDescriptor* callee_descriptor, bool tail) {
- CodeAssemblerTester tester(isolate, call_descriptor, "caller");
- CodeStubAssembler assembler(tester.state());
- std::vector<Node*> params;
- // The first parameter is always the callee.
- Handle<Code> callee = BuildCallee(isolate, callee_descriptor);
- // defeat the instruction selector.
- CodeStubAssembler::TVariable<Code> target_var(&assembler);
- CodeStubAssembler::Label t(&assembler), f(&assembler),
- end(&assembler, &target_var);
- __ Branch(__ Int32Constant(0), &t, &f);
- __ BIND(&t);
- target_var = __ HeapConstant(callee);
- __ Goto(&end);
- __ BIND(&f);
- target_var = __ HeapConstant(callee);
- __ Goto(&end);
- __ BIND(&end);
- params.push_back(target_var.value());
-
- int param_slots = static_cast<int>(callee_descriptor->ParameterSlotCount());
- for (int i = 0; i < param_slots; ++i) {
- params.push_back(__ IntPtrConstant(i));
- }
- DCHECK_EQ(param_slots + 1, params.size());
- if (tail) {
- tester.raw_assembler_for_testing()->TailCallN(
- callee_descriptor, param_slots + 1, params.data());
- } else {
- Node* result = tester.raw_assembler_for_testing()->CallN(
- callee_descriptor, param_slots + 1, params.data());
- __ Return(__ UncheckedCast<IntPtrT>(result));
- }
- return tester.GenerateCodeCloseAndEscape();
-}
-
-// Setup function, which calls "caller".
-Handle<Code> BuildSetupFunction(Isolate* isolate,
- CallDescriptor* caller_descriptor,
- CallDescriptor* callee_descriptor, bool tail) {
- CodeAssemblerTester tester(isolate, 0);
- CodeStubAssembler assembler(tester.state());
- std::vector<Node*> params;
- // The first parameter is always the callee.
- params.push_back(__ HeapConstant(
- BuildCaller(isolate, caller_descriptor, callee_descriptor, tail)));
- // Set up arguments for "Caller".
- int param_slots = static_cast<int>(caller_descriptor->ParameterSlotCount());
- for (int i = 0; i < param_slots; ++i) {
- // Use values that are different from the ones we will pass to this
- // function's callee later.
- params.push_back(__ IntPtrConstant(i + 42));
- }
- DCHECK_EQ(param_slots + 1, params.size());
- TNode<IntPtrT> intptr_result =
- __ UncheckedCast<IntPtrT>(tester.raw_assembler_for_testing()->CallN(
- caller_descriptor, param_slots + 1, params.data()));
- __ Return(__ SmiTag(intptr_result));
- return tester.GenerateCodeCloseAndEscape();
-}
-
-CallDescriptor* CreateDescriptorForStackArguments(Zone* zone, int param_slots) {
- LocationSignature::Builder locations(zone, 1,
- static_cast<size_t>(param_slots));
-
- locations.AddReturn(LinkageLocation::ForRegister(kReturnRegister0.code(),
- MachineType::IntPtr()));
-
- for (int i = 0; i < param_slots; ++i) {
- locations.AddParam(LinkageLocation::ForCallerFrameSlot(
- i - param_slots, MachineType::IntPtr()));
- }
-
- return zone->New<CallDescriptor>(
- CallDescriptor::kCallCodeObject, // kind
- MachineType::AnyTagged(), // target MachineType
- LinkageLocation::ForAnyRegister(
- MachineType::AnyTagged()), // target location
- locations.Build(), // location_sig
- param_slots, // stack parameter slots
- Operator::kNoProperties, // properties
- kNoCalleeSaved, // callee-saved registers
- kNoCalleeSaved, // callee-saved fp
- CallDescriptor::kRetpoline); // flags
-}
-
-// Test a tail call from a caller with n parameters to a callee with m
-// parameters. All parameters are pointer-sized.
-void TestHelper(int n, int m, bool tail) {
- HandleAndZoneScope scope;
- Isolate* isolate = scope.main_isolate();
- CanonicalHandleScope canonical(isolate);
- Zone* zone = scope.main_zone();
- CallDescriptor* caller_descriptor =
- CreateDescriptorForStackArguments(zone, n);
- CallDescriptor* callee_descriptor =
- CreateDescriptorForStackArguments(zone, m);
- Handle<Code> setup =
- BuildSetupFunction(isolate, caller_descriptor, callee_descriptor, tail);
- FunctionTester ft(setup, 0);
- Handle<Object> result = ft.Call().ToHandleChecked();
- int expected = 0;
- for (int i = 0; i < m; ++i) expected += (i + 1) * i;
- CHECK_EQ(expected, Handle<Smi>::cast(result)->value());
-}
-
-} // namespace
-
-#undef __
-
-TEST(RetpolineOddEven) {
- TestHelper(1, 0, false);
- TestHelper(1, 2, false);
- TestHelper(3, 2, false);
- TestHelper(3, 4, false);
-}
-
-TEST(RetpolineOddEvenTail) {
- TestHelper(1, 0, true);
- TestHelper(1, 2, true);
- TestHelper(3, 2, true);
- TestHelper(3, 4, true);
-}
-
-TEST(RetpolineOddOdd) {
- TestHelper(1, 1, false);
- TestHelper(1, 3, false);
- TestHelper(3, 1, false);
- TestHelper(3, 3, false);
-}
-
-TEST(RetpolineOddOddTail) {
- TestHelper(1, 1, true);
- TestHelper(1, 3, true);
- TestHelper(3, 1, true);
- TestHelper(3, 3, true);
-}
-
-TEST(RetpolineEvenEven) {
- TestHelper(0, 0, false);
- TestHelper(0, 2, false);
- TestHelper(2, 0, false);
- TestHelper(2, 2, false);
-}
-
-TEST(RetpolineEvenEvenTail) {
- TestHelper(0, 0, true);
- TestHelper(0, 2, true);
- TestHelper(2, 0, true);
- TestHelper(2, 2, true);
-}
-
-TEST(RetpolineEvenOdd) {
- TestHelper(0, 1, false);
- TestHelper(0, 3, false);
- TestHelper(2, 1, false);
- TestHelper(2, 3, false);
-}
-
-TEST(RetpolineEvenOddTail) {
- TestHelper(0, 1, true);
- TestHelper(0, 3, true);
- TestHelper(2, 1, true);
- TestHelper(2, 3, true);
-}
-
-} // namespace test_run_retpoline
-} // namespace compiler
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/cctest/disasm-regex-helper.cc b/deps/v8/test/cctest/disasm-regex-helper.cc
index 9569252856..1559f90ac2 100644
--- a/deps/v8/test/cctest/disasm-regex-helper.cc
+++ b/deps/v8/test/cctest/disasm-regex-helper.cc
@@ -4,6 +4,7 @@
#include "test/cctest/disasm-regex-helper.h"
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/diagnostics/disassembler.h"
#include "src/objects/objects-inl.h"
diff --git a/deps/v8/test/cctest/heap/test-alloc.cc b/deps/v8/test/cctest/heap/test-alloc.cc
index 855acd2c72..3267b6d352 100644
--- a/deps/v8/test/cctest/heap/test-alloc.cc
+++ b/deps/v8/test/cctest/heap/test-alloc.cc
@@ -25,15 +25,15 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#include "src/init/v8.h"
-#include "test/cctest/cctest.h"
-
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/builtins/accessors.h"
#include "src/heap/heap-inl.h"
+#include "src/init/v8.h"
#include "src/objects/api-callbacks.h"
#include "src/objects/objects-inl.h"
#include "src/objects/property.h"
+#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"
diff --git a/deps/v8/test/cctest/heap/test-embedder-tracing.cc b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
index 77d1a2dd00..4b93eb0a37 100644
--- a/deps/v8/test/cctest/heap/test-embedder-tracing.cc
+++ b/deps/v8/test/cctest/heap/test-embedder-tracing.cc
@@ -5,7 +5,13 @@
#include <unordered_map>
#include <vector>
-#include "include/v8.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-object.h"
+#include "include/v8-persistent-handle.h"
+#include "include/v8-template.h"
+#include "include/v8-traced-handle.h"
#include "src/api/api-inl.h"
#include "src/heap/embedder-tracing.h"
#include "src/heap/heap-inl.h"
diff --git a/deps/v8/test/cctest/heap/test-heap.cc b/deps/v8/test/cctest/heap/test-heap.cc
index 2974b82cb0..a6a5ba7a74 100644
--- a/deps/v8/test/cctest/heap/test-heap.cc
+++ b/deps/v8/test/cctest/heap/test-heap.cc
@@ -29,6 +29,7 @@
#include <utility>
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/base/strings.h"
#include "src/codegen/assembler-inl.h"
@@ -5685,8 +5686,9 @@ TEST(Regress598319) {
marking->Step(kSmallStepSizeInMs,
i::IncrementalMarking::NO_GC_VIA_STACK_GUARD,
StepOrigin::kV8);
- if (page->IsFlagSet(Page::HAS_PROGRESS_BAR) && page->ProgressBar() > 0) {
- CHECK_NE(page->ProgressBar(), arr.get().Size());
+ ProgressBar& progress_bar = page->ProgressBar();
+ if (progress_bar.IsEnabled() && progress_bar.Value() > 0) {
+ CHECK_NE(progress_bar.Value(), arr.get().Size());
{
// Shift by 1, effectively moving one white object across the progress
// bar, meaning that we will miss marking it.
diff --git a/deps/v8/test/cctest/heap/test-iterators.cc b/deps/v8/test/cctest/heap/test-iterators.cc
index 853cf705bd..122cd2f30f 100644
--- a/deps/v8/test/cctest/heap/test-iterators.cc
+++ b/deps/v8/test/cctest/heap/test-iterators.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "include/v8.h"
+#include "include/v8-object.h"
#include "src/api/api-inl.h"
#include "src/execution/isolate.h"
#include "src/heap/combined-heap.h"
diff --git a/deps/v8/test/cctest/heap/test-mark-compact.cc b/deps/v8/test/cctest/heap/test-mark-compact.cc
index 6183ae3218..6c9c4fca18 100644
--- a/deps/v8/test/cctest/heap/test-mark-compact.cc
+++ b/deps/v8/test/cctest/heap/test-mark-compact.cc
@@ -37,11 +37,11 @@
#include <utility>
-#include "src/init/v8.h"
-
+#include "include/v8-locker.h"
#include "src/handles/global-handles.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/mark-compact.h"
+#include "src/init/v8.h"
#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
diff --git a/deps/v8/test/cctest/heap/test-shared-heap.cc b/deps/v8/test/cctest/heap/test-shared-heap.cc
index 2062cbf797..11a544fa5a 100644
--- a/deps/v8/test/cctest/heap/test-shared-heap.cc
+++ b/deps/v8/test/cctest/heap/test-shared-heap.cc
@@ -2,7 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "include/v8.h"
+#include "include/v8-array-buffer.h"
+#include "include/v8-initialization.h"
+#include "include/v8-isolate.h"
#include "src/common/globals.h"
#include "src/handles/handles-inl.h"
#include "src/heap/heap.h"
diff --git a/deps/v8/test/cctest/heap/test-spaces.cc b/deps/v8/test/cctest/heap/test-spaces.cc
index 27300c0c3d..1f8bc11982 100644
--- a/deps/v8/test/cctest/heap/test-spaces.cc
+++ b/deps/v8/test/cctest/heap/test-spaces.cc
@@ -27,6 +27,7 @@
#include <stdlib.h>
+#include "include/v8-initialization.h"
#include "include/v8-platform.h"
#include "src/base/bounded-page-allocator.h"
#include "src/base/macros.h"
@@ -788,6 +789,7 @@ class FailingPageAllocator : public v8::PageAllocator {
Permission permissions) override {
return false;
}
+ bool DecommitPages(void* address, size_t length) override { return false; }
};
} // namespace
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
index 42e3237be4..c8dfb80e5b 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.cc
@@ -9,7 +9,7 @@
#include <vector>
#include "include/libplatform/libplatform.h"
-#include "include/v8.h"
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/base/logging.h"
#include "src/codegen/source-position-table.h"
diff --git a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
index ccba0d4fc7..4afe052419 100644
--- a/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
+++ b/deps/v8/test/cctest/interpreter/bytecode-expectations-printer.h
@@ -9,12 +9,15 @@
#include <string>
#include <vector>
+#include "include/v8-local-handle.h"
#include "src/interpreter/bytecodes.h"
#include "src/objects/objects.h"
namespace v8 {
class Isolate;
+class Script;
+class Module;
namespace internal {
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
index bea6428fad..8fcb7b4cc4 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/DeclareGlobals.golden
@@ -58,7 +58,7 @@ snippet: "
"
frame size: 3
parameter count: 1
-bytecode array length: 23
+bytecode array length: 28
bytecodes: [
B(LdaConstant), U8(0),
B(Star1),
@@ -67,8 +67,10 @@ bytecodes: [
/* 8 S> */ B(LdaSmi), I8(1),
/* 8 E> */ B(StaGlobal), U8(1), U8(0),
/* 11 S> */ B(LdaSmi), I8(2),
+ B(Star1),
/* 12 E> */ B(StaGlobal), U8(1), U8(0),
- B(Star0),
+ B(Mov), R(1), R(0),
+ B(Ldar), R(0),
/* 16 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden
index 808f608f25..58b558db64 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCompoundExpressions.golden
@@ -12,13 +12,15 @@ snippet: "
function f() { return global &= 1; }
f();
"
-frame size: 0
+frame size: 1
parameter count: 1
-bytecode array length: 10
+bytecode array length: 13
bytecodes: [
/* 31 S> */ B(LdaGlobal), U8(0), U8(0),
B(BitwiseAndSmi), I8(1), U8(2),
+ B(Star0),
/* 45 E> */ B(StaGlobal), U8(0), U8(3),
+ B(Ldar), R(0),
/* 50 S> */ B(Return),
]
constant pool: [
@@ -33,13 +35,15 @@ snippet: "
function f() { return unallocated += 1; }
f();
"
-frame size: 0
+frame size: 1
parameter count: 1
-bytecode array length: 10
+bytecode array length: 13
bytecodes: [
/* 32 S> */ B(LdaGlobal), U8(0), U8(0),
B(AddSmi), I8(1), U8(2),
+ B(Star0),
/* 51 E> */ B(StaGlobal), U8(0), U8(3),
+ B(Ldar), R(0),
/* 56 S> */ B(Return),
]
constant pool: [
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
index fa86273ad6..fcb80b2e66 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/GlobalCountOperators.golden
@@ -12,13 +12,15 @@ snippet: "
function f() { return ++global; }
f();
"
-frame size: 0
+frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 12
bytecodes: [
/* 31 S> */ B(LdaGlobal), U8(0), U8(0),
B(Inc), U8(2),
+ B(Star0),
/* 40 E> */ B(StaGlobal), U8(0), U8(3),
+ B(Ldar), R(0),
/* 47 S> */ B(Return),
]
constant pool: [
@@ -33,14 +35,15 @@ snippet: "
function f() { return global--; }
f();
"
-frame size: 1
+frame size: 2
parameter count: 1
-bytecode array length: 14
+bytecode array length: 15
bytecodes: [
/* 31 S> */ B(LdaGlobal), U8(0), U8(0),
B(ToNumeric), U8(2),
B(Star0),
B(Dec), U8(2),
+ B(Star1),
/* 44 E> */ B(StaGlobal), U8(0), U8(3),
B(Ldar), R(0),
/* 47 S> */ B(Return),
@@ -57,13 +60,15 @@ snippet: "
function f() { 'use strict'; return --unallocated; }
f();
"
-frame size: 0
+frame size: 1
parameter count: 1
-bytecode array length: 9
+bytecode array length: 12
bytecodes: [
/* 46 S> */ B(LdaGlobal), U8(0), U8(0),
B(Dec), U8(2),
+ B(Star0),
/* 55 E> */ B(StaGlobal), U8(0), U8(3),
+ B(Ldar), R(0),
/* 67 S> */ B(Return),
]
constant pool: [
@@ -78,14 +83,15 @@ snippet: "
function f() { return unallocated++; }
f();
"
-frame size: 1
+frame size: 2
parameter count: 1
-bytecode array length: 14
+bytecode array length: 15
bytecodes: [
/* 32 S> */ B(LdaGlobal), U8(0), U8(0),
B(ToNumeric), U8(2),
B(Star0),
B(Inc), U8(2),
+ B(Star1),
/* 50 E> */ B(StaGlobal), U8(0), U8(3),
B(Ldar), R(0),
/* 53 S> */ B(Return),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
index 819338ad80..a9970d8e6a 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateAccessorAccess.golden
@@ -83,7 +83,7 @@ bytecodes: [
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 48 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 53 S> */ B(Wide), B(LdaSmi), I16(281),
+ /* 53 S> */ B(Wide), B(LdaSmi), I16(284),
B(Star3),
B(LdaConstant), U8(0),
B(Star4),
@@ -114,7 +114,7 @@ bytecodes: [
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 41 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 46 S> */ B(Wide), B(LdaSmi), I16(280),
+ /* 46 S> */ B(Wide), B(LdaSmi), I16(283),
B(Star3),
B(LdaConstant), U8(0),
B(Star4),
@@ -145,7 +145,7 @@ bytecodes: [
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 48 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 53 S> */ B(Wide), B(LdaSmi), I16(281),
+ /* 53 S> */ B(Wide), B(LdaSmi), I16(284),
B(Star3),
B(LdaConstant), U8(0),
B(Star4),
@@ -176,7 +176,7 @@ bytecodes: [
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 41 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 46 S> */ B(Wide), B(LdaSmi), I16(280),
+ /* 46 S> */ B(Wide), B(LdaSmi), I16(283),
B(Star4),
B(LdaConstant), U8(0),
B(Star5),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden
index 855d8919f3..17d724e521 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/PrivateMethodAccess.golden
@@ -56,7 +56,7 @@ bytecodes: [
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 44 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 49 S> */ B(Wide), B(LdaSmi), I16(279),
+ /* 49 S> */ B(Wide), B(LdaSmi), I16(282),
B(Star3),
B(LdaConstant), U8(0),
B(Star4),
@@ -88,7 +88,7 @@ bytecodes: [
B(Mov), R(this), R(0),
B(Mov), R(context), R(2),
/* 44 E> */ B(CallRuntime), U16(Runtime::kAddPrivateBrand), R(0), U8(3),
- /* 49 S> */ B(Wide), B(LdaSmi), I16(279),
+ /* 49 S> */ B(Wide), B(LdaSmi), I16(282),
B(Star3),
B(LdaConstant), U8(0),
B(Star4),
diff --git a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
index df2bdc2a09..3fc1578156 100644
--- a/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
+++ b/deps/v8/test/cctest/interpreter/bytecode_expectations/StaticPrivateMethodAccess.golden
@@ -24,7 +24,7 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(1),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(277),
+ B(Wide), B(LdaSmi), I16(280),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -55,7 +55,7 @@ frame size: 2
parameter count: 1
bytecode array length: 14
bytecodes: [
- /* 56 S> */ B(Wide), B(LdaSmi), I16(279),
+ /* 56 S> */ B(Wide), B(LdaSmi), I16(282),
B(Star0),
B(LdaConstant), U8(0),
B(Star1),
@@ -82,7 +82,7 @@ frame size: 2
parameter count: 1
bytecode array length: 14
bytecodes: [
- /* 56 S> */ B(Wide), B(LdaSmi), I16(279),
+ /* 56 S> */ B(Wide), B(LdaSmi), I16(282),
B(Star0),
B(LdaConstant), U8(0),
B(Star1),
@@ -121,7 +121,7 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(277),
+ B(Wide), B(LdaSmi), I16(280),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -143,7 +143,7 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(1),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(278),
+ B(Wide), B(LdaSmi), I16(281),
B(Star3),
B(LdaConstant), U8(0),
B(Star4),
@@ -158,7 +158,7 @@ bytecodes: [
B(TestReferenceEqual), R(this),
B(Mov), R(this), R(0),
B(JumpIfTrue), U8(16),
- B(Wide), B(LdaSmi), I16(277),
+ B(Wide), B(LdaSmi), I16(280),
B(Star2),
B(LdaConstant), U8(0),
B(Star3),
@@ -188,7 +188,7 @@ frame size: 2
parameter count: 1
bytecode array length: 14
bytecodes: [
- /* 60 S> */ B(Wide), B(LdaSmi), I16(281),
+ /* 60 S> */ B(Wide), B(LdaSmi), I16(284),
B(Star0),
B(LdaConstant), U8(0),
B(Star1),
@@ -214,7 +214,7 @@ frame size: 2
parameter count: 1
bytecode array length: 14
bytecodes: [
- /* 53 S> */ B(Wide), B(LdaSmi), I16(280),
+ /* 53 S> */ B(Wide), B(LdaSmi), I16(283),
B(Star0),
B(LdaConstant), U8(0),
B(Star1),
@@ -240,7 +240,7 @@ frame size: 2
parameter count: 1
bytecode array length: 14
bytecodes: [
- /* 60 S> */ B(Wide), B(LdaSmi), I16(281),
+ /* 60 S> */ B(Wide), B(LdaSmi), I16(284),
B(Star0),
B(LdaConstant), U8(0),
B(Star1),
@@ -266,7 +266,7 @@ frame size: 3
parameter count: 1
bytecode array length: 14
bytecodes: [
- /* 46 S> */ B(Wide), B(LdaSmi), I16(280),
+ /* 46 S> */ B(Wide), B(LdaSmi), I16(283),
B(Star1),
B(LdaConstant), U8(0),
B(Star2),
diff --git a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
index 17b0909059..803ca8da25 100644
--- a/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
+++ b/deps/v8/test/cctest/interpreter/generate-bytecode-expectations.cc
@@ -9,13 +9,15 @@
#include <sstream>
#include <vector>
-#include "test/cctest/interpreter/bytecode-expectations-printer.h"
-
#include "include/libplatform/libplatform.h"
-#include "include/v8.h"
-
+#include "include/v8-array-buffer.h"
+#include "include/v8-context.h"
+#include "include/v8-initialization.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-message.h"
#include "src/base/logging.h"
#include "src/interpreter/interpreter.h"
+#include "test/cctest/interpreter/bytecode-expectations-printer.h"
#ifdef V8_OS_POSIX
#include <dirent.h>
diff --git a/deps/v8/test/cctest/interpreter/interpreter-tester.h b/deps/v8/test/cctest/interpreter/interpreter-tester.h
index 4f3bc8e4e9..f20d7937cd 100644
--- a/deps/v8/test/cctest/interpreter/interpreter-tester.h
+++ b/deps/v8/test/cctest/interpreter/interpreter-tester.h
@@ -5,11 +5,11 @@
#ifndef V8_TEST_CCTEST_INTERPRETER_INTERPRETER_TESTER_H_
#define V8_TEST_CCTEST_INTERPRETER_INTERPRETER_TESTER_H_
-#include "src/init/v8.h"
-
+#include "include/v8-function.h"
#include "src/api/api.h"
#include "src/execution/execution.h"
#include "src/handles/handles.h"
+#include "src/init/v8.h"
#include "src/interpreter/bytecode-array-builder.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/feedback-cell.h"
diff --git a/deps/v8/test/cctest/interpreter/test-source-positions.cc b/deps/v8/test/cctest/interpreter/test-source-positions.cc
index a003ad6bf4..6dd7be2874 100644
--- a/deps/v8/test/cctest/interpreter/test-source-positions.cc
+++ b/deps/v8/test/cctest/interpreter/test-source-positions.cc
@@ -2,12 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/init/v8.h"
-
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/compiler/pipeline.h"
#include "src/execution/isolate.h"
#include "src/handles/handles.h"
+#include "src/init/v8.h"
#include "src/interpreter/bytecode-generator.h"
#include "src/interpreter/interpreter.h"
#include "src/objects/objects-inl.h"
diff --git a/deps/v8/test/cctest/libsampler/test-sampler.cc b/deps/v8/test/cctest/libsampler/test-sampler.cc
index 0ed7454e5f..a6e4c309d7 100644
--- a/deps/v8/test/cctest/libsampler/test-sampler.cc
+++ b/deps/v8/test/cctest/libsampler/test-sampler.cc
@@ -3,13 +3,13 @@
// found in the LICENSE file.
// Tests of sampler functionalities.
-#include "src/libsampler/sampler.h"
-
+#include "include/v8-external.h"
+#include "include/v8-function.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
+#include "src/libsampler/sampler.h"
#include "test/cctest/cctest.h"
-
namespace v8 {
namespace sampler {
diff --git a/deps/v8/test/cctest/parsing/test-parse-decision.cc b/deps/v8/test/cctest/parsing/test-parse-decision.cc
index 31c6bba9d1..69746279fd 100644
--- a/deps/v8/test/cctest/parsing/test-parse-decision.cc
+++ b/deps/v8/test/cctest/parsing/test-parse-decision.cc
@@ -9,14 +9,14 @@
#include <unordered_map>
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-primitive.h"
#include "src/api/api-inl.h"
#include "src/execution/isolate.h"
#include "src/handles/handles-inl.h"
#include "src/objects/objects-inl.h"
#include "src/objects/shared-function-info-inl.h"
#include "src/utils/utils.h"
-
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/print-extension.cc b/deps/v8/test/cctest/print-extension.cc
index 226d37ac31..766e5ca403 100644
--- a/deps/v8/test/cctest/print-extension.cc
+++ b/deps/v8/test/cctest/print-extension.cc
@@ -27,6 +27,8 @@
#include "test/cctest/print-extension.h"
+#include "include/v8-template.h"
+
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/cctest/print-extension.h b/deps/v8/test/cctest/print-extension.h
index a2d237d667..7060e0569e 100644
--- a/deps/v8/test/cctest/print-extension.h
+++ b/deps/v8/test/cctest/print-extension.h
@@ -28,9 +28,13 @@
#ifndef V8_TEST_CCTEST_PRINT_EXTENSION_H_
#define V8_TEST_CCTEST_PRINT_EXTENSION_H_
-#include "include/v8.h"
+#include "include/v8-extension.h"
namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+
namespace internal {
class PrintExtension : public v8::Extension {
diff --git a/deps/v8/test/cctest/profiler-extension.cc b/deps/v8/test/cctest/profiler-extension.cc
index 780d182c36..23aa5f4131 100644
--- a/deps/v8/test/cctest/profiler-extension.cc
+++ b/deps/v8/test/cctest/profiler-extension.cc
@@ -28,6 +28,8 @@
// Tests of profiles generator and utilities.
#include "test/cctest/profiler-extension.h"
+
+#include "include/v8-template.h"
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/profiler-extension.h b/deps/v8/test/cctest/profiler-extension.h
index f2be3a1334..afb78dcc30 100644
--- a/deps/v8/test/cctest/profiler-extension.h
+++ b/deps/v8/test/cctest/profiler-extension.h
@@ -30,9 +30,14 @@
#ifndef V8_TEST_CCTEST_PROFILER_EXTENSION_H_
#define V8_TEST_CCTEST_PROFILER_EXTENSION_H_
+#include "include/v8-extension.h"
#include "include/v8-profiler.h"
namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+
namespace internal {
class CpuProfiler;
diff --git a/deps/v8/test/cctest/test-access-checks.cc b/deps/v8/test/cctest/test-access-checks.cc
index acea843c14..d89039dcb1 100644
--- a/deps/v8/test/cctest/test-access-checks.cc
+++ b/deps/v8/test/cctest/test-access-checks.cc
@@ -4,6 +4,9 @@
#include <stdlib.h>
+#include "include/v8-container.h"
+#include "include/v8-external.h"
+#include "include/v8-function.h"
#include "test/cctest/cctest.h"
namespace {
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index 79c773b22a..15f303ab7d 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -27,6 +27,7 @@
#include <stdlib.h>
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/execution/frames-inl.h"
#include "src/strings/string-stream.h"
diff --git a/deps/v8/test/cctest/test-api-accessors.cc b/deps/v8/test/cctest/test-api-accessors.cc
index 65332a64b0..d02b6e2023 100644
--- a/deps/v8/test/cctest/test-api-accessors.cc
+++ b/deps/v8/test/cctest/test-api-accessors.cc
@@ -2,7 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "include/v8.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-object.h"
+#include "include/v8-template.h"
#include "src/api/api-inl.h"
#include "src/debug/debug.h"
#include "src/execution/isolate.h"
diff --git a/deps/v8/test/cctest/test-api-array-buffer.cc b/deps/v8/test/cctest/test-api-array-buffer.cc
index ee614fc63c..248a6bc04b 100644
--- a/deps/v8/test/cctest/test-api-array-buffer.cc
+++ b/deps/v8/test/cctest/test-api-array-buffer.cc
@@ -336,8 +336,10 @@ THREADED_TEST(SkipArrayBufferBackingStoreDuringGC) {
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope handle_scope(isolate);
+ void* buffer = CcTest::array_buffer_allocator()->Allocate(100);
// Make sure the pointer looks like a heap object
- uint8_t* store_ptr = reinterpret_cast<uint8_t*>(i::kHeapObjectTag);
+ uintptr_t address = reinterpret_cast<uintptr_t>(buffer) | i::kHeapObjectTag;
+ void* store_ptr = reinterpret_cast<void*>(address);
auto backing_store = v8::ArrayBuffer::NewBackingStore(
store_ptr, 8, [](void*, size_t, void*) {}, nullptr);
@@ -353,6 +355,8 @@ THREADED_TEST(SkipArrayBufferBackingStoreDuringGC) {
// Should not move the pointer
CHECK_EQ(ab->GetBackingStore()->Data(), store_ptr);
+
+ CcTest::array_buffer_allocator()->Free(buffer, 100);
}
THREADED_TEST(SkipArrayBufferDuringScavenge) {
@@ -429,7 +433,7 @@ static void BackingStoreCustomDeleter(void* data, size_t length,
CHECK_EQ(backing_store_custom_length, length);
CHECK_EQ(backing_store_custom_deleter_data,
reinterpret_cast<intptr_t>(deleter_data));
- free(data);
+ CcTest::array_buffer_allocator()->Free(data, length);
backing_store_custom_called = true;
}
@@ -437,7 +441,7 @@ TEST(ArrayBuffer_NewBackingStore_CustomDeleter) {
{
// Create and destroy a backing store.
backing_store_custom_called = false;
- backing_store_custom_data = malloc(100);
+ backing_store_custom_data = CcTest::array_buffer_allocator()->Allocate(100);
backing_store_custom_length = 100;
v8::ArrayBuffer::NewBackingStore(
backing_store_custom_data, backing_store_custom_length,
@@ -451,7 +455,7 @@ TEST(SharedArrayBuffer_NewBackingStore_CustomDeleter) {
{
// Create and destroy a backing store.
backing_store_custom_called = false;
- backing_store_custom_data = malloc(100);
+ backing_store_custom_data = CcTest::array_buffer_allocator()->Allocate(100);
backing_store_custom_length = 100;
v8::SharedArrayBuffer::NewBackingStore(
backing_store_custom_data, backing_store_custom_length,
@@ -465,9 +469,10 @@ TEST(ArrayBuffer_NewBackingStore_EmptyDeleter) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope handle_scope(isolate);
- char static_buffer[100];
+ size_t size = 100;
+ void* buffer = CcTest::array_buffer_allocator()->Allocate(size);
std::unique_ptr<v8::BackingStore> backing_store =
- v8::ArrayBuffer::NewBackingStore(static_buffer, sizeof(static_buffer),
+ v8::ArrayBuffer::NewBackingStore(buffer, size,
v8::BackingStore::EmptyDeleter, nullptr);
uint64_t external_memory_before =
isolate->AdjustAmountOfExternalAllocatedMemory(0);
@@ -477,17 +482,18 @@ TEST(ArrayBuffer_NewBackingStore_EmptyDeleter) {
// The ArrayBuffer constructor does not increase the external memory counter.
// The counter may decrease however if the allocation triggers GC.
CHECK_GE(external_memory_before, external_memory_after);
+ CcTest::array_buffer_allocator()->Free(buffer, size);
}
TEST(SharedArrayBuffer_NewBackingStore_EmptyDeleter) {
LocalContext env;
v8::Isolate* isolate = env->GetIsolate();
v8::HandleScope handle_scope(isolate);
- char static_buffer[100];
+ size_t size = 100;
+ void* buffer = CcTest::array_buffer_allocator()->Allocate(size);
std::unique_ptr<v8::BackingStore> backing_store =
v8::SharedArrayBuffer::NewBackingStore(
- static_buffer, sizeof(static_buffer), v8::BackingStore::EmptyDeleter,
- nullptr);
+ buffer, size, v8::BackingStore::EmptyDeleter, nullptr);
uint64_t external_memory_before =
isolate->AdjustAmountOfExternalAllocatedMemory(0);
v8::SharedArrayBuffer::New(isolate, std::move(backing_store));
@@ -496,6 +502,7 @@ TEST(SharedArrayBuffer_NewBackingStore_EmptyDeleter) {
// The SharedArrayBuffer constructor does not increase the external memory
// counter. The counter may decrease however if the allocation triggers GC.
CHECK_GE(external_memory_before, external_memory_after);
+ CcTest::array_buffer_allocator()->Free(buffer, size);
}
THREADED_TEST(BackingStore_NotShared) {
@@ -506,7 +513,7 @@ THREADED_TEST(BackingStore_NotShared) {
CHECK(!ab->GetBackingStore()->IsShared());
CHECK(!v8::ArrayBuffer::NewBackingStore(isolate, 8)->IsShared());
backing_store_custom_called = false;
- backing_store_custom_data = malloc(100);
+ backing_store_custom_data = CcTest::array_buffer_allocator()->Allocate(100);
backing_store_custom_length = 100;
CHECK(!v8::ArrayBuffer::NewBackingStore(
backing_store_custom_data, backing_store_custom_length,
@@ -523,7 +530,7 @@ THREADED_TEST(BackingStore_Shared) {
CHECK(ab->GetBackingStore()->IsShared());
CHECK(v8::SharedArrayBuffer::NewBackingStore(isolate, 8)->IsShared());
backing_store_custom_called = false;
- backing_store_custom_data = malloc(100);
+ backing_store_custom_data = CcTest::array_buffer_allocator()->Allocate(100);
backing_store_custom_length = 100;
CHECK(v8::SharedArrayBuffer::NewBackingStore(
backing_store_custom_data, backing_store_custom_length,
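Editor's note on the test-api-array-buffer.cc changes above: every `malloc`/stack buffer is replaced with `CcTest::array_buffer_allocator()`, so externalized backing stores come from the embedder's `v8::ArrayBuffer::Allocator` (presumably so the memory satisfies placement constraints newer V8 configurations put on backing stores; the diff itself does not state the motivation). The resulting allocate/wrap/free pattern, sketched standalone under the assumption that `allocator` is a live allocator pointer:

#include <memory>
#include "include/v8-array-buffer.h"

std::unique_ptr<v8::BackingStore> WrapAllocatorBuffer(
    v8::ArrayBuffer::Allocator* allocator, size_t size) {
  void* data = allocator->Allocate(size);  // zero-initialized block
  // The deleter hands the memory back to the same allocator once the last
  // ArrayBuffer referencing this store is gone.
  return v8::ArrayBuffer::NewBackingStore(
      data, size,
      [](void* data, size_t length, void* deleter_data) {
        static_cast<v8::ArrayBuffer::Allocator*>(deleter_data)
            ->Free(data, length);
      },
      allocator);
}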
diff --git a/deps/v8/test/cctest/test-api-icu.cc b/deps/v8/test/cctest/test-api-icu.cc
index c5e617fdd2..9f9bc4575f 100644
--- a/deps/v8/test/cctest/test-api-icu.cc
+++ b/deps/v8/test/cctest/test-api-icu.cc
@@ -6,7 +6,8 @@
#include <stdlib.h>
-#include "include/v8.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
#include "unicode/locid.h"
diff --git a/deps/v8/test/cctest/test-api-interceptors.cc b/deps/v8/test/cctest/test-api-interceptors.cc
index 815c538d22..25a5bdd4f5 100644
--- a/deps/v8/test/cctest/test-api-interceptors.cc
+++ b/deps/v8/test/cctest/test-api-interceptors.cc
@@ -4,8 +4,7 @@
#include <stdlib.h>
-#include "test/cctest/test-api.h"
-
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/base/platform/platform.h"
#include "src/codegen/compilation-cache.h"
@@ -16,6 +15,7 @@
#include "src/runtime/runtime.h"
#include "src/strings/unicode-inl.h"
#include "src/utils/utils.h"
+#include "test/cctest/test-api.h"
using ::v8::Context;
using ::v8::Function;
diff --git a/deps/v8/test/cctest/test-api-stack-traces.cc b/deps/v8/test/cctest/test-api-stack-traces.cc
index cec54c0a8f..9045186e12 100644
--- a/deps/v8/test/cctest/test-api-stack-traces.cc
+++ b/deps/v8/test/cctest/test-api-stack-traces.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/base/strings.h"
#include "test/cctest/test-api.h"
diff --git a/deps/v8/test/cctest/test-api-wasm.cc b/deps/v8/test/cctest/test-api-wasm.cc
index 6064f1384c..724c707d9a 100644
--- a/deps/v8/test/cctest/test-api-wasm.cc
+++ b/deps/v8/test/cctest/test-api-wasm.cc
@@ -4,7 +4,13 @@
#include <memory>
-#include "include/v8.h"
+#include "include/v8-context.h"
+#include "include/v8-function-callback.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-object.h"
+#include "include/v8-persistent-handle.h"
+#include "include/v8-promise.h"
+#include "include/v8-wasm.h"
#include "src/api/api-inl.h"
#include "src/handles/global-handles.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 25fba193bb..f7cbc54499 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -37,7 +37,15 @@
#include <unistd.h>
#endif
+#include "include/v8-date.h"
+#include "include/v8-extension.h"
#include "include/v8-fast-api-calls.h"
+#include "include/v8-function.h"
+#include "include/v8-initialization.h"
+#include "include/v8-json.h"
+#include "include/v8-locker.h"
+#include "include/v8-primitive-object.h"
+#include "include/v8-regexp.h"
#include "include/v8-util.h"
#include "src/api/api-inl.h"
#include "src/base/overflowing-math.h"
@@ -76,6 +84,7 @@
#if V8_ENABLE_WEBASSEMBLY
#include "test/cctest/wasm/wasm-run-utils.h"
+#include "test/common/wasm/test-signatures.h"
#include "test/common/wasm/wasm-macro-gen.h"
#endif // V8_ENABLE_WEBASSEMBLY
@@ -13738,16 +13747,14 @@ static v8::base::HashMap* jitcode_line_info = nullptr;
static int saw_bar = 0;
static int move_events = 0;
-
static bool FunctionNameIs(const char* expected,
const v8::JitCodeEvent* event) {
// Log lines for functions are of the general form:
// "LazyCompile:<type><function_name>" or Function:<type><function_name>,
// where the type is one of "*", "~" or "".
- static const char* kPreamble;
- if (!i::FLAG_lazy) {
- kPreamble = "Function:";
- } else {
+ static const char* kPreamble = "Function:";
+ if (i::FLAG_lazy &&
+ event->code_type != v8::JitCodeEvent::CodeType::WASM_CODE) {
kPreamble = "LazyCompile:";
}
static size_t kPreambleLen = strlen(kPreamble);
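Editor's note: the hunk above changes the preamble choice because wasm code is never compiled lazily, so its log lines keep the "Function:" preamble even when --lazy is on. The same decision rendered as a standalone sketch, with `is_lazy`/`is_wasm` standing in for FLAG_lazy and the event's code_type check:

static const char* LogPreamble(bool is_lazy, bool is_wasm) {
  return (is_lazy && !is_wasm) ? "LazyCompile:" : "Function:";
}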
@@ -13780,7 +13787,6 @@ static bool FunctionNameIs(const char* expected,
return strncmp(tail, expected, expected_len) == 0;
}
-
static void event_handler(const v8::JitCodeEvent* event) {
CHECK_NOT_NULL(event);
CHECK_NOT_NULL(code_map);
@@ -13873,7 +13879,6 @@ static void event_handler(const v8::JitCodeEvent* event) {
}
}
-
UNINITIALIZED_TEST(SetJitCodeEventHandler) {
i::FLAG_stress_compaction = true;
i::FLAG_incremental_marking = false;
@@ -13998,6 +14003,77 @@ UNINITIALIZED_TEST(SetJitCodeEventHandler) {
isolate->Dispose();
}
+#if V8_ENABLE_WEBASSEMBLY
+static bool saw_wasm_main = false;
+static void wasm_event_handler(const v8::JitCodeEvent* event) {
+ switch (event->type) {
+ case v8::JitCodeEvent::CODE_ADDED: {
+ if (FunctionNameIs("main-0-turbofan", event)) {
+ saw_wasm_main = true;
+ // Make sure main function has line info.
+ auto* entry = jitcode_line_info->Lookup(
+ event->code_start, i::ComputePointerHash(event->code_start));
+ CHECK_NOT_NULL(entry);
+ }
+ break;
+ }
+ case v8::JitCodeEvent::CODE_END_LINE_INFO_RECORDING: {
+ jitcode_line_info->LookupOrInsert(
+ event->code_start, i::ComputePointerHash(event->code_start));
+ break;
+ }
+ case v8::JitCodeEvent::CODE_ADD_LINE_POS_INFO: {
+ break;
+ }
+ default: {
+ // Ignore all other events.
+ }
+ }
+}
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+TEST(WasmSetJitCodeEventHandler) {
+ v8::base::HashMap code;
+ code_map = &code;
+
+ v8::base::HashMap lineinfo;
+ jitcode_line_info = &lineinfo;
+
+ WasmRunner<int32_t, int32_t, int32_t> r(TestExecutionTier::kTurbofan);
+ i::Isolate* isolate = r.main_isolate();
+
+ v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+ v8_isolate->SetJitCodeEventHandler(v8::kJitCodeEventDefault,
+ wasm_event_handler);
+
+ TestSignatures sigs;
+ auto& f = r.NewFunction(sigs.i_i(), "f");
+ BUILD(f, WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_LOCAL_GET(0)));
+
+ LocalContext env;
+
+ BUILD(r,
+ WASM_I32_ADD(WASM_LOCAL_GET(0), WASM_CALL_FUNCTION(f.function_index(),
+ WASM_LOCAL_GET(1))));
+
+ Handle<JSFunction> func = r.builder().WrapCode(0);
+ CHECK(env->Global()
+ ->Set(env.local(), v8_str("func"), v8::Utils::ToLocal(func))
+ .FromJust());
+ const char* script = R"(
+ func(1, 2);
+ )";
+ CompileRun(script);
+ CHECK(saw_wasm_main);
+ saw_wasm_main = false;
+}
+} // namespace wasm
+} // namespace internal
+} // namespace v8
+#endif // V8_ENABLE_WEBASSEMBLY
+
TEST(ExternalAllocatedMemory) {
v8::Isolate* isolate = CcTest::isolate();
v8::HandleScope outer(isolate);
@@ -21658,10 +21734,6 @@ TEST(RegExpInterruptAndMakeSubjectTwoByteExternal) {
// experimental engine.
i::FLAG_enable_experimental_regexp_engine_on_excessive_backtracks = false;
RegExpInterruptTest test;
- // We want to be stuck regexp execution, so no fallback to linear-time
- // engine.
- // TODO(mbid,v8:10765): Find a way to test interrupt support of the
- // experimental engine.
test.RunTest(RegExpInterruptTest::MakeSubjectTwoByteExternal);
}
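Editor's note on the new WasmSetJitCodeEventHandler test above: it installs a JIT code event listener and checks that the Turbofan-compiled wasm main function is reported with line info. For orientation, the bare listener shape, as a sketch; `Listener` and the stderr dump are illustrative, while the enum value and the non-null-terminated name field follow the V8 API:

#include <cstdio>
#include "include/v8-callbacks.h"

static void Listener(const v8::JitCodeEvent* event) {
  if (event->type == v8::JitCodeEvent::CODE_ADDED) {
    // event->name.str is not null-terminated; the length is explicit.
    fwrite(event->name.str, 1, event->name.len, stderr);
    fputc('\n', stderr);
  }
}

// Installed on a live isolate, mirroring the test:
//   isolate->SetJitCodeEventHandler(v8::kJitCodeEventDefault, Listener);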
diff --git a/deps/v8/test/cctest/test-assembler-arm64.cc b/deps/v8/test/cctest/test-assembler-arm64.cc
index 2784bfe16b..4d9ac4f71a 100644
--- a/deps/v8/test/cctest/test-assembler-arm64.cc
+++ b/deps/v8/test/cctest/test-assembler-arm64.cc
@@ -2168,7 +2168,6 @@ TEST(far_branch_backward) {
break;
default:
UNREACHABLE();
- break;
}
// Now go past the limit so that branches are now out of range.
@@ -2204,7 +2203,6 @@ TEST(far_branch_backward) {
break;
default:
UNREACHABLE();
- break;
}
__ Bind(&fail);
diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc
index 5fb548b991..6294a588db 100644
--- a/deps/v8/test/cctest/test-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-assembler-ia32.cc
@@ -27,6 +27,7 @@
#include <stdlib.h>
+#include "include/v8-function.h"
#include "src/base/platform/platform.h"
#include "src/base/utils/random-number-generator.h"
#include "src/codegen/assembler-inl.h"
diff --git a/deps/v8/test/cctest/test-assembler-loong64.cc b/deps/v8/test/cctest/test-assembler-loong64.cc
new file mode 100644
index 0000000000..d9ad4d9015
--- /dev/null
+++ b/deps/v8/test/cctest/test-assembler-loong64.cc
@@ -0,0 +1,5180 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <iostream>
+
+#include "src/base/utils/random-number-generator.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/execution/simulator.h"
+#include "src/heap/factory.h"
+#include "src/init/v8.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+
+// Define these function prototypes to match JSEntryFunction in execution.cc.
+// TODO(LOONG64): Refine these signatures per test case.
+using F1 = void*(int x, int p1, int p2, int p3, int p4);
+using F2 = void*(int x, int y, int p2, int p3, int p4);
+using F3 = void*(void* p, int p1, int p2, int p3, int p4);
+using F4 = void*(int64_t x, int64_t y, int64_t p2, int64_t p3, int64_t p4);
+using F5 = void*(void* p0, void* p1, int p2, int p3, int p4);
+
+#define __ assm.
+// v0->a2, v1->a3
+TEST(LA0) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ // Addition.
+ __ addi_d(a2, a0, 0xC);
+
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ int64_t res = reinterpret_cast<int64_t>(f.Call(0xAB0, 0, 0, 0, 0));
+ CHECK_EQ(0xABCL, res);
+}
+
+TEST(LA1) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+ Label L, C;
+
+ __ ori(a1, a0, 0);
+ __ ori(a2, zero_reg, 0);
+ __ b(&C);
+
+ __ bind(&L);
+ __ add_d(a2, a2, a1);
+ __ addi_d(a1, a1, -1);
+
+ __ bind(&C);
+ __ ori(a3, a1, 0);
+
+ __ Branch(&L, ne, a3, Operand((int64_t)0));
+
+ __ or_(a0, a2, zero_reg);
+ __ or_(a1, a3, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F1>::FromCode(*code);
+ int64_t res = reinterpret_cast<int64_t>(f.Call(50, 0, 0, 0, 0));
+ CHECK_EQ(1275L, res);
+}
+
+TEST(LA2) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ Label exit, error;
+
+ __ ori(a4, zero_reg, 0); // 00000000
+ __ lu12i_w(a4, 0x12345); // 12345000
+ __ ori(a4, a4, 0); // 12345000
+ __ ori(a2, a4, 0xF0F); // 12345F0F
+ __ Branch(&error, ne, a2, Operand(0x12345F0F));
+
+ __ ori(a4, zero_reg, 0);
+ __ lu32i_d(a4, 0x12345); // 1 2345 0000 0000
+ __ ori(a4, a4, 0xFFF); // 1 2345 0000 0FFF
+ __ addi_d(a2, a4, 1);
+ __ Branch(&error, ne, a2, Operand(0x1234500001000));
+
+ __ ori(a4, zero_reg, 0);
+ __ lu52i_d(a4, zero_reg, 0x123); // 1230 0000 0000 0000
+ __ ori(a4, a4, 0xFFF); // 123F 0000 0000 0FFF
+ __ addi_d(a2, a4, 1); // 1230 0000 0000 1000
+ __ Branch(&error, ne, a2, Operand(0x1230000000001000));
+
+ __ li(a2, 0x31415926);
+ __ b(&exit);
+
+ __ bind(&error);
+ __ li(a2, 0x666);
+
+ __ bind(&exit);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ int64_t res = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
+
+ CHECK_EQ(0x31415926L, res);
+}
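Editor's note: LA2 above exercises the LoongArch immediate-building ladder, where ori fills bits [11:0], lu12i_w bits [31:12], lu32i_d bits [51:32], and lu52i_d bits [63:52]. A worked plain-C++ rendering of that bit layout, simplified in that it ORs into a zeroed value and ignores the sign extension the real lu12i_w/lu32i_d also perform:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t v = 0;
  v |= uint64_t{0x12345} << 12;  // lu12i_w(reg, 0x12345): bits [31:12]
  v |= uint64_t{0xFFF};          // ori(reg, reg, 0xFFF):  bits [11:0]
  v |= uint64_t{0x12345} << 32;  // lu32i_d(reg, 0x12345): bits [51:32]
  v |= uint64_t{0x123} << 52;    // lu52i_d(reg, reg, 0x123): bits [63:52]
  assert(v == 0x1231234512345FFFull);
  return 0;
}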
+
+TEST(LA3) {
+ // Test 32bit calculate instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ Label exit, error;
+
+ __ li(a4, 0x00000004);
+ __ li(a5, 0x00001234);
+ __ li(a6, 0x12345678);
+ __ li(a7, 0x7FFFFFFF);
+ __ li(t0, static_cast<int32_t>(0xFFFFFFFC));
+ __ li(t1, static_cast<int32_t>(0xFFFFEDCC));
+ __ li(t2, static_cast<int32_t>(0xEDCBA988));
+ __ li(t3, static_cast<int32_t>(0x80000000));
+
+ __ ori(a2, zero_reg, 0); // 0x00000000
+ __ add_w(a2, a4, a5); // 0x00001238
+ __ sub_w(a2, a2, a4); // 0x00001234
+ __ Branch(&error, ne, a2, Operand(0x00001234));
+ __ ori(a3, zero_reg, 0); // 0x00000000
+ __ add_w(a3, a7, a4); // 32bit addu result is sign-extended into 64bit reg.
+ __ Branch(&error, ne, a3, Operand(0xFFFFFFFF80000003));
+
+ __ sub_w(a3, t3, a4); // 0x7FFFFFFC
+ __ Branch(&error, ne, a3, Operand(0x7FFFFFFC));
+
+ __ ori(a2, zero_reg, 0); // 0x00000000
+ __ ori(a3, zero_reg, 0); // 0x00000000
+ __ addi_w(a2, zero_reg, 0x421); // 0x00007421
+ __ addi_w(a2, a2, -0x1); // 0x00007420
+ __ addi_w(a2, a2, -0x20); // 0x00007400
+ __ Branch(&error, ne, a2, Operand(0x0000400));
+ __ addi_w(a3, a7, 0x1); // 0x80000000 - result is sign-extended.
+ __ Branch(&error, ne, a3, Operand(0xFFFFFFFF80000000));
+
+ __ ori(a2, zero_reg, 0); // 0x00000000
+ __ ori(a3, zero_reg, 0); // 0x00000000
+ __ alsl_w(a2, a6, a4, 3); // 0xFFFFFFFF91A2B3C4
+ __ alsl_w(a2, a2, a4, 2); // 0x468ACF14
+ __ Branch(&error, ne, a2, Operand(0x468acf14));
+ __ ori(a0, zero_reg, 31);
+ __ alsl_wu(a3, a6, a4, 3); // 0x91A2B3C4
+ __ alsl_wu(a3, a3, a7, 1); // 0xFFFFFFFFA3456787
+ __ Branch(&error, ne, a3, Operand(0xA3456787));
+
+ __ ori(a2, zero_reg, 0);
+ __ ori(a3, zero_reg, 0);
+ __ mul_w(a2, a5, a7);
+ __ div_w(a2, a2, a4);
+ __ Branch(&error, ne, a2, Operand(0xFFFFFFFFFFFFFB73));
+ __ mul_w(a3, a4, t1);
+ __ Branch(&error, ne, a3, Operand(0xFFFFFFFFFFFFB730));
+ __ div_w(a3, t3, a4);
+ __ Branch(&error, ne, a3, Operand(0xFFFFFFFFE0000000));
+
+ __ ori(a2, zero_reg, 0);
+ __ mulh_w(a2, a4, t1);
+ __ Branch(&error, ne, a2, Operand(0xFFFFFFFFFFFFFFFF));
+ __ mulh_w(a2, a4, a6);
+ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
+
+ __ ori(a2, zero_reg, 0);
+ __ mulh_wu(a2, a4, t1);
+ __ Branch(&error, ne, a2, Operand(0x3));
+ __ mulh_wu(a2, a4, a6);
+ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
+
+ __ ori(a2, zero_reg, 0);
+ __ mulw_d_w(a2, a4, t1);
+ __ Branch(&error, ne, a2, Operand(0xFFFFFFFFFFFFB730));
+ __ mulw_d_w(a2, a4, a6);
+ __ Branch(&error, ne, a2, Operand(0x48D159E0));
+
+ __ ori(a2, zero_reg, 0);
+ __ mulw_d_wu(a2, a4, t1);
+ __ Branch(&error, ne, a2, Operand(0x3FFFFB730)); // unsigned: 0x4 * 0xFFFFEDCC = 0x3FFFFB730, not sign-extended 0xFFFFB730
+ __ ori(a2, zero_reg, 81);
+ __ mulw_d_wu(a2, a4, a6);
+ __ Branch(&error, ne, a2, Operand(0x48D159E0));
+
+ __ ori(a2, zero_reg, 0);
+ __ div_wu(a2, a7, a5);
+ __ Branch(&error, ne, a2, Operand(0x70821));
+ __ div_wu(a2, t0, a5);
+ __ Branch(&error, ne, a2, Operand(0xE1042));
+ __ div_wu(a2, t0, t1);
+ __ Branch(&error, ne, a2, Operand(0x1));
+
+ __ ori(a2, zero_reg, 0);
+ __ mod_w(a2, a6, a5);
+ __ Branch(&error, ne, a2, Operand(0xDA8));
+ __ ori(a2, zero_reg, 0);
+ __ mod_w(a2, t2, a5);
+ __ Branch(&error, ne, a2, Operand(0xFFFFFFFFFFFFF258));
+ __ ori(a2, zero_reg, 0);
+ __ mod_w(a2, t2, t1);
+ __ Branch(&error, ne, a2, Operand(0xFFFFFFFFFFFFF258));
+
+ __ ori(a2, zero_reg, 0);
+ __ mod_wu(a2, a6, a5);
+ __ Branch(&error, ne, a2, Operand(0xDA8));
+ __ mod_wu(a2, t2, a5);
+ __ Branch(&error, ne, a2, Operand(0xF0));
+ __ mod_wu(a2, t2, t1);
+ __ Branch(&error, ne, a2, Operand(0xFFFFFFFFEDCBA988));
+
+ __ li(a2, 0x31415926);
+ __ b(&exit);
+
+ __ bind(&error);
+ __ li(a2, 0x666);
+
+ __ bind(&exit);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ int64_t res = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
+
+ CHECK_EQ(0x31415926L, res);
+}
+
+TEST(LA4) {
+ // Test 64bit arithmetic instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ Label exit, error;
+
+ __ li(a4, 0x17312);
+ __ li(a5, 0x1012131415161718);
+ __ li(a6, 0x51F4B764A26E7412);
+ __ li(a7, 0x7FFFFFFFFFFFFFFF);
+ __ li(t0, static_cast<int64_t>(0xFFFFFFFFFFFFF547));
+ __ li(t1, static_cast<int64_t>(0xDF6B8F35A10E205C));
+ __ li(t2, static_cast<int64_t>(0x81F25A87C4236841));
+ __ li(t3, static_cast<int64_t>(0x8000000000000000));
+
+ __ ori(a2, zero_reg, 0);
+ __ add_d(a2, a4, a5);
+ __ sub_d(a2, a2, a4);
+ __ Branch(&error, ne, a2, Operand(0x1012131415161718));
+ __ ori(a3, zero_reg, 0);
+ __ add_d(a3, a6, a7); // Overflows.
+ __ Branch(&error, ne, a3, Operand(0xd1f4b764a26e7411));
+ __ sub_d(a3, t3, a4); // Overflows.
+ __ Branch(&error, ne, a3, Operand(0x7ffffffffffe8cee));
+
+ __ ori(a2, zero_reg, 0);
+ __ addi_d(a2, a5, 0x412); // Positive result.
+ __ Branch(&error, ne, a2, Operand(0x1012131415161b2a));
+ __ addi_d(a2, a7, 0x547); // Overflows to a negative result.
+ __ Branch(&error, ne, a2, Operand(0x8000000000000546));
+
+ __ ori(t4, zero_reg, 0);
+ __ addu16i_d(a2, t4, 0x1234);
+ __ Branch(&error, ne, a2, Operand(0x12340000));
+ __ addu16i_d(a2, a2, 0x9876);
+ __ Branch(&error, ne, a2, Operand(0xffffffffaaaa0000));
+
+ __ ori(a2, zero_reg, 0);
+ __ alsl_d(a2, t2, t0, 3);
+ __ Branch(&error, ne, a2, Operand(0xf92d43e211b374f));
+
+ __ ori(a2, zero_reg, 0);
+ __ mul_d(a2, a5, a6);
+ __ Branch(&error, ne, a2, Operand(0xdbe6a8729a547fb0));
+ __ mul_d(a2, t0, t1);
+ __ Branch(&error, ne, a2, Operand(0x57ad69f40f870584));
+ __ mul_d(a2, a4, t0);
+ __ Branch(&error, ne, a2, Operand(0xfffffffff07523fe));
+
+ __ ori(a2, zero_reg, 0);
+ __ mulh_d(a2, a5, a6);
+ __ Branch(&error, ne, a2, Operand(0x52514c6c6b54467));
+ __ mulh_d(a2, t0, t1);
+ __ Branch(&error, ne, a2, Operand(0x15d));
+
+ __ ori(a2, zero_reg, 0);
+ __ mulh_du(a2, a5, a6);
+ __ Branch(&error, ne, a2, Operand(0x52514c6c6b54467));
+ __ mulh_du(a2, t0, t1);
+ __ Branch(&error, ne, a2, Operand(0xdf6b8f35a10e1700));
+ __ mulh_du(a2, a4, t0);
+ __ Branch(&error, ne, a2, Operand(0x17311));
+
+ __ ori(a2, zero_reg, 0);
+ __ div_d(a2, a5, a6);
+ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
+ __ div_d(a2, t0, t1);
+ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
+ __ div_d(a2, t1, a4);
+ __ Branch(&error, ne, a2, Operand(0xffffe985f631e6d9));
+
+ __ ori(a2, zero_reg, 0);
+ __ div_du(a2, a5, a6);
+ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
+ __ div_du(a2, t0, t1);
+ __ Branch(&error, ne, a2, Operand(0x1));
+ __ div_du(a2, t1, a4);
+ __ Branch(&error, ne, a2, Operand(0x9a22ffd3973d));
+
+ __ ori(a2, zero_reg, 0);
+ __ mod_d(a2, a6, a4);
+ __ Branch(&error, ne, a2, Operand(0x13558));
+ __ mod_d(a2, t2, t0);
+ __ Branch(&error, ne, a2, Operand(0xfffffffffffffb0a));
+ __ mod_d(a2, t1, a4);
+ __ Branch(&error, ne, a2, Operand(0xffffffffffff6a1a));
+
+ __ ori(a2, zero_reg, 0);
+ __ mod_du(a2, a6, a4);
+ __ Branch(&error, ne, a2, Operand(0x13558));
+ __ mod_du(a2, t2, t0);
+ __ Branch(&error, ne, a2, Operand(0x81f25a87c4236841));
+ __ mod_du(a2, t1, a4);
+ __ Branch(&error, ne, a2, Operand(0x1712));
+
+ // Everything was correctly executed. Load the expected result.
+ __ li(a2, 0x31415926);
+ __ b(&exit);
+
+ __ bind(&error);
+ __ li(a2, 0x666);
+ // Got an error. Return a wrong result.
+
+ __ bind(&exit);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ int64_t res = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
+
+ CHECK_EQ(0x31415926L, res);
+}
+
+TEST(LA5) {
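+ // Test compare and logical instructions.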
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ Label exit, error;
+
+ __ li(a4, 0x17312);
+ __ li(a5, 0x1012131415161718);
+ __ li(a6, 0x51F4B764A26E7412);
+ __ li(a7, 0x7FFFFFFFFFFFFFFF);
+ __ li(t0, static_cast<int64_t>(0xFFFFFFFFFFFFF547));
+ __ li(t1, static_cast<int64_t>(0xDF6B8F35A10E205C));
+ __ li(t2, static_cast<int64_t>(0x81F25A87C4236841));
+ __ li(t3, static_cast<int64_t>(0x8000000000000000));
+
+ __ ori(a2, zero_reg, 0);
+ __ slt(a2, a5, a6);
+ __ Branch(&error, ne, a2, Operand(0x1));
+ __ slt(a2, a7, t0);
+ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
+ __ slt(a2, t1, t1);
+ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
+
+ __ ori(a2, zero_reg, 0);
+ __ sltu(a2, a5, a6);
+ __ Branch(&error, ne, a2, Operand(0x1));
+ __ sltu(a2, a7, t0);
+ __ Branch(&error, ne, a2, Operand(0x1));
+ __ sltu(a2, t1, t1);
+ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
+
+ __ ori(a2, zero_reg, 0);
+ __ slti(a2, a5, 0x123);
+ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
+ __ slti(a2, t0, 0x123);
+ __ Branch(&error, ne, a2, Operand(0x1));
+
+ __ ori(a2, zero_reg, 0);
+ __ sltui(a2, a5, 0x123);
+ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
+ __ sltui(a2, t0, 0x123);
+ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
+
+ __ ori(a2, zero_reg, 0);
+ __ and_(a2, a4, a5);
+ __ Branch(&error, ne, a2, Operand(0x1310));
+ __ and_(a2, a6, a7);
+ __ Branch(&error, ne, a2, Operand(0x51F4B764A26E7412));
+
+ __ ori(a2, zero_reg, 0);
+ __ or_(a2, t0, t1);
+ __ Branch(&error, ne, a2, Operand(0xfffffffffffff55f));
+ __ or_(a2, t2, t3);
+ __ Branch(&error, ne, a2, Operand(0x81f25a87c4236841));
+
+ __ ori(a2, zero_reg, 0);
+ __ nor(a2, a4, a5);
+ __ Branch(&error, ne, a2, Operand(0xefedecebeae888e5));
+ __ nor(a2, a6, a7);
+ __ Branch(&error, ne, a2, Operand(0x8000000000000000));
+
+ __ ori(a2, zero_reg, 0);
+ __ xor_(a2, t0, t1);
+ __ Branch(&error, ne, a2, Operand(0x209470ca5ef1d51b));
+ __ xor_(a2, t2, t3);
+ __ Branch(&error, ne, a2, Operand(0x1f25a87c4236841));
+
+ __ ori(a2, zero_reg, 0);
+ __ andn(a2, a4, a5);
+ __ Branch(&error, ne, a2, Operand(0x16002));
+ __ andn(a2, a6, a7);
+ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
+
+ __ ori(a2, zero_reg, 0);
+ __ orn(a2, t0, t1);
+ __ Branch(&error, ne, a2, Operand(0xffffffffffffffe7));
+ __ orn(a2, t2, t3);
+ __ Branch(&error, ne, a2, Operand(0xffffffffffffffff));
+
+ __ ori(a2, zero_reg, 0);
+ __ andi(a2, a4, 0x123);
+ __ Branch(&error, ne, a2, Operand(0x102));
+ __ andi(a2, a6, 0xDCB);
+ __ Branch(&error, ne, a2, Operand(0x402));
+
+ __ ori(a2, zero_reg, 0);
+ __ xori(a2, t0, 0x123);
+ __ Branch(&error, ne, a2, Operand(0xfffffffffffff464));
+ __ xori(a2, t2, 0xDCB);
+ __ Branch(&error, ne, a2, Operand(0x81f25a87c423658a));
+
+ // Everything was correctly executed. Load the expected result.
+ __ li(a2, 0x31415926);
+ __ b(&exit);
+
+ __ bind(&error);
+ // Got an error. Return a wrong result.
+ __ li(a2, 0x666);
+
+ __ bind(&exit);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ int64_t res = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
+
+ CHECK_EQ(0x31415926L, res);
+}
+
+TEST(LA6) {
+ // Test load and store instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ struct T {
+ int64_t si1;
+ int64_t si2;
+ int64_t si3;
+ int64_t result_ld_b_si1;
+ int64_t result_ld_b_si2;
+ int64_t result_ld_h_si1;
+ int64_t result_ld_h_si2;
+ int64_t result_ld_w_si1;
+ int64_t result_ld_w_si2;
+ int64_t result_ld_d_si1;
+ int64_t result_ld_d_si3;
+ int64_t result_ld_bu_si2;
+ int64_t result_ld_hu_si2;
+ int64_t result_ld_wu_si2;
+ int64_t result_st_b;
+ int64_t result_st_h;
+ int64_t result_st_w;
+ };
+ T t;
+
+ // Ld_b
+ __ Ld_b(a4, MemOperand(a0, offsetof(T, si1)));
+ __ St_d(a4, MemOperand(a0, offsetof(T, result_ld_b_si1)));
+
+ __ Ld_b(a4, MemOperand(a0, offsetof(T, si2)));
+ __ St_d(a4, MemOperand(a0, offsetof(T, result_ld_b_si2)));
+
+ // Ld_h
+ __ Ld_h(a5, MemOperand(a0, offsetof(T, si1)));
+ __ St_d(a5, MemOperand(a0, offsetof(T, result_ld_h_si1)));
+
+ __ Ld_h(a5, MemOperand(a0, offsetof(T, si2)));
+ __ St_d(a5, MemOperand(a0, offsetof(T, result_ld_h_si2)));
+
+ // Ld_w
+ __ Ld_w(a6, MemOperand(a0, offsetof(T, si1)));
+ __ St_d(a6, MemOperand(a0, offsetof(T, result_ld_w_si1)));
+
+ __ Ld_w(a6, MemOperand(a0, offsetof(T, si2)));
+ __ St_d(a6, MemOperand(a0, offsetof(T, result_ld_w_si2)));
+
+ // Ld_d
+ __ Ld_d(a7, MemOperand(a0, offsetof(T, si1)));
+ __ St_d(a7, MemOperand(a0, offsetof(T, result_ld_d_si1)));
+
+ __ Ld_d(a7, MemOperand(a0, offsetof(T, si3)));
+ __ St_d(a7, MemOperand(a0, offsetof(T, result_ld_d_si3)));
+
+ // Ld_bu
+ __ Ld_bu(t0, MemOperand(a0, offsetof(T, si2)));
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_ld_bu_si2)));
+
+ // Ld_hu
+ __ Ld_hu(t1, MemOperand(a0, offsetof(T, si2)));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_ld_hu_si2)));
+
+ // Ld_wu
+ __ Ld_wu(t2, MemOperand(a0, offsetof(T, si2)));
+ __ St_d(t2, MemOperand(a0, offsetof(T, result_ld_wu_si2)));
+
+ // St
+ __ li(t4, 0x11111111);
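+ // Each result field is preloaded with si3 so the checks can verify exactly
+ // which bytes the narrow store overwrites.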
+
+ // St_b
+ __ Ld_d(t5, MemOperand(a0, offsetof(T, si3)));
+ __ St_d(t5, MemOperand(a0, offsetof(T, result_st_b)));
+ __ St_b(t4, MemOperand(a0, offsetof(T, result_st_b)));
+
+ // St_h
+ __ Ld_d(t6, MemOperand(a0, offsetof(T, si3)));
+ __ St_d(t6, MemOperand(a0, offsetof(T, result_st_h)));
+ __ St_h(t4, MemOperand(a0, offsetof(T, result_st_h)));
+
+ // St_w
+ __ Ld_d(t7, MemOperand(a0, offsetof(T, si3)));
+ __ St_d(t7, MemOperand(a0, offsetof(T, result_st_w)));
+ __ St_w(t4, MemOperand(a0, offsetof(T, result_st_w)));
+
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ t.si1 = 0x11223344;
+ t.si2 = 0x99AABBCC;
+ t.si3 = 0x1122334455667788;
+ f.Call(&t, 0, 0, 0, 0);
+
+ CHECK_EQ(static_cast<int64_t>(0x44), t.result_ld_b_si1);
+ CHECK_EQ(static_cast<int64_t>(0xFFFFFFFFFFFFFFCC), t.result_ld_b_si2);
+
+ CHECK_EQ(static_cast<int64_t>(0x3344), t.result_ld_h_si1);
+ CHECK_EQ(static_cast<int64_t>(0xFFFFFFFFFFFFBBCC), t.result_ld_h_si2);
+
+ CHECK_EQ(static_cast<int64_t>(0x11223344), t.result_ld_w_si1);
+ CHECK_EQ(static_cast<int64_t>(0xFFFFFFFF99AABBCC), t.result_ld_w_si2);
+
+ CHECK_EQ(static_cast<int64_t>(0x11223344), t.result_ld_d_si1);
+ CHECK_EQ(static_cast<int64_t>(0x1122334455667788), t.result_ld_d_si3);
+
+ CHECK_EQ(static_cast<int64_t>(0xCC), t.result_ld_bu_si2);
+ CHECK_EQ(static_cast<int64_t>(0xBBCC), t.result_ld_hu_si2);
+ CHECK_EQ(static_cast<int64_t>(0x99AABBCC), t.result_ld_wu_si2);
+
+ CHECK_EQ(static_cast<int64_t>(0x1122334455667711), t.result_st_b);
+ CHECK_EQ(static_cast<int64_t>(0x1122334455661111), t.result_st_h);
+ CHECK_EQ(static_cast<int64_t>(0x1122334411111111), t.result_st_w);
+}
+
+TEST(LA7) {
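+ // Test indexed (register offset) load and store instructions.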
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ struct T {
+ int64_t si1;
+ int64_t si2;
+ int64_t si3;
+ int64_t result_ldx_b_si1;
+ int64_t result_ldx_b_si2;
+ int64_t result_ldx_h_si1;
+ int64_t result_ldx_h_si2;
+ int64_t result_ldx_w_si1;
+ int64_t result_ldx_w_si2;
+ int64_t result_ldx_d_si1;
+ int64_t result_ldx_d_si3;
+ int64_t result_ldx_bu_si2;
+ int64_t result_ldx_hu_si2;
+ int64_t result_ldx_wu_si2;
+ int64_t result_stx_b;
+ int64_t result_stx_h;
+ int64_t result_stx_w;
+ };
+ T t;
+
+ // ldx_b
+ __ li(a2, static_cast<int64_t>(offsetof(T, si1)));
+ __ Ld_b(a4, MemOperand(a0, a2));
+ __ St_d(a4, MemOperand(a0, offsetof(T, result_ldx_b_si1)));
+
+ __ li(a2, static_cast<int64_t>(offsetof(T, si2)));
+ __ Ld_b(a4, MemOperand(a0, a2));
+ __ St_d(a4, MemOperand(a0, offsetof(T, result_ldx_b_si2)));
+
+ // ldx_h
+ __ li(a2, static_cast<int64_t>(offsetof(T, si1)));
+ __ Ld_h(a5, MemOperand(a0, a2));
+ __ St_d(a5, MemOperand(a0, offsetof(T, result_ldx_h_si1)));
+
+ __ li(a2, static_cast<int64_t>(offsetof(T, si2)));
+ __ Ld_h(a5, MemOperand(a0, a2));
+ __ St_d(a5, MemOperand(a0, offsetof(T, result_ldx_h_si2)));
+
+ // ldx_w
+ __ li(a2, static_cast<int64_t>(offsetof(T, si1)));
+ __ Ld_w(a6, MemOperand(a0, a2));
+ __ St_d(a6, MemOperand(a0, offsetof(T, result_ldx_w_si1)));
+
+ __ li(a2, static_cast<int64_t>(offsetof(T, si2)));
+ __ Ld_w(a6, MemOperand(a0, a2));
+ __ St_d(a6, MemOperand(a0, offsetof(T, result_ldx_w_si2)));
+
+ // ldx_d
+ __ li(a2, static_cast<int64_t>(offsetof(T, si1)));
+ __ Ld_d(a7, MemOperand(a0, a2));
+ __ St_d(a7, MemOperand(a0, offsetof(T, result_ldx_d_si1)));
+
+ __ li(a2, static_cast<int64_t>(offsetof(T, si3)));
+ __ Ld_d(a7, MemOperand(a0, a2));
+ __ St_d(a7, MemOperand(a0, offsetof(T, result_ldx_d_si3)));
+
+ // ldx_bu
+ __ li(a2, static_cast<int64_t>(offsetof(T, si2)));
+ __ Ld_bu(t0, MemOperand(a0, a2));
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_ldx_bu_si2)));
+
+ // ldx_hu
+ __ li(a2, static_cast<int64_t>(offsetof(T, si2)));
+ __ Ld_hu(t1, MemOperand(a0, a2));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_ldx_hu_si2)));
+
+ // ldx_wu
+ __ li(a2, static_cast<int64_t>(offsetof(T, si2)));
+ __ Ld_wu(t2, MemOperand(a0, a2));
+ __ St_d(t2, MemOperand(a0, offsetof(T, result_ldx_wu_si2)));
+
+ // stx
+ __ li(t4, 0x11111111);
+
+ // stx_b
+ __ Ld_d(t5, MemOperand(a0, offsetof(T, si3)));
+ __ St_d(t5, MemOperand(a0, offsetof(T, result_stx_b)));
+ __ li(a2, static_cast<int64_t>(offsetof(T, result_stx_b)));
+ __ St_b(t4, MemOperand(a0, a2));
+
+ // stx_h
+ __ Ld_d(t6, MemOperand(a0, offsetof(T, si3)));
+ __ St_d(t6, MemOperand(a0, offsetof(T, result_stx_h)));
+ __ li(a2, static_cast<int64_t>(offsetof(T, result_stx_h)));
+ __ St_h(t4, MemOperand(a0, a2));
+
+ // stx_w
+ __ Ld_d(t7, MemOperand(a0, offsetof(T, si3)));
+ __ li(a2, static_cast<int64_t>(offsetof(T, result_stx_w)));
+ __ St_d(t7, MemOperand(a0, a2));
+ __ li(a3, static_cast<int64_t>(offsetof(T, result_stx_w)));
+ __ St_w(t4, MemOperand(a0, a3));
+
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ t.si1 = 0x11223344;
+ t.si2 = 0x99AABBCC;
+ t.si3 = 0x1122334455667788;
+ f.Call(&t, 0, 0, 0, 0);
+
+ CHECK_EQ(static_cast<int64_t>(0x44), t.result_ldx_b_si1);
+ CHECK_EQ(static_cast<int64_t>(0xFFFFFFFFFFFFFFCC), t.result_ldx_b_si2);
+
+ CHECK_EQ(static_cast<int64_t>(0x3344), t.result_ldx_h_si1);
+ CHECK_EQ(static_cast<int64_t>(0xFFFFFFFFFFFFBBCC), t.result_ldx_h_si2);
+
+ CHECK_EQ(static_cast<int64_t>(0x11223344), t.result_ldx_w_si1);
+ CHECK_EQ(static_cast<int64_t>(0xFFFFFFFF99AABBCC), t.result_ldx_w_si2);
+
+ CHECK_EQ(static_cast<int64_t>(0x11223344), t.result_ldx_d_si1);
+ CHECK_EQ(static_cast<int64_t>(0x1122334455667788), t.result_ldx_d_si3);
+
+ CHECK_EQ(static_cast<int64_t>(0xCC), t.result_ldx_bu_si2);
+ CHECK_EQ(static_cast<int64_t>(0xBBCC), t.result_ldx_hu_si2);
+ CHECK_EQ(static_cast<int64_t>(0x99AABBCC), t.result_ldx_wu_si2);
+
+ CHECK_EQ(static_cast<int64_t>(0x1122334455667711), t.result_stx_b);
+ CHECK_EQ(static_cast<int64_t>(0x1122334455661111), t.result_stx_h);
+ CHECK_EQ(static_cast<int64_t>(0x1122334411111111), t.result_stx_w);
+}
+
+TEST(LDPTR_STPTR) {
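+ // Test ldptr_w/ldptr_d and stptr_w/stptr_d, which address memory with a
+ // signed 14bit offset scaled by 4.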
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ int64_t test[10];
+
+ __ ldptr_w(a4, a0, 0);
+ __ stptr_d(a4, a0, 24); // test[3]
+
+ __ ldptr_w(a5, a0, 8); // test[1]
+ __ stptr_d(a5, a0, 32); // test[4]
+
+ __ ldptr_d(a6, a0, 16); // test[2]
+ __ stptr_d(a6, a0, 40); // test[5]
+
+ __ li(t0, 0x11111111);
+
+ __ stptr_d(a6, a0, 48); // test[6]
+ __ stptr_w(t0, a0, 48); // test[6]
+
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ test[0] = 0x11223344;
+ test[1] = 0x99AABBCC;
+ test[2] = 0x1122334455667788;
+ f.Call(&test, 0, 0, 0, 0);
+
+ CHECK_EQ(static_cast<int64_t>(0x11223344), test[3]);
+ CHECK_EQ(static_cast<int64_t>(0xFFFFFFFF99AABBCC), test[4]);
+ CHECK_EQ(static_cast<int64_t>(0x1122334455667788), test[5]);
+ CHECK_EQ(static_cast<int64_t>(0x1122334411111111), test[6]);
+}
+
+TEST(LA8) {
+ // Test 32bit shift instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ struct T {
+ int32_t input;
+ int32_t result_sll_w_0;
+ int32_t result_sll_w_8;
+ int32_t result_sll_w_10;
+ int32_t result_sll_w_31;
+ int32_t result_srl_w_0;
+ int32_t result_srl_w_8;
+ int32_t result_srl_w_10;
+ int32_t result_srl_w_31;
+ int32_t result_sra_w_0;
+ int32_t result_sra_w_8;
+ int32_t result_sra_w_10;
+ int32_t result_sra_w_31;
+ int32_t result_rotr_w_0;
+ int32_t result_rotr_w_8;
+ int32_t result_slli_w_0;
+ int32_t result_slli_w_8;
+ int32_t result_slli_w_10;
+ int32_t result_slli_w_31;
+ int32_t result_srli_w_0;
+ int32_t result_srli_w_8;
+ int32_t result_srli_w_10;
+ int32_t result_srli_w_31;
+ int32_t result_srai_w_0;
+ int32_t result_srai_w_8;
+ int32_t result_srai_w_10;
+ int32_t result_srai_w_31;
+ int32_t result_rotri_w_0;
+ int32_t result_rotri_w_8;
+ int32_t result_rotri_w_10;
+ int32_t result_rotri_w_31;
+ };
+ T t;
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ __ Ld_w(a4, MemOperand(a0, offsetof(T, input)));
+
+ // sll_w
+ __ li(a5, 0);
+ __ sll_w(t0, a4, a5);
+ __ li(a5, 0x8);
+ __ sll_w(t1, a4, a5);
+ __ li(a5, 0xA);
+ __ sll_w(t2, a4, a5);
+ __ li(a5, 0x1F);
+ __ sll_w(t3, a4, a5);
+
+ __ St_w(t0, MemOperand(a0, offsetof(T, result_sll_w_0)));
+ __ St_w(t1, MemOperand(a0, offsetof(T, result_sll_w_8)));
+ __ St_w(t2, MemOperand(a0, offsetof(T, result_sll_w_10)));
+ __ St_w(t3, MemOperand(a0, offsetof(T, result_sll_w_31)));
+
+ // srl_w
+ __ li(a5, 0x0);
+ __ srl_w(t0, a4, a5);
+ __ li(a5, 0x8);
+ __ srl_w(t1, a4, a5);
+ __ li(a5, 0xA);
+ __ srl_w(t2, a4, a5);
+ __ li(a5, 0x1F);
+ __ srl_w(t3, a4, a5);
+
+ __ St_w(t0, MemOperand(a0, offsetof(T, result_srl_w_0)));
+ __ St_w(t1, MemOperand(a0, offsetof(T, result_srl_w_8)));
+ __ St_w(t2, MemOperand(a0, offsetof(T, result_srl_w_10)));
+ __ St_w(t3, MemOperand(a0, offsetof(T, result_srl_w_31)));
+
+ // sra_w
+ __ li(a5, 0x0);
+ __ sra_w(t0, a4, a5);
+ __ li(a5, 0x8);
+ __ sra_w(t1, a4, a5);
+
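+ // Set the sign bit so the arithmetic shifts below operate on a negative value.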
+ __ li(a6, static_cast<int32_t>(0x80000000));
+ __ add_w(a6, a6, a4);
+ __ li(a5, 0xA);
+ __ sra_w(t2, a6, a5);
+ __ li(a5, 0x1F);
+ __ sra_w(t3, a6, a5);
+
+ __ St_w(t0, MemOperand(a0, offsetof(T, result_sra_w_0)));
+ __ St_w(t1, MemOperand(a0, offsetof(T, result_sra_w_8)));
+ __ St_w(t2, MemOperand(a0, offsetof(T, result_sra_w_10)));
+ __ St_w(t3, MemOperand(a0, offsetof(T, result_sra_w_31)));
+
+ // rotr
+ __ li(a5, 0x0);
+ __ rotr_w(t0, a4, a5);
+ __ li(a6, 0x8);
+ __ rotr_w(t1, a4, a6);
+
+ __ St_w(t0, MemOperand(a0, offsetof(T, result_rotr_w_0)));
+ __ St_w(t1, MemOperand(a0, offsetof(T, result_rotr_w_8)));
+
+ // slli_w
+ __ slli_w(t0, a4, 0);
+ __ slli_w(t1, a4, 0x8);
+ __ slli_w(t2, a4, 0xA);
+ __ slli_w(t3, a4, 0x1F);
+
+ __ St_w(t0, MemOperand(a0, offsetof(T, result_slli_w_0)));
+ __ St_w(t1, MemOperand(a0, offsetof(T, result_slli_w_8)));
+ __ St_w(t2, MemOperand(a0, offsetof(T, result_slli_w_10)));
+ __ St_w(t3, MemOperand(a0, offsetof(T, result_slli_w_31)));
+
+ // srli_w
+ __ srli_w(t0, a4, 0);
+ __ srli_w(t1, a4, 0x8);
+ __ srli_w(t2, a4, 0xA);
+ __ srli_w(t3, a4, 0x1F);
+
+ __ St_w(t0, MemOperand(a0, offsetof(T, result_srli_w_0)));
+ __ St_w(t1, MemOperand(a0, offsetof(T, result_srli_w_8)));
+ __ St_w(t2, MemOperand(a0, offsetof(T, result_srli_w_10)));
+ __ St_w(t3, MemOperand(a0, offsetof(T, result_srli_w_31)));
+
+ // srai_w
+ __ srai_w(t0, a4, 0);
+ __ srai_w(t1, a4, 0x8);
+
+ __ li(a6, static_cast<int32_t>(0x80000000));
+ __ add_w(a6, a6, a4);
+ __ srai_w(t2, a6, 0xA);
+ __ srai_w(t3, a6, 0x1F);
+
+ __ St_w(t0, MemOperand(a0, offsetof(T, result_srai_w_0)));
+ __ St_w(t1, MemOperand(a0, offsetof(T, result_srai_w_8)));
+ __ St_w(t2, MemOperand(a0, offsetof(T, result_srai_w_10)));
+ __ St_w(t3, MemOperand(a0, offsetof(T, result_srai_w_31)));
+
+ // rotri_w
+ __ rotri_w(t0, a4, 0);
+ __ rotri_w(t1, a4, 0x8);
+ __ rotri_w(t2, a4, 0xA);
+ __ rotri_w(t3, a4, 0x1F);
+
+ __ St_w(t0, MemOperand(a0, offsetof(T, result_rotri_w_0)));
+ __ St_w(t1, MemOperand(a0, offsetof(T, result_rotri_w_8)));
+ __ St_w(t2, MemOperand(a0, offsetof(T, result_rotri_w_10)));
+ __ St_w(t3, MemOperand(a0, offsetof(T, result_rotri_w_31)));
+
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ t.input = 0x12345678;
+ f.Call(&t, 0x0, 0, 0, 0);
+
+ CHECK_EQ(static_cast<int32_t>(0x12345678), t.result_sll_w_0);
+ CHECK_EQ(static_cast<int32_t>(0x34567800), t.result_sll_w_8);
+ CHECK_EQ(static_cast<int32_t>(0xD159E000), t.result_sll_w_10);
+ CHECK_EQ(static_cast<int32_t>(0x0), t.result_sll_w_31);
+
+ CHECK_EQ(static_cast<int32_t>(0x12345678), t.result_srl_w_0);
+ CHECK_EQ(static_cast<int32_t>(0x123456), t.result_srl_w_8);
+ CHECK_EQ(static_cast<int32_t>(0x48D15), t.result_srl_w_10);
+ CHECK_EQ(static_cast<int32_t>(0x0), t.result_srl_w_31);
+
+ CHECK_EQ(static_cast<int32_t>(0x12345678), t.result_sra_w_0);
+ CHECK_EQ(static_cast<int32_t>(0x123456), t.result_sra_w_8);
+ CHECK_EQ(static_cast<int32_t>(0xFFE48D15), t.result_sra_w_10);
+ CHECK_EQ(static_cast<int32_t>(0xFFFFFFFF), t.result_sra_w_31);
+
+ CHECK_EQ(static_cast<int32_t>(0x12345678), t.result_rotr_w_0);
+ CHECK_EQ(static_cast<int32_t>(0x78123456), t.result_rotr_w_8);
+
+ CHECK_EQ(static_cast<int32_t>(0x12345678), t.result_slli_w_0);
+ CHECK_EQ(static_cast<int32_t>(0x34567800), t.result_slli_w_8);
+ CHECK_EQ(static_cast<int32_t>(0xD159E000), t.result_slli_w_10);
+ CHECK_EQ(static_cast<int32_t>(0x0), t.result_slli_w_31);
+
+ CHECK_EQ(static_cast<int32_t>(0x12345678), t.result_srli_w_0);
+ CHECK_EQ(static_cast<int32_t>(0x123456), t.result_srli_w_8);
+ CHECK_EQ(static_cast<int32_t>(0x48D15), t.result_srli_w_10);
+ CHECK_EQ(static_cast<int32_t>(0x0), t.result_srli_w_31);
+
+ CHECK_EQ(static_cast<int32_t>(0x12345678), t.result_srai_w_0);
+ CHECK_EQ(static_cast<int32_t>(0x123456), t.result_srai_w_8);
+ CHECK_EQ(static_cast<int32_t>(0xFFE48D15), t.result_srai_w_10);
+ CHECK_EQ(static_cast<int32_t>(0xFFFFFFFF), t.result_srai_w_31);
+
+ CHECK_EQ(static_cast<int32_t>(0x12345678), t.result_rotri_w_0);
+ CHECK_EQ(static_cast<int32_t>(0x78123456), t.result_rotri_w_8);
+ CHECK_EQ(static_cast<int32_t>(0x9E048D15), t.result_rotri_w_10);
+ CHECK_EQ(static_cast<int32_t>(0x2468ACF0), t.result_rotri_w_31);
+}
+
+TEST(LA9) {
+ // Test 64bit shift instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ struct T {
+ int64_t input;
+ int64_t result_sll_d_0;
+ int64_t result_sll_d_13;
+ int64_t result_sll_d_30;
+ int64_t result_sll_d_63;
+ int64_t result_srl_d_0;
+ int64_t result_srl_d_13;
+ int64_t result_srl_d_30;
+ int64_t result_srl_d_63;
+ int64_t result_sra_d_0;
+ int64_t result_sra_d_13;
+ int64_t result_sra_d_30;
+ int64_t result_sra_d_63;
+ int64_t result_rotr_d_0;
+ int64_t result_rotr_d_13;
+ int64_t result_slli_d_0;
+ int64_t result_slli_d_13;
+ int64_t result_slli_d_30;
+ int64_t result_slli_d_63;
+ int64_t result_srli_d_0;
+ int64_t result_srli_d_13;
+ int64_t result_srli_d_30;
+ int64_t result_srli_d_63;
+ int64_t result_srai_d_0;
+ int64_t result_srai_d_13;
+ int64_t result_srai_d_30;
+ int64_t result_srai_d_63;
+ int64_t result_rotri_d_0;
+ int64_t result_rotri_d_13;
+ int64_t result_rotri_d_30;
+ int64_t result_rotri_d_63;
+ };
+
+ T t;
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ __ Ld_d(a4, MemOperand(a0, offsetof(T, input)));
+
+ // sll_d
+ __ li(a5, 0);
+ __ sll_d(t0, a4, a5);
+ __ li(a5, 0xD);
+ __ sll_d(t1, a4, a5);
+ __ li(a5, 0x1E);
+ __ sll_d(t2, a4, a5);
+ __ li(a5, 0x3F);
+ __ sll_d(t3, a4, a5);
+
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_sll_d_0)));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_sll_d_13)));
+ __ St_d(t2, MemOperand(a0, offsetof(T, result_sll_d_30)));
+ __ St_d(t3, MemOperand(a0, offsetof(T, result_sll_d_63)));
+
+ // srl_d
+ __ li(a5, 0x0);
+ __ srl_d(t0, a4, a5);
+ __ li(a5, 0xD);
+ __ srl_d(t1, a4, a5);
+ __ li(a5, 0x1E);
+ __ srl_d(t2, a4, a5);
+ __ li(a5, 0x3F);
+ __ srl_d(t3, a4, a5);
+
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_srl_d_0)));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_srl_d_13)));
+ __ St_d(t2, MemOperand(a0, offsetof(T, result_srl_d_30)));
+ __ St_d(t3, MemOperand(a0, offsetof(T, result_srl_d_63)));
+
+ // sra_d
+ __ li(a5, 0x0);
+ __ sra_d(t0, a4, a5);
+ __ li(a5, 0xD);
+ __ sra_d(t1, a4, a5);
+
+ __ li(a6, static_cast<int64_t>(0x8000000000000000));
+ __ add_d(a6, a6, a4);
+ __ li(a5, 0x1E);
+ __ sra_d(t2, a6, a5);
+ __ li(a5, 0x3F);
+ __ sra_d(t3, a6, a5);
+
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_sra_d_0)));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_sra_d_13)));
+ __ St_d(t2, MemOperand(a0, offsetof(T, result_sra_d_30)));
+ __ St_d(t3, MemOperand(a0, offsetof(T, result_sra_d_63)));
+
+ // rotr
+ __ li(a5, 0x0);
+ __ rotr_d(t0, a4, a5);
+ __ li(a6, 0xD);
+ __ rotr_d(t1, a4, a6);
+
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_rotr_d_0)));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_rotr_d_13)));
+
+ // slli_d
+ __ slli_d(t0, a4, 0);
+ __ slli_d(t1, a4, 0xD);
+ __ slli_d(t2, a4, 0x1E);
+ __ slli_d(t3, a4, 0x3F);
+
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_slli_d_0)));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_slli_d_13)));
+ __ St_d(t2, MemOperand(a0, offsetof(T, result_slli_d_30)));
+ __ St_d(t3, MemOperand(a0, offsetof(T, result_slli_d_63)));
+
+ // srli_d
+ __ srli_d(t0, a4, 0);
+ __ srli_d(t1, a4, 0xD);
+ __ srli_d(t2, a4, 0x1E);
+ __ srli_d(t3, a4, 0x3F);
+
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_srli_d_0)));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_srli_d_13)));
+ __ St_d(t2, MemOperand(a0, offsetof(T, result_srli_d_30)));
+ __ St_d(t3, MemOperand(a0, offsetof(T, result_srli_d_63)));
+
+ // srai_d
+ __ srai_d(t0, a4, 0);
+ __ srai_d(t1, a4, 0xD);
+
+ __ li(a6, static_cast<int64_t>(0x8000000000000000));
+ __ add_d(a6, a6, a4);
+ __ srai_d(t2, a6, 0x1E);
+ __ srai_d(t3, a6, 0x3F);
+
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_srai_d_0)));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_srai_d_13)));
+ __ St_d(t2, MemOperand(a0, offsetof(T, result_srai_d_30)));
+ __ St_d(t3, MemOperand(a0, offsetof(T, result_srai_d_63)));
+
+ // rotri_d
+ __ rotri_d(t0, a4, 0);
+ __ rotri_d(t1, a4, 0xD);
+ __ rotri_d(t2, a4, 0x1E);
+ __ rotri_d(t3, a4, 0x3F);
+
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_rotri_d_0)));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_rotri_d_13)));
+ __ St_d(t2, MemOperand(a0, offsetof(T, result_rotri_d_30)));
+ __ St_d(t3, MemOperand(a0, offsetof(T, result_rotri_d_63)));
+
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ t.input = 0x51F4B764A26E7412;
+ f.Call(&t, 0, 0, 0, 0);
+
+ CHECK_EQ(static_cast<int64_t>(0x51f4b764a26e7412), t.result_sll_d_0);
+ CHECK_EQ(static_cast<int64_t>(0x96ec944dce824000), t.result_sll_d_13);
+ CHECK_EQ(static_cast<int64_t>(0x289b9d0480000000), t.result_sll_d_30);
+ CHECK_EQ(static_cast<int64_t>(0x0), t.result_sll_d_63);
+
+ CHECK_EQ(static_cast<int64_t>(0x51f4b764a26e7412), t.result_srl_d_0);
+ CHECK_EQ(static_cast<int64_t>(0x28fa5bb251373), t.result_srl_d_13);
+ CHECK_EQ(static_cast<int64_t>(0x147d2dd92), t.result_srl_d_30);
+ CHECK_EQ(static_cast<int64_t>(0x0), t.result_srl_d_63);
+
+ CHECK_EQ(static_cast<int64_t>(0x51f4b764a26e7412), t.result_sra_d_0);
+ CHECK_EQ(static_cast<int64_t>(0x28fa5bb251373), t.result_sra_d_13);
+ CHECK_EQ(static_cast<int64_t>(0xffffffff47d2dd92), t.result_sra_d_30);
+ CHECK_EQ(static_cast<int64_t>(0xffffffffffffffff), t.result_sra_d_63);
+
+ CHECK_EQ(static_cast<int64_t>(0x51f4b764a26e7412), t.result_rotr_d_0);
+ CHECK_EQ(static_cast<int64_t>(0xa0928fa5bb251373), t.result_rotr_d_13);
+
+ CHECK_EQ(static_cast<int64_t>(0x51f4b764a26e7412), t.result_slli_d_0);
+ CHECK_EQ(static_cast<int64_t>(0x96ec944dce824000), t.result_slli_d_13);
+ CHECK_EQ(static_cast<int64_t>(0x289b9d0480000000), t.result_slli_d_30);
+ CHECK_EQ(static_cast<int64_t>(0x0), t.result_slli_d_63);
+
+ CHECK_EQ(static_cast<int64_t>(0x51f4b764a26e7412), t.result_srli_d_0);
+ CHECK_EQ(static_cast<int64_t>(0x28fa5bb251373), t.result_srli_d_13);
+ CHECK_EQ(static_cast<int64_t>(0x147d2dd92), t.result_srli_d_30);
+ CHECK_EQ(static_cast<int64_t>(0x0), t.result_srli_d_63);
+
+ CHECK_EQ(static_cast<int64_t>(0x51f4b764a26e7412), t.result_srai_d_0);
+ CHECK_EQ(static_cast<int64_t>(0x28fa5bb251373), t.result_srai_d_13);
+ CHECK_EQ(static_cast<int64_t>(0xffffffff47d2dd92), t.result_srai_d_30);
+ CHECK_EQ(static_cast<int64_t>(0xffffffffffffffff), t.result_srai_d_63);
+
+ CHECK_EQ(static_cast<int64_t>(0x51f4b764a26e7412), t.result_rotri_d_0);
+ CHECK_EQ(static_cast<int64_t>(0xa0928fa5bb251373), t.result_rotri_d_13);
+ CHECK_EQ(static_cast<int64_t>(0x89b9d04947d2dd92), t.result_rotri_d_30);
+ CHECK_EQ(static_cast<int64_t>(0xa3e96ec944dce824), t.result_rotri_d_63);
+}
+
+TEST(LA10) {
+ // Test 32bit bit manipulation instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ struct T {
+ int64_t si1;
+ int64_t si2;
+ int32_t result_ext_w_b_si1;
+ int32_t result_ext_w_b_si2;
+ int32_t result_ext_w_h_si1;
+ int32_t result_ext_w_h_si2;
+ int32_t result_clo_w_si1;
+ int32_t result_clo_w_si2;
+ int32_t result_clz_w_si1;
+ int32_t result_clz_w_si2;
+ int32_t result_cto_w_si1;
+ int32_t result_cto_w_si2;
+ int32_t result_ctz_w_si1;
+ int32_t result_ctz_w_si2;
+ int32_t result_bytepick_w_si1;
+ int32_t result_bytepick_w_si2;
+ int32_t result_revb_2h_si1;
+ int32_t result_revb_2h_si2;
+ int32_t result_bitrev_4b_si1;
+ int32_t result_bitrev_4b_si2;
+ int32_t result_bitrev_w_si1;
+ int32_t result_bitrev_w_si2;
+ int32_t result_bstrins_w_si1;
+ int32_t result_bstrins_w_si2;
+ int32_t result_bstrpick_w_si1;
+ int32_t result_bstrpick_w_si2;
+ };
+ T t;
+
+ __ Ld_d(a4, MemOperand(a0, offsetof(T, si1)));
+ __ Ld_d(a5, MemOperand(a0, offsetof(T, si2)));
+
+ // ext_w_b
+ __ ext_w_b(t0, a4);
+ __ ext_w_b(t1, a5);
+ __ St_w(t0, MemOperand(a0, offsetof(T, result_ext_w_b_si1)));
+ __ St_w(t1, MemOperand(a0, offsetof(T, result_ext_w_b_si2)));
+
+ // ext_w_h
+ __ ext_w_h(t0, a4);
+ __ ext_w_h(t1, a5);
+ __ St_w(t0, MemOperand(a0, offsetof(T, result_ext_w_h_si1)));
+ __ St_w(t1, MemOperand(a0, offsetof(T, result_ext_w_h_si2)));
+
+ /* //clo_w
+ __ clo_w(t0, a4);
+ __ clo_w(t1, a5);
+ __ St_w(t0, MemOperand(a0, offsetof(T, result_clo_w_si1)));
+ __ St_w(t1, MemOperand(a0, offsetof(T, result_clo_w_si2)));*/
+
+ // clz_w
+ __ clz_w(t0, a4);
+ __ clz_w(t1, a5);
+ __ St_w(t0, MemOperand(a0, offsetof(T, result_clz_w_si1)));
+ __ St_w(t1, MemOperand(a0, offsetof(T, result_clz_w_si2)));
+
+ /* //cto_w
+ __ cto_w(t0, a4);
+ __ cto_w(t1, a5);
+ __ St_w(t0, MemOperand(a0, offsetof(T, result_cto_w_si1)));
+ __ St_w(t1, MemOperand(a0, offsetof(T, result_cto_w_si2)));*/
+
+ // ctz_w
+ __ ctz_w(t0, a4);
+ __ ctz_w(t1, a5);
+ __ St_w(t0, MemOperand(a0, offsetof(T, result_ctz_w_si1)));
+ __ St_w(t1, MemOperand(a0, offsetof(T, result_ctz_w_si2)));
+
+ // bytepick_w
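+ // bytepick_w(rd, rj, rk, sa) computes (rk << (8 * sa)) | (rj >> (32 - 8 * sa))
+ // on the low 32 bits and sign-extends the result.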
+ __ bytepick_w(t0, a4, a5, 0);
+ __ bytepick_w(t1, a5, a4, 2);
+ __ St_w(t0, MemOperand(a0, offsetof(T, result_bytepick_w_si1)));
+ __ St_w(t1, MemOperand(a0, offsetof(T, result_bytepick_w_si2)));
+
+ // revb_2h
+ __ revb_2h(t0, a4);
+ __ revb_2h(t1, a5);
+ __ St_w(t0, MemOperand(a0, offsetof(T, result_revb_2h_si1)));
+ __ St_w(t1, MemOperand(a0, offsetof(T, result_revb_2h_si2)));
+
+ // bitrev
+ __ bitrev_4b(t0, a4);
+ __ bitrev_4b(t1, a5);
+ __ St_w(t0, MemOperand(a0, offsetof(T, result_bitrev_4b_si1)));
+ __ St_w(t1, MemOperand(a0, offsetof(T, result_bitrev_4b_si2)));
+
+ // bitrev_w
+ __ bitrev_w(t0, a4);
+ __ bitrev_w(t1, a5);
+ __ St_w(t0, MemOperand(a0, offsetof(T, result_bitrev_w_si1)));
+ __ St_w(t1, MemOperand(a0, offsetof(T, result_bitrev_w_si2)));
+
+ // bstrins
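+ // bstrins_w(rd, rj, msbw, lsbw) replaces bits [msbw:lsbw] of rd with the
+ // low (msbw - lsbw + 1) bits of rj.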
+ __ or_(t0, zero_reg, zero_reg);
+ __ or_(t1, zero_reg, zero_reg);
+ __ bstrins_w(t0, a4, 0xD, 0x4);
+ __ bstrins_w(t1, a5, 0x16, 0x5);
+ __ St_w(t0, MemOperand(a0, offsetof(T, result_bstrins_w_si1)));
+ __ St_w(t1, MemOperand(a0, offsetof(T, result_bstrins_w_si2)));
+
+ // bstrpick
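+ // bstrpick_w(rd, rj, msbw, lsbw) extracts bits [msbw:lsbw] of rj into rd,
+ // zero-extended.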
+ __ or_(t0, zero_reg, zero_reg);
+ __ or_(t1, zero_reg, zero_reg);
+ __ bstrpick_w(t0, a4, 0xD, 0x4);
+ __ bstrpick_w(t1, a5, 0x16, 0x5);
+ __ St_w(t0, MemOperand(a0, offsetof(T, result_bstrpick_w_si1)));
+ __ St_w(t1, MemOperand(a0, offsetof(T, result_bstrpick_w_si2)));
+
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ t.si1 = 0x51F4B764A26E7412;
+ t.si2 = 0x81F25A87C423B891;
+ f.Call(&t, 0, 0, 0, 0);
+
+ CHECK_EQ(static_cast<int32_t>(0x12), t.result_ext_w_b_si1);
+ CHECK_EQ(static_cast<int32_t>(0xffffff91), t.result_ext_w_b_si2);
+ CHECK_EQ(static_cast<int32_t>(0x7412), t.result_ext_w_h_si1);
+ CHECK_EQ(static_cast<int32_t>(0xffffb891), t.result_ext_w_h_si2);
+ // CHECK_EQ(static_cast<int32_t>(0x1), t.result_clo_w_si1);
+ // CHECK_EQ(static_cast<int32_t>(0x2), t.result_clo_w_si2);
+ CHECK_EQ(static_cast<int32_t>(0x0), t.result_clz_w_si1);
+ CHECK_EQ(static_cast<int32_t>(0x0), t.result_clz_w_si2);
+ // CHECK_EQ(static_cast<int32_t>(0x0), t.result_cto_w_si1);
+ // CHECK_EQ(static_cast<int32_t>(0x1), t.result_cto_w_si2);
+ CHECK_EQ(static_cast<int32_t>(0x1), t.result_ctz_w_si1);
+ CHECK_EQ(static_cast<int32_t>(0x0), t.result_ctz_w_si2);
+ CHECK_EQ(static_cast<int32_t>(0xc423b891), t.result_bytepick_w_si1);
+ CHECK_EQ(static_cast<int32_t>(0x7412c423), t.result_bytepick_w_si2);
+ CHECK_EQ(static_cast<int32_t>(0x6ea21274), t.result_revb_2h_si1);
+ CHECK_EQ(static_cast<int32_t>(0x23c491b8), t.result_revb_2h_si2);
+ CHECK_EQ(static_cast<int32_t>(0x45762e48), t.result_bitrev_4b_si1);
+ CHECK_EQ(static_cast<int32_t>(0x23c41d89), t.result_bitrev_4b_si2);
+ CHECK_EQ(static_cast<int32_t>(0x482e7645), t.result_bitrev_w_si1);
+ CHECK_EQ(static_cast<int32_t>(0x891dc423), t.result_bitrev_w_si2);
+ CHECK_EQ(static_cast<int32_t>(0x120), t.result_bstrins_w_si1);
+ CHECK_EQ(static_cast<int32_t>(0x771220), t.result_bstrins_w_si2);
+ CHECK_EQ(static_cast<int32_t>(0x341), t.result_bstrpick_w_si1);
+ CHECK_EQ(static_cast<int32_t>(0x11dc4), t.result_bstrpick_w_si2);
+}
+
+TEST(LA11) {
+ // Test 64bit bit manipulation instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ struct T {
+ int64_t si1;
+ int64_t si2;
+ int64_t result_clo_d_si1;
+ int64_t result_clo_d_si2;
+ int64_t result_clz_d_si1;
+ int64_t result_clz_d_si2;
+ int64_t result_cto_d_si1;
+ int64_t result_cto_d_si2;
+ int64_t result_ctz_d_si1;
+ int64_t result_ctz_d_si2;
+ int64_t result_bytepick_d_si1;
+ int64_t result_bytepick_d_si2;
+ int64_t result_revb_4h_si1;
+ int64_t result_revb_4h_si2;
+ int64_t result_revb_2w_si1;
+ int64_t result_revb_2w_si2;
+ int64_t result_revb_d_si1;
+ int64_t result_revb_d_si2;
+ int64_t result_revh_2w_si1;
+ int64_t result_revh_2w_si2;
+ int64_t result_revh_d_si1;
+ int64_t result_revh_d_si2;
+ int64_t result_bitrev_8b_si1;
+ int64_t result_bitrev_8b_si2;
+ int64_t result_bitrev_d_si1;
+ int64_t result_bitrev_d_si2;
+ int64_t result_bstrins_d_si1;
+ int64_t result_bstrins_d_si2;
+ int64_t result_bstrpick_d_si1;
+ int64_t result_bstrpick_d_si2;
+ int64_t result_maskeqz_si1;
+ int64_t result_maskeqz_si2;
+ int64_t result_masknez_si1;
+ int64_t result_masknez_si2;
+ };
+
+ T t;
+
+ __ Ld_d(a4, MemOperand(a0, offsetof(T, si1)));
+ __ Ld_d(a5, MemOperand(a0, offsetof(T, si2)));
+
+ /* //clo_d
+ __ clo_d(t0, a4);
+ __ clo_d(t1, a5);
+ __ St_w(t0, MemOperand(a0, offsetof(T, result_clo_d_si1)));
+ __ St_w(t1, MemOperand(a0, offsetof(T, result_clo_d_si2)));*/
+
+ // clz_d
+ __ or_(t0, zero_reg, zero_reg);
+ __ clz_d(t0, a4);
+ __ clz_d(t1, a5);
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_clz_d_si1)));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_clz_d_si2)));
+
+ /* //cto_d
+ __ cto_d(t0, a4);
+ __ cto_d(t1, a5);
+ __ St_w(t0, MemOperand(a0, offsetof(T, result_cto_d_si1)));
+ __ St_w(t1, MemOperand(a0, offsetof(T, result_cto_d_si2)));*/
+
+ // ctz_d
+ __ ctz_d(t0, a4);
+ __ ctz_d(t1, a5);
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_ctz_d_si1)));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_ctz_d_si2)));
+
+ // bytepick_d
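+ // bytepick_d is the 64bit variant: (rk << (8 * sa)) | (rj >> (64 - 8 * sa)).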
+ __ bytepick_d(t0, a4, a5, 0);
+ __ bytepick_d(t1, a5, a4, 5);
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_bytepick_d_si1)));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_bytepick_d_si2)));
+
+ // revb_4h
+ __ revb_4h(t0, a4);
+ __ revb_4h(t1, a5);
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_revb_4h_si1)));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_revb_4h_si2)));
+
+ // revb_2w
+ __ revb_2w(t0, a4);
+ __ revb_2w(t1, a5);
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_revb_2w_si1)));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_revb_2w_si2)));
+
+ // revb_d
+ __ revb_d(t0, a4);
+ __ revb_d(t1, a5);
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_revb_d_si1)));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_revb_d_si2)));
+
+ // revh_2w
+ __ revh_2w(t0, a4);
+ __ revh_2w(t1, a5);
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_revh_2w_si1)));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_revh_2w_si2)));
+
+ // revh_d
+ __ revh_d(t0, a4);
+ __ revh_d(t1, a5);
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_revh_d_si1)));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_revh_d_si2)));
+
+ // bitrev_8b
+ __ bitrev_8b(t0, a4);
+ __ bitrev_8b(t1, a5);
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_bitrev_8b_si1)));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_bitrev_8b_si2)));
+
+ // bitrev_d
+ __ bitrev_d(t0, a4);
+ __ bitrev_d(t1, a5);
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_bitrev_d_si1)));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_bitrev_d_si2)));
+
+ // bstrins_d
+ __ or_(t0, zero_reg, zero_reg);
+ __ or_(t1, zero_reg, zero_reg);
+ __ bstrins_d(t0, a4, 5, 0);
+ __ bstrins_d(t1, a5, 39, 12);
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_bstrins_d_si1)));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_bstrins_d_si2)));
+
+ // bstrpick_d
+ __ or_(t0, zero_reg, zero_reg);
+ __ or_(t1, zero_reg, zero_reg);
+ __ bstrpick_d(t0, a4, 5, 0);
+ __ bstrpick_d(t1, a5, 63, 48);
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_bstrpick_d_si1)));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_bstrpick_d_si2)));
+
+ // maskeqz
+ __ maskeqz(t0, a4, a4);
+ __ maskeqz(t1, a5, zero_reg);
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_maskeqz_si1)));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_maskeqz_si2)));
+
+ // masknez
+ __ masknez(t0, a4, a4);
+ __ masknez(t1, a5, zero_reg);
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_masknez_si1)));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_masknez_si2)));
+
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ t.si1 = 0x10C021098B710CDE;
+ t.si2 = 0xFB8017FF781A15C3;
+ f.Call(&t, 0, 0, 0, 0);
+
+ // CHECK_EQ(static_cast<int64_t>(0x0), t.result_clo_d_si1);
+ // CHECK_EQ(static_cast<int64_t>(0x5), t.result_clo_d_si2);
+ CHECK_EQ(static_cast<int64_t>(0x3), t.result_clz_d_si1);
+ CHECK_EQ(static_cast<int64_t>(0x0), t.result_clz_d_si2);
+ // CHECK_EQ(static_cast<int64_t>(0x0), t.result_cto_d_si1);
+ // CHECK_EQ(static_cast<int64_t>(0x2), t.result_cto_d_si2);
+ CHECK_EQ(static_cast<int64_t>(0x1), t.result_ctz_d_si1);
+ CHECK_EQ(static_cast<int64_t>(0x0), t.result_ctz_d_si2);
+ CHECK_EQ(static_cast<int64_t>(0xfb8017ff781a15c3), t.result_bytepick_d_si1);
+ CHECK_EQ(static_cast<int64_t>(0x710cdefb8017ff78), t.result_bytepick_d_si2);
+ CHECK_EQ(static_cast<int64_t>(0xc0100921718bde0c), t.result_revb_4h_si1);
+ CHECK_EQ(static_cast<int64_t>(0x80fbff171a78c315), t.result_revb_4h_si2);
+ CHECK_EQ(static_cast<int64_t>(0x921c010de0c718b), t.result_revb_2w_si1);
+ CHECK_EQ(static_cast<int64_t>(0xff1780fbc3151a78), t.result_revb_2w_si2);
+ CHECK_EQ(static_cast<int64_t>(0xde0c718b0921c010), t.result_revb_d_si1);
+ CHECK_EQ(static_cast<int64_t>(0xc3151a78ff1780fb), t.result_revb_d_si2);
+ CHECK_EQ(static_cast<int64_t>(0x210910c00cde8b71), t.result_revh_2w_si1);
+ CHECK_EQ(static_cast<int64_t>(0x17fffb8015c3781a), t.result_revh_2w_si2);
+ CHECK_EQ(static_cast<int64_t>(0xcde8b71210910c0), t.result_revh_d_si1);
+ CHECK_EQ(static_cast<int64_t>(0x15c3781a17fffb80), t.result_revh_d_si2);
+ CHECK_EQ(static_cast<int64_t>(0x8038490d18e307b), t.result_bitrev_8b_si1);
+ CHECK_EQ(static_cast<int64_t>(0xdf01e8ff1e58a8c3), t.result_bitrev_8b_si2);
+ CHECK_EQ(static_cast<int64_t>(0x7b308ed190840308), t.result_bitrev_d_si1);
+ CHECK_EQ(static_cast<int64_t>(0xc3a8581effe801df), t.result_bitrev_d_si2);
+ CHECK_EQ(static_cast<int64_t>(0x1e), t.result_bstrins_d_si1);
+ CHECK_EQ(static_cast<int64_t>(0x81a15c3000), t.result_bstrins_d_si2);
+ CHECK_EQ(static_cast<int64_t>(0x1e), t.result_bstrpick_d_si1);
+ CHECK_EQ(static_cast<int64_t>(0xfb80), t.result_bstrpick_d_si2);
+ CHECK_EQ(static_cast<int64_t>(0), t.result_maskeqz_si1);
+ CHECK_EQ(static_cast<int64_t>(0xFB8017FF781A15C3), t.result_maskeqz_si2);
+ CHECK_EQ(static_cast<int64_t>(0x10C021098B710CDE), t.result_masknez_si1);
+ CHECK_EQ(static_cast<int64_t>(0), t.result_masknez_si2);
+}
+
+uint64_t run_beq(int64_t value1, int64_t value2, int16_t offset) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ Label main_block, L;
+ __ li(a2, 0l);
+ __ b(&main_block);
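+ // Each block below is three instructions long and the branch offset is in
+ // instruction units, so offsets -6/-3/3/6 reach Block 1/2/4/5. The other
+ // run_* branch helpers below share this layout.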
+ // Block 1
+ __ addi_d(a2, a2, 0x1);
+ __ addi_d(a2, a2, 0x2);
+ __ b(&L);
+
+ // Block 2
+ __ addi_d(a2, a2, 0x10);
+ __ addi_d(a2, a2, 0x20);
+ __ b(&L);
+
+ // Block 3 (Main)
+ __ bind(&main_block);
+ __ beq(a0, a1, offset);
+ __ bind(&L);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ // Block 4
+ __ addi_d(a2, a2, 0x100);
+ __ addi_d(a2, a2, 0x200);
+ __ b(&L);
+
+ // Block 5
+ __ addi_d(a2, a2, 0x300);
+ __ addi_d(a2, a2, 0x400);
+ __ b(&L);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(value1, value2, 0, 0, 0));
+
+ return res;
+}
+
+TEST(BEQ) {
+ CcTest::InitializeVM();
+ struct TestCaseBeq {
+ int64_t value1;
+ int64_t value2;
+ int16_t offset;
+ uint64_t expected_res;
+ };
+
+ // clang-format off
+ struct TestCaseBeq tc[] = {
+ // value1, value2, offset, expected_res
+ { 0, 0, -6, 0x3 },
+ { 1, 1, -3, 0x30 },
+ { -2, -2, 3, 0x300 },
+ { 3, -3, 6, 0 },
+ { 4, 4, 6, 0x700 },
+ };
+ // clang-format on
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBeq);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint64_t res = run_beq(tc[i].value1, tc[i].value2, tc[i].offset);
+ CHECK_EQ(tc[i].expected_res, res);
+ }
+}
+
+uint64_t run_bne(int64_t value1, int64_t value2, int16_t offset) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ Label main_block, L;
+ __ li(a2, 0l);
+ __ b(&main_block);
+ // Block 1
+ __ addi_d(a2, a2, 0x1);
+ __ addi_d(a2, a2, 0x2);
+ __ b(&L);
+
+ // Block 2
+ __ addi_d(a2, a2, 0x10);
+ __ addi_d(a2, a2, 0x20);
+ __ b(&L);
+
+ // Block 3 (Main)
+ __ bind(&main_block);
+ __ bne(a0, a1, offset);
+ __ bind(&L);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ // Block 4
+ __ addi_d(a2, a2, 0x100);
+ __ addi_d(a2, a2, 0x200);
+ __ b(&L);
+
+ // Block 5
+ __ addi_d(a2, a2, 0x300);
+ __ addi_d(a2, a2, 0x400);
+ __ b(&L);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(value1, value2, 0, 0, 0));
+
+ return res;
+}
+
+TEST(BNE) {
+ CcTest::InitializeVM();
+ struct TestCaseBne {
+ int64_t value1;
+ int64_t value2;
+ int16_t offset;
+ uint64_t expected_res;
+ };
+
+ // clang-format off
+ struct TestCaseBne tc[] = {
+ // value1, value2, offset, expected_res
+ { 1, -1, -6, 0x3 },
+ { 2, -2, -3, 0x30 },
+ { 3, -3, 3, 0x300 },
+ { 4, -4, 6, 0x700 },
+ { 0, 0, 6, 0 },
+ };
+ // clang-format on
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBne);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint64_t res = run_bne(tc[i].value1, tc[i].value2, tc[i].offset);
+ CHECK_EQ(tc[i].expected_res, res);
+ }
+}
+
+uint64_t run_blt(int64_t value1, int64_t value2, int16_t offset) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ Label main_block, L;
+ __ li(a2, 0l);
+ __ b(&main_block);
+ // Block 1
+ __ addi_d(a2, a2, 0x1);
+ __ addi_d(a2, a2, 0x2);
+ __ b(&L);
+
+ // Block 2
+ __ addi_d(a2, a2, 0x10);
+ __ addi_d(a2, a2, 0x20);
+ __ b(&L);
+
+ // Block 3 (Main)
+ __ bind(&main_block);
+ __ blt(a0, a1, offset);
+ __ bind(&L);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ // Block 4
+ __ addi_d(a2, a2, 0x100);
+ __ addi_d(a2, a2, 0x200);
+ __ b(&L);
+
+ // Block 5
+ __ addi_d(a2, a2, 0x300);
+ __ addi_d(a2, a2, 0x400);
+ __ b(&L);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(value1, value2, 0, 0, 0));
+
+ return res;
+}
+
+TEST(BLT) {
+ CcTest::InitializeVM();
+ struct TestCaseBlt {
+ int64_t value1;
+ int64_t value2;
+ int16_t offset;
+ uint64_t expected_res;
+ };
+
+ // clang-format off
+ struct TestCaseBlt tc[] = {
+ // value1, value2, offset, expected_res
+ { -1, 1, -6, 0x3 },
+ { -2, 2, -3, 0x30 },
+ { -3, 3, 3, 0x300 },
+ { -4, 4, 6, 0x700 },
+ { 5, -5, 6, 0 },
+ { 0, 0, 6, 0 },
+ };
+ // clang-format on
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBlt);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint64_t res = run_blt(tc[i].value1, tc[i].value2, tc[i].offset);
+ CHECK_EQ(tc[i].expected_res, res);
+ }
+}
+
+uint64_t run_bge(int64_t value1, int64_t value2, int16_t offset) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ Label main_block, L;
+ __ li(a2, 0l);
+ __ b(&main_block);
+ // Block 1
+ __ addi_d(a2, a2, 0x1);
+ __ addi_d(a2, a2, 0x2);
+ __ b(&L);
+
+ // Block 2
+ __ addi_d(a2, a2, 0x10);
+ __ addi_d(a2, a2, 0x20);
+ __ b(&L);
+
+ // Block 3 (Main)
+ __ bind(&main_block);
+ __ bge(a0, a1, offset);
+ __ bind(&L);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ // Block 4
+ __ addi_d(a2, a2, 0x100);
+ __ addi_d(a2, a2, 0x200);
+ __ b(&L);
+
+ // Block 5
+ __ addi_d(a2, a2, 0x300);
+ __ addi_d(a2, a2, 0x400);
+ __ b(&L);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(value1, value2, 0, 0, 0));
+
+ return res;
+}
+
+TEST(BGE) {
+ CcTest::InitializeVM();
+ struct TestCaseBge {
+ int64_t value1;
+ int64_t value2;
+ int16_t offset;
+ uint64_t expected_res;
+ };
+
+ // clang-format off
+ struct TestCaseBge tc[] = {
+ // value1, value2, offset, expected_res
+ { 0, 0, -6, 0x3 },
+ { 1, 1, -3, 0x30 },
+ { 2, -2, 3, 0x300 },
+ { 3, -3, 6, 0x700 },
+ { -4, 4, 6, 0 },
+ };
+ // clang-format on
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBge);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint64_t res = run_bge(tc[i].value1, tc[i].value2, tc[i].offset);
+ CHECK_EQ(tc[i].expected_res, res);
+ }
+}
+
+uint64_t run_bltu(int64_t value1, int64_t value2, int16_t offset) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ Label main_block, L;
+ __ li(a2, 0l);
+ __ b(&main_block);
+ // Block 1
+ __ addi_d(a2, a2, 0x1);
+ __ addi_d(a2, a2, 0x2);
+ __ b(&L);
+
+ // Block 2
+ __ addi_d(a2, a2, 0x10);
+ __ addi_d(a2, a2, 0x20);
+ __ b(&L);
+
+ // Block 3 (Main)
+ __ bind(&main_block);
+ __ bltu(a0, a1, offset);
+ __ bind(&L);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ // Block 4
+ __ addi_d(a2, a2, 0x100);
+ __ addi_d(a2, a2, 0x200);
+ __ b(&L);
+
+ // Block 5
+ __ addi_d(a2, a2, 0x300);
+ __ addi_d(a2, a2, 0x400);
+ __ b(&L);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(value1, value2, 0, 0, 0));
+
+ return res;
+}
+
+TEST(BLTU) {
+ CcTest::InitializeVM();
+ struct TestCaseBltu {
+ int64_t value1;
+ int64_t value2;
+ int16_t offset;
+ uint64_t expected_res;
+ };
+
+ // clang-format off
+ struct TestCaseBltu tc[] = {
+ // value1, value2, offset, expected_res
+ { 0, 1, -6, 0x3 },
+ { 1, -1, -3, 0x30 },
+ { 2, -2, 3, 0x300 },
+ { 3, -3, 6, 0x700 },
+ { 4, 4, 6, 0 },
+ };
+ // clang-format on
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBltu);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint64_t res = run_bltu(tc[i].value1, tc[i].value2, tc[i].offset);
+ CHECK_EQ(tc[i].expected_res, res);
+ }
+}
+
+uint64_t run_bgeu(int64_t value1, int64_t value2, int16_t offset) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ Label main_block, L;
+ __ li(a2, 0l);
+ __ b(&main_block);
+ // Block 1
+ __ addi_d(a2, a2, 0x1);
+ __ addi_d(a2, a2, 0x2);
+ __ b(&L);
+
+ // Block 2
+ __ addi_d(a2, a2, 0x10);
+ __ addi_d(a2, a2, 0x20);
+ __ b(&L);
+
+ // Block 3 (Main)
+ __ bind(&main_block);
+ __ bgeu(a0, a1, offset);
+ __ bind(&L);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ // Block 4
+ __ addi_d(a2, a2, 0x100);
+ __ addi_d(a2, a2, 0x200);
+ __ b(&L);
+
+ // Block 5
+ __ addi_d(a2, a2, 0x300);
+ __ addi_d(a2, a2, 0x400);
+ __ b(&L);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(value1, value2, 0, 0, 0));
+
+ return res;
+}
+
+TEST(BGEU) {
+ CcTest::InitializeVM();
+ struct TestCaseBgeu {
+ int64_t value1;
+ int64_t value2;
+ int16_t offset;
+ uint64_t expected_res;
+ };
+
+ // clang-format off
+ struct TestCaseBgeu tc[] = {
+ // value1, value2, offset, expected_res
+ { 0, 0, -6, 0x3 },
+ { -1, 1, -3, 0x30 },
+ { -2, 2, 3, 0x300 },
+ { -3, 3, 6, 0x700 },
+ { 4, -4, 6, 0 },
+ };
+ // clang-format on
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBgeu);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint64_t res = run_bgeu(tc[i].value1, tc[i].value2, tc[i].offset);
+ CHECK_EQ(tc[i].expected_res, res);
+ }
+}
+
+uint64_t run_beqz(int64_t value, int32_t offset) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ Label main_block, L;
+ __ li(a2, 0l);
+ __ b(&main_block);
+ // Block 1
+ __ addi_d(a2, a2, 0x1);
+ __ addi_d(a2, a2, 0x2);
+ __ b(&L);
+
+ // Block 2
+ __ addi_d(a2, a2, 0x10);
+ __ addi_d(a2, a2, 0x20);
+ __ b(&L);
+
+ // Block 3 (Main)
+ __ bind(&main_block);
+ __ beqz(a0, offset);
+ __ bind(&L);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ // Block 4
+ __ addi_d(a2, a2, 0x100);
+ __ addi_d(a2, a2, 0x200);
+ __ b(&L);
+
+ // Block 5
+ __ addi_d(a2, a2, 0x300);
+ __ addi_d(a2, a2, 0x400);
+ __ b(&L);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(value, 0, 0, 0, 0));
+
+ return res;
+}
+
+TEST(BEQZ) {
+ CcTest::InitializeVM();
+ struct TestCaseBeqz {
+ int64_t value;
+ int32_t offset;
+ uint64_t expected_res;
+ };
+
+ // clang-format off
+ struct TestCaseBeqz tc[] = {
+ // value, offset, expected_res
+ { 0, -6, 0x3 },
+ { 0, -3, 0x30 },
+ { 0, 3, 0x300 },
+ { 0, 6, 0x700 },
+ { 1, 6, 0 },
+ };
+ // clang-format on
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBeqz);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint64_t res = run_beqz(tc[i].value, tc[i].offset);
+ CHECK_EQ(tc[i].expected_res, res);
+ }
+}
+
+uint64_t run_bnez_b(int64_t value, int32_t offset) {
+ // Tests bnez together with b driven by raw instruction offsets.
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ Label main_block, L;
+ __ li(a2, 0l);
+ __ b(&main_block);
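+ // The filler blocks here branch with raw instruction offsets (5, 2, -4, -7),
+ // each of which resolves to the or_ instruction at L.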
+ // Block 1
+ __ addi_d(a2, a2, 0x1);
+ __ addi_d(a2, a2, 0x2);
+ __ b(5);
+
+ // Block 2
+ __ addi_d(a2, a2, 0x10);
+ __ addi_d(a2, a2, 0x20);
+ __ b(2);
+
+ // Block 3 (Main)
+ __ bind(&main_block);
+ __ bnez(a0, offset);
+ __ bind(&L);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ // Block 4
+ __ addi_d(a2, a2, 0x100);
+ __ addi_d(a2, a2, 0x200);
+ __ b(-4);
+
+ // Block 5
+ __ addi_d(a2, a2, 0x300);
+ __ addi_d(a2, a2, 0x400);
+ __ b(-7);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(value, 0, 0, 0, 0));
+
+ return res;
+}
+
+TEST(BNEZ_B) {
+ CcTest::InitializeVM();
+ struct TestCaseBnez {
+ int64_t value;
+ int32_t offset;
+ uint64_t expected_res;
+ };
+
+ // clang-format off
+ struct TestCaseBnez tc[] = {
+ // value, offset, expected_res
+ { 1, -6, 0x3 },
+ { -2, -3, 0x30 },
+ { 3, 3, 0x300 },
+ { -4, 6, 0x700 },
+ { 0, 6, 0 },
+ };
+ // clang-format on
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBnez);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint64_t res = run_bnez_b(tc[i].value, tc[i].offset);
+ CHECK_EQ(tc[i].expected_res, res);
+ }
+}
+
+uint64_t run_bl(int32_t offset) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ Label main_block;
+ __ li(a2, 0l);
+ __ Push(ra); // Push is implemented by two instructions: addi_d and st_d.
+ __ b(&main_block);
+
+ // Block 1
+ __ addi_d(a2, a2, 0x1);
+ __ addi_d(a2, a2, 0x2);
+ __ jirl(zero_reg, ra, 0);
+
+ // Block 2
+ __ addi_d(a2, a2, 0x10);
+ __ addi_d(a2, a2, 0x20);
+ __ jirl(zero_reg, ra, 0);
+
+ // Block 3 (Main)
+ __ bind(&main_block);
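+ // After bl come or_, the two-instruction Pop and jirl, so the forward
+ // offsets to Block 4 and Block 5 are 5 and 8 rather than 3 and 6.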
+ __ bl(offset);
+ __ or_(a0, a2, zero_reg);
+ __ Pop(ra); // Pop is implemented by two instructions, ld_d and addi_d.
+ __ jirl(zero_reg, ra, 0);
+
+ // Block 4
+ __ addi_d(a2, a2, 0x100);
+ __ addi_d(a2, a2, 0x200);
+ __ jirl(zero_reg, ra, 0);
+
+ // Block 5
+ __ addi_d(a2, a2, 0x300);
+ __ addi_d(a2, a2, 0x400);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
+
+ return res;
+}
+
+TEST(BL) {
+ CcTest::InitializeVM();
+ struct TestCaseBl {
+ int32_t offset;
+ uint64_t expected_res;
+ };
+
+ // clang-format off
+ struct TestCaseBl tc[] = {
+ // offset, expected_res
+ { -6, 0x3 },
+ { -3, 0x30 },
+ { 5, 0x300 },
+ { 8, 0x700 },
+ };
+ // clang-format on
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBl);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint64_t res = run_bl(tc[i].offset);
+ CHECK_EQ(tc[i].expected_res, res);
+ }
+}
+
+TEST(PCADD) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ Label exit, error;
+ __ Push(ra);
+
+ // pcaddi
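+ // bl(1) sets ra to the address of the instruction after it, i.e. the PC of
+ // the pcaddi; pcaddi rd, si20 computes rd = PC + SignExtend(si20 << 2), so
+ // a3 should equal ra plus the delta preloaded into a4/a5/a6.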
+ __ li(a4, 0x1FFFFC);
+ __ li(a5, 0);
+ __ li(a6, static_cast<int32_t>(0xFFE00000));
+
+ __ bl(1);
+ __ pcaddi(a3, 0x7FFFF);
+ __ add_d(a2, ra, a4);
+ __ Branch(&error, ne, a2, Operand(a3));
+
+ __ bl(1);
+ __ pcaddi(a3, 0);
+ __ add_d(a2, ra, a5);
+ __ Branch(&error, ne, a2, Operand(a3));
+
+ __ bl(1);
+ __ pcaddi(a3, 0x80000);
+ __ add_d(a2, ra, a6);
+ __ Branch(&error, ne, a2, Operand(a3));
+
+ // pcaddu12i
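+ // pcaddu12i rd, si20 computes rd = PC + SignExtend(si20 << 12).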
+ __ li(a4, 0x7FFFF000);
+ __ li(a5, 0);
+ __ li(a6, static_cast<int32_t>(0x80000000));
+
+ __ bl(1);
+ __ pcaddu12i(a2, 0x7FFFF);
+ __ add_d(a3, ra, a4);
+ __ Branch(&error, ne, a2, Operand(a3));
+ __ bl(1);
+ __ pcaddu12i(a2, 0);
+ __ add_d(a3, ra, a5);
+ __ Branch(&error, ne, a2, Operand(a3));
+ __ bl(1);
+ __ pcaddu12i(a2, 0x80000);
+ __ add_d(a3, ra, a6);
+ __ Branch(&error, ne, a2, Operand(a3));
+
+ // pcaddu18i
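+ // pcaddu18i rd, si20 computes rd = PC + SignExtend(si20 << 18).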
+ __ li(a4, 0x1FFFFC0000);
+ __ li(a5, 0);
+ __ li(a6, static_cast<int64_t>(0xFFFFFFE000000000));
+
+ __ bl(1);
+ __ pcaddu18i(a2, 0x7FFFF);
+ __ add_d(a3, ra, a4);
+ __ Branch(&error, ne, a2, Operand(a3));
+
+ __ bl(1);
+ __ pcaddu18i(a2, 0);
+ __ add_d(a3, ra, a5);
+ __ Branch(&error, ne, a2, Operand(a3));
+
+ __ bl(1);
+ __ pcaddu18i(a2, 0x80000);
+ __ add_d(a3, ra, a6);
+ __ Branch(&error, ne, a2, Operand(a3));
+
+ // pcalau12i
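+ // pcalau12i rd, si20 computes (PC + SignExtend(si20 << 12)) with the low
+ // 12 bits cleared, so both sides are masked with a7 (~0xFFF) before the
+ // comparison.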
+ __ li(a4, 0x7FFFF000);
+ __ li(a5, 0);
+ __ li(a6, static_cast<int32_t>(0x80000000));
+ __ li(a7, static_cast<int64_t>(0xFFFFFFFFFFFFF000));
+
+ __ bl(1);
+ __ pcalau12i(a3, 0x7FFFF);
+ __ add_d(a2, ra, a4);
+ __ and_(t0, a2, a7);
+ __ and_(t1, a3, a7);
+ __ Branch(&error, ne, t0, Operand(t1));
+
+ __ bl(1);
+ __ pcalau12i(a3, 0);
+ __ add_d(a2, ra, a5);
+ __ and_(t0, a2, a7);
+ __ and_(t1, a3, a7);
+ __ Branch(&error, ne, t0, Operand(t1));
+
+ __ bl(1);
+ __ pcalau12i(a2, 0x80000);
+ __ add_d(a3, ra, a6);
+ __ and_(t0, a2, a7);
+ __ and_(t1, a3, a7);
+ __ Branch(&error, ne, t0, Operand(t1));
+
+ __ li(a0, 0x31415926);
+ __ b(&exit);
+
+ __ bind(&error);
+ __ li(a0, 0x666);
+
+ __ bind(&exit);
+ __ Pop(ra);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ int64_t res = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
+
+ CHECK_EQ(0x31415926L, res);
+}
+
+uint64_t run_jirl(int16_t offset) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ Label main_block;
+ __ li(a2, 0l);
+ __ Push(ra);
+ __ b(&main_block);
+
+ // Block 1
+ __ addi_d(a2, a2, 0x1);
+ __ addi_d(a2, a2, 0x2);
+ __ jirl(zero_reg, ra, 0);
+
+ // Block 2
+ __ addi_d(a2, a2, 0x10);
+ __ addi_d(a2, a2, 0x20);
+ __ jirl(zero_reg, ra, 0);
+
+ // Block 3 (Main)
+ __ bind(&main_block);
+ __ pcaddi(a3, 1);
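+ // pcaddi(a3, 1) sets a3 to the address of the jirl itself; jirl rd, rj,
+ // offset jumps to rj + (offset << 2) and stores the return address in rd.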
+ __ jirl(ra, a3, offset);
+ __ or_(a0, a2, zero_reg);
+ __ Pop(ra); // Pop is implemented by two instructions, ld_d and addi_d.
+ __ jirl(zero_reg, ra, 0);
+
+ // Block 4
+ __ addi_d(a2, a2, 0x100);
+ __ addi_d(a2, a2, 0x200);
+ __ jirl(zero_reg, ra, 0);
+
+ // Block 5
+ __ addi_d(a2, a2, 0x300);
+ __ addi_d(a2, a2, 0x400);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
+
+ return res;
+}
+
+TEST(JIRL) {
+ CcTest::InitializeVM();
+ struct TestCaseJirl {
+ int16_t offset;
+ uint64_t expected_res;
+ };
+
+ // clang-format off
+ struct TestCaseJirl tc[] = {
+ // offset, expected_res
+ { -7, 0x3 },
+ { -4, 0x30 },
+ { 5, 0x300 },
+ { 8, 0x700 },
+ };
+ // clang-format on
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseJirl);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint64_t res = run_jirl(tc[i].offset);
+ CHECK_EQ(tc[i].expected_res, res);
+ }
+}
+
+TEST(LA12) {
+ // Test double-precision floating-point arithmetic instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ struct T {
+ double a;
+ double b;
+ double c;
+ double d;
+ double e;
+ double f;
+ double result_fadd_d;
+ double result_fsub_d;
+ double result_fmul_d;
+ double result_fdiv_d;
+ double result_fmadd_d;
+ double result_fmsub_d;
+ double result_fnmadd_d;
+ double result_fnmsub_d;
+ double result_fsqrt_d;
+ double result_frecip_d;
+ double result_frsqrt_d;
+ double result_fscaleb_d;
+ double result_flogb_d;
+ double result_fcopysign_d;
+ double result_fclass_d;
+ };
+ T t;
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ // Double precision floating point instructions.
+ __ Fld_d(f8, MemOperand(a0, offsetof(T, a)));
+ __ Fld_d(f9, MemOperand(a0, offsetof(T, b)));
+
+ __ fneg_d(f10, f8);
+ __ fadd_d(f11, f9, f10);
+ __ Fst_d(f11, MemOperand(a0, offsetof(T, result_fadd_d)));
+ __ fabs_d(f11, f11);
+ __ fsub_d(f12, f11, f9);
+ __ Fst_d(f12, MemOperand(a0, offsetof(T, result_fsub_d)));
+
+ __ Fld_d(f13, MemOperand(a0, offsetof(T, c)));
+ __ Fld_d(f14, MemOperand(a0, offsetof(T, d)));
+ __ Fld_d(f15, MemOperand(a0, offsetof(T, e)));
+
+ __ fmin_d(f16, f13, f14);
+ __ fmul_d(f17, f15, f16);
+ __ Fst_d(f17, MemOperand(a0, offsetof(T, result_fmul_d)));
+ __ fmax_d(f18, f13, f14);
+ __ fdiv_d(f19, f15, f18);
+ __ Fst_d(f19, MemOperand(a0, offsetof(T, result_fdiv_d)));
+
+ __ fmina_d(f16, f13, f14);
+ __ fmadd_d(f18, f17, f15, f16);
+ __ Fst_d(f18, MemOperand(a0, offsetof(T, result_fmadd_d)));
+ __ fnmadd_d(f19, f17, f15, f16);
+ __ Fst_d(f19, MemOperand(a0, offsetof(T, result_fnmadd_d)));
+ __ fmaxa_d(f16, f13, f14);
+ __ fmsub_d(f20, f17, f15, f16);
+ __ Fst_d(f20, MemOperand(a0, offsetof(T, result_fmsub_d)));
+ __ fnmsub_d(f21, f17, f15, f16);
+ __ Fst_d(f21, MemOperand(a0, offsetof(T, result_fnmsub_d)));
+
+ __ Fld_d(f8, MemOperand(a0, offsetof(T, f)));
+ __ fsqrt_d(f10, f8);
+ __ Fst_d(f10, MemOperand(a0, offsetof(T, result_fsqrt_d)));
+ //__ frecip_d(f11, f10);
+ //__ frsqrt_d(f12, f8);
+ //__ Fst_d(f11, MemOperand(a0, offsetof(T, result_frecip_d)));
+ //__ Fst_d(f12, MemOperand(a0, offsetof(T, result_frsqrt_d)));
+
+ /*__ fscaleb_d(f16, f13, f15);
+ __ flogb_d(f17, f15);
+ __ fcopysign_d(f18, f8, f9);
+ __ fclass_d(f19, f9);
+ __ Fst_d(f16, MemOperand(a0, offsetof(T, result_fscaleb_d)));
+ __ Fst_d(f17, MemOperand(a0, offsetof(T, result_flogb_d)));
+ __ Fst_d(f18, MemOperand(a0, offsetof(T, result_fcopysign_d)));
+ __ Fst_d(f19, MemOperand(a0, offsetof(T, result_fclass_d)));*/
+
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ // Double test values.
+ t.a = 1.5e14;
+ t.b = -2.75e11;
+ t.c = 1.5;
+ t.d = -2.75;
+ t.e = 120.0;
+ t.f = 120.44;
+ f.Call(&t, 0, 0, 0, 0);
+
+ CHECK_EQ(static_cast<double>(-1.502750e14), t.result_fadd_d);
+ CHECK_EQ(static_cast<double>(1.505500e14), t.result_fsub_d);
+ CHECK_EQ(static_cast<double>(-3.300000e02), t.result_fmul_d);
+ CHECK_EQ(static_cast<double>(8.000000e01), t.result_fdiv_d);
+ CHECK_EQ(static_cast<double>(-3.959850e04), t.result_fmadd_d);
+ CHECK_EQ(static_cast<double>(-3.959725e04), t.result_fmsub_d);
+ CHECK_EQ(static_cast<double>(3.959850e04), t.result_fnmadd_d);
+ CHECK_EQ(static_cast<double>(3.959725e04), t.result_fnmsub_d);
+ CHECK_EQ(static_cast<double>(10.97451593465515908537), t.result_fsqrt_d);
+ // CHECK_EQ(static_cast<double>( 8.164965e-08), t.result_frecip_d);
+ // CHECK_EQ(static_cast<double>( 8.164966e-08), t.result_frsqrt_d);
+ // CHECK_EQ(static_cast<double>(), t.result_fscaleb_d);
+ // CHECK_EQ(static_cast<double>( 6.906891), t.result_flogb_d);
+ // CHECK_EQ(static_cast<double>( 2.75e11), t.result_fcopysign_d);
+ // CHECK_EQ(static_cast<double>(), t.result_fclass_d);
+}
+
+TEST(LA13) {
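+ // Test single-precision floating-point arithmetic instructions.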
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ struct T {
+ float a;
+ float b;
+ float c;
+ float d;
+ float e;
+ float result_fadd_s;
+ float result_fsub_s;
+ float result_fmul_s;
+ float result_fdiv_s;
+ float result_fmadd_s;
+ float result_fmsub_s;
+ float result_fnmadd_s;
+ float result_fnmsub_s;
+ float result_fsqrt_s;
+ float result_frecip_s;
+ float result_frsqrt_s;
+ float result_fscaleb_s;
+ float result_flogb_s;
+ float result_fcopysign_s;
+ float result_fclass_s;
+ };
+ T t;
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ // Single-precision floating-point instructions.
+ __ Fld_s(f8, MemOperand(a0, offsetof(T, a)));
+ __ Fld_s(f9, MemOperand(a0, offsetof(T, b)));
+
+ __ fneg_s(f10, f8);
+ __ fadd_s(f11, f9, f10);
+ __ Fst_s(f11, MemOperand(a0, offsetof(T, result_fadd_s)));
+ __ fabs_s(f11, f11);
+ __ fsub_s(f12, f11, f9);
+ __ Fst_s(f12, MemOperand(a0, offsetof(T, result_fsub_s)));
+
+ __ Fld_s(f13, MemOperand(a0, offsetof(T, c)));
+ __ Fld_s(f14, MemOperand(a0, offsetof(T, d)));
+ __ Fld_s(f15, MemOperand(a0, offsetof(T, e)));
+
+ __ fmin_s(f16, f13, f14);
+ __ fmul_s(f17, f15, f16);
+ __ Fst_s(f17, MemOperand(a0, offsetof(T, result_fmul_s)));
+ __ fmax_s(f18, f13, f14);
+ __ fdiv_s(f19, f15, f18);
+ __ Fst_s(f19, MemOperand(a0, offsetof(T, result_fdiv_s)));
+
+ __ fmina_s(f16, f13, f14);
+ __ fmadd_s(f18, f17, f15, f16);
+ __ Fst_s(f18, MemOperand(a0, offsetof(T, result_fmadd_s)));
+ __ fnmadd_s(f19, f17, f15, f16);
+ __ Fst_s(f19, MemOperand(a0, offsetof(T, result_fnmadd_s)));
+ __ fmaxa_s(f16, f13, f14);
+ __ fmsub_s(f20, f17, f15, f16);
+ __ Fst_s(f20, MemOperand(a0, offsetof(T, result_fmsub_s)));
+ __ fnmsub_s(f21, f17, f15, f16);
+ __ Fst_s(f21, MemOperand(a0, offsetof(T, result_fnmsub_s)));
+
+ __ fsqrt_s(f10, f8);
+ //__ frecip_s(f11, f10);
+ //__ frsqrt_s(f12, f8);
+ __ Fst_s(f10, MemOperand(a0, offsetof(T, result_fsqrt_s)));
+ //__ Fst_s(f11, MemOperand(a0, offsetof(T, result_frecip_s)));
+ //__ Fst_s(f12, MemOperand(a0, offsetof(T, result_frsqrt_s)));
+
+ /*__ fscaleb_s(f16, f13, f15);
+ __ flogb_s(f17, f15);
+ __ fcopysign_s(f18, f8, f9);
+ __ fclass_s(f19, f9);
+ __ Fst_s(f16, MemOperand(a0, offsetof(T, result_fscaleb_s)));
+ __ Fst_s(f17, MemOperand(a0, offsetof(T, result_flogb_s)));
+ __ Fst_s(f18, MemOperand(a0, offsetof(T, result_fcopysign_s)));
+ __ Fst_s(f19, MemOperand(a0, offsetof(T, result_fclass_s)));*/
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ // Float test values.
+ t.a = 1.5e6;
+ t.b = -2.75e4;
+ t.c = 1.5;
+ t.d = -2.75;
+ t.e = 120.0;
+ f.Call(&t, 0, 0, 0, 0);
+
+ CHECK_EQ(static_cast<float>(-1.527500e06), t.result_fadd_s);
+ CHECK_EQ(static_cast<float>(1.555000e06), t.result_fsub_s);
+ CHECK_EQ(static_cast<float>(-3.300000e02), t.result_fmul_s);
+ CHECK_EQ(static_cast<float>(8.000000e01), t.result_fdiv_s);
+ CHECK_EQ(static_cast<float>(-3.959850e04), t.result_fmadd_s);
+ CHECK_EQ(static_cast<float>(-3.959725e04), t.result_fmsub_s);
+ CHECK_EQ(static_cast<float>(3.959850e04), t.result_fnmadd_s);
+ CHECK_EQ(static_cast<float>(3.959725e04), t.result_fnmsub_s);
+ CHECK_EQ(static_cast<float>(1224.744873), t.result_fsqrt_s);
+ // CHECK_EQ(static_cast<float>( 8.164966e-04), t.result_frecip_s);
+ // CHECK_EQ(static_cast<float>( 8.164966e-04), t.result_frsqrt_s);
+ // CHECK_EQ(static_cast<float>(), t.result_fscaleb_s);
+ // CHECK_EQ(static_cast<float>( 6.906890), t.result_flogb_s);
+ // CHECK_EQ(static_cast<float>( 2.75e4), t.result_fcopysign_s);
+ // CHECK_EQ(static_cast<float>(), t.result_fclass_s);
+}
+
+TEST(FCMP_COND) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ struct TestFloat {
+ double dTrue;
+ double dFalse;
+ double dOp1;
+ double dOp2;
+ double dCaf;
+ double dCun;
+ double dCeq;
+ double dCueq;
+ double dClt;
+ double dCult;
+ double dCle;
+ double dCule;
+ double dCne;
+ double dCor;
+ double dCune;
+ double dSaf;
+ double dSun;
+ double dSeq;
+ double dSueq;
+ double dSlt;
+ double dSult;
+ double dSle;
+ double dSule;
+ double dSne;
+ double dSor;
+ double dSune;
+ float fTrue;
+ float fFalse;
+ float fOp1;
+ float fOp2;
+ float fCaf;
+ float fCun;
+ float fCeq;
+ float fCueq;
+ float fClt;
+ float fCult;
+ float fCle;
+ float fCule;
+ float fCne;
+ float fCor;
+ float fCune;
+ float fSaf;
+ float fSun;
+ float fSeq;
+ float fSueq;
+ float fSlt;
+ float fSult;
+ float fSle;
+ float fSule;
+ float fSne;
+ float fSor;
+ float fSune;
+ };
+
+ TestFloat test;
+
+ __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, dOp1)));
+ __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, dOp2)));
+
+ __ Fld_s(f10, MemOperand(a0, offsetof(TestFloat, fOp1)));
+ __ Fld_s(f11, MemOperand(a0, offsetof(TestFloat, fOp2)));
+
+ __ Fld_d(f12, MemOperand(a0, offsetof(TestFloat, dFalse)));
+ __ Fld_d(f13, MemOperand(a0, offsetof(TestFloat, dTrue)));
+
+ __ Fld_s(f14, MemOperand(a0, offsetof(TestFloat, fFalse)));
+ __ Fld_s(f15, MemOperand(a0, offsetof(TestFloat, fTrue)));
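+ // fsel copies the "false" value (f12/f14) into the destination when the
+ // condition flag is clear and the "true" value (f13/f15) when it is set.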
+
+ __ fcmp_cond_d(CAF, f8, f9, FCC0);
+ __ fcmp_cond_s(CAF, f10, f11, FCC1);
+ __ fsel(FCC0, f16, f12, f13);
+ __ fsel(FCC1, f17, f14, f15);
+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCaf)));
+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCaf)));
+
+ __ fcmp_cond_d(CUN, f8, f9, FCC0);
+ __ fcmp_cond_s(CUN, f10, f11, FCC1);
+ __ fsel(FCC0, f16, f12, f13);
+ __ fsel(FCC1, f17, f14, f15);
+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCun)));
+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCun)));
+
+ __ fcmp_cond_d(CEQ, f8, f9, FCC0);
+ __ fcmp_cond_s(CEQ, f10, f11, FCC1);
+ __ fsel(FCC0, f16, f12, f13);
+ __ fsel(FCC1, f17, f14, f15);
+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCeq)));
+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCeq)));
+
+ __ fcmp_cond_d(CUEQ, f8, f9, FCC0);
+ __ fcmp_cond_s(CUEQ, f10, f11, FCC1);
+ __ fsel(FCC0, f16, f12, f13);
+ __ fsel(FCC1, f17, f14, f15);
+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCueq)));
+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCueq)));
+
+ __ fcmp_cond_d(CLT, f8, f9, FCC0);
+ __ fcmp_cond_s(CLT, f10, f11, FCC1);
+ __ fsel(FCC0, f16, f12, f13);
+ __ fsel(FCC1, f17, f14, f15);
+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dClt)));
+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fClt)));
+
+ __ fcmp_cond_d(CULT, f8, f9, FCC0);
+ __ fcmp_cond_s(CULT, f10, f11, FCC1);
+ __ fsel(FCC0, f16, f12, f13);
+ __ fsel(FCC1, f17, f14, f15);
+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCult)));
+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCult)));
+
+ __ fcmp_cond_d(CLE, f8, f9, FCC0);
+ __ fcmp_cond_s(CLE, f10, f11, FCC1);
+ __ fsel(FCC0, f16, f12, f13);
+ __ fsel(FCC1, f17, f14, f15);
+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCle)));
+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCle)));
+
+ __ fcmp_cond_d(CULE, f8, f9, FCC0);
+ __ fcmp_cond_s(CULE, f10, f11, FCC1);
+ __ fsel(FCC0, f16, f12, f13);
+ __ fsel(FCC1, f17, f14, f15);
+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCule)));
+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCule)));
+
+ __ fcmp_cond_d(CNE, f8, f9, FCC0);
+ __ fcmp_cond_s(CNE, f10, f11, FCC1);
+ __ fsel(FCC0, f16, f12, f13);
+ __ fsel(FCC1, f17, f14, f15);
+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCne)));
+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCne)));
+
+ __ fcmp_cond_d(COR, f8, f9, FCC0);
+ __ fcmp_cond_s(COR, f10, f11, FCC1);
+ __ fsel(FCC0, f16, f12, f13);
+ __ fsel(FCC1, f17, f14, f15);
+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCor)));
+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCor)));
+
+ __ fcmp_cond_d(CUNE, f8, f9, FCC0);
+ __ fcmp_cond_s(CUNE, f10, f11, FCC1);
+ __ fsel(FCC0, f16, f12, f13);
+ __ fsel(FCC1, f17, f14, f15);
+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dCune)));
+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fCune)));
+
+ /* __ fcmp_cond_d(SAF, f8, f9, FCC0);
+ __ fcmp_cond_s(SAF, f10, f11, FCC1);
+ __ fsel(FCC0, f16, f12, f13);
+ __ fsel(FCC1, f17, f14, f15);
+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSaf)));
+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSaf)));
+
+ __ fcmp_cond_d(SUN, f8, f9, FCC0);
+ __ fcmp_cond_s(SUN, f10, f11, FCC1);
+ __ fsel(FCC0, f16, f12, f13);
+ __ fsel(FCC1, f17, f14, f15);
+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSun)));
+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSun)));
+
+ __ fcmp_cond_d(SEQ, f8, f9, FCC0);
+ __ fcmp_cond_s(SEQ, f10, f11, FCC1);
+ __ fsel(FCC0, f16, f12, f13);
+ __ fsel(FCC1, f17, f14, f15);
+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSeq)));
+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSeq)));
+
+ __ fcmp_cond_d(SUEQ, f8, f9, FCC0);
+ __ fcmp_cond_s(SUEQ, f10, f11, FCC1);
+ __ fsel(FCC0, f16, f12, f13);
+ __ fsel(FCC1, f17, f14, f15);
+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSueq)));
+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSueq)));
+
+ __ fcmp_cond_d(SLT, f8, f9, FCC0);
+ __ fcmp_cond_s(SLT, f10, f11, FCC1);
+ __ fsel(FCC0, f16, f12, f13);
+ __ fsel(FCC1, f17, f14, f15);
+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSlt)));
+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSlt)));
+
+ __ fcmp_cond_d(SULT, f8, f9, FCC0);
+ __ fcmp_cond_s(SULT, f10, f11, FCC1);
+ __ fsel(FCC0, f16, f12, f13);
+ __ fsel(FCC1, f17, f14, f15);
+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSult)));
+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSult)));
+
+ __ fcmp_cond_d(SLE, f8, f9, FCC0);
+ __ fcmp_cond_s(SLE, f10, f11, FCC1);
+ __ fsel(FCC0, f16, f12, f13);
+ __ fsel(FCC1, f17, f14, f15);
+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSle)));
+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSle)));
+
+ __ fcmp_cond_d(SULE, f8, f9, FCC0);
+ __ fcmp_cond_s(SULE, f10, f11, FCC1);
+ __ fsel(FCC0, f16, f12, f13);
+ __ fsel(FCC1, f17, f14, f15);
+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSule)));
+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSule)));
+
+ __ fcmp_cond_d(SNE, f8, f9, FCC0);
+ __ fcmp_cond_s(SNE, f10, f11, FCC1);
+ __ fsel(FCC0, f16, f12, f13);
+ __ fsel(FCC1, f17, f14, f15);
+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSne)));
+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSne)));
+
+ __ fcmp_cond_d(SOR, f8, f9, FCC0);
+ __ fcmp_cond_s(SOR, f10, f11, FCC1);
+ __ fsel(FCC0, f16, f12, f13);
+ __ fsel(FCC1, f17, f14, f15);
+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSor)));
+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSor)));
+
+ __ fcmp_cond_d(SUNE, f8, f9, FCC0);
+ __ fcmp_cond_s(SUNE, f10, f11, FCC1);
+ __ fsel(FCC0, f16, f12, f13);
+ __ fsel(FCC1, f17, f14, f15);
+ __ Fst_d(f16, MemOperand(a0, offsetof(TestFloat, dSune)));
+ __ Fst_s(f17, MemOperand(a0, offsetof(TestFloat, fSune)));*/
+
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ test.dTrue = 1234.0;
+ test.dFalse = 0.0;
+ test.fTrue = 12.0;
+ test.fFalse = 0.0;
+
+ test.dOp1 = 2.0;
+ test.dOp2 = 3.0;
+ test.fOp1 = 2.0;
+ test.fOp2 = 3.0;
+ f.Call(&test, 0, 0, 0, 0);
+
+ CHECK_EQ(test.dCaf, test.dFalse);
+ CHECK_EQ(test.fCaf, test.fFalse);
+ CHECK_EQ(test.dCun, test.dFalse);
+ CHECK_EQ(test.fCun, test.fFalse);
+ CHECK_EQ(test.dCeq, test.dFalse);
+ CHECK_EQ(test.fCeq, test.fFalse);
+ CHECK_EQ(test.dCueq, test.dFalse);
+ CHECK_EQ(test.fCueq, test.fFalse);
+ CHECK_EQ(test.dClt, test.dTrue);
+ CHECK_EQ(test.fClt, test.fTrue);
+ CHECK_EQ(test.dCult, test.dTrue);
+ CHECK_EQ(test.fCult, test.fTrue);
+ CHECK_EQ(test.dCle, test.dTrue);
+ CHECK_EQ(test.fCle, test.fTrue);
+ CHECK_EQ(test.dCule, test.dTrue);
+ CHECK_EQ(test.fCule, test.fTrue);
+ CHECK_EQ(test.dCne, test.dTrue);
+ CHECK_EQ(test.fCne, test.fTrue);
+ CHECK_EQ(test.dCor, test.dTrue);
+ CHECK_EQ(test.fCor, test.fTrue);
+ CHECK_EQ(test.dCune, test.dTrue);
+ CHECK_EQ(test.fCune, test.fTrue);
+ /* CHECK_EQ(test.dSaf, test.dFalse);
+ CHECK_EQ(test.fSaf, test.fFalse);
+ CHECK_EQ(test.dSun, test.dFalse);
+ CHECK_EQ(test.fSun, test.fFalse);
+ CHECK_EQ(test.dSeq, test.dFalse);
+ CHECK_EQ(test.fSeq, test.fFalse);
+ CHECK_EQ(test.dSueq, test.dFalse);
+ CHECK_EQ(test.fSueq, test.fFalse);
+ CHECK_EQ(test.dSlt, test.dTrue);
+ CHECK_EQ(test.fSlt, test.fTrue);
+ CHECK_EQ(test.dSult, test.dTrue);
+ CHECK_EQ(test.fSult, test.fTrue);
+ CHECK_EQ(test.dSle, test.dTrue);
+ CHECK_EQ(test.fSle, test.fTrue);
+ CHECK_EQ(test.dSule, test.dTrue);
+ CHECK_EQ(test.fSule, test.fTrue);
+ CHECK_EQ(test.dSne, test.dTrue);
+ CHECK_EQ(test.fSne, test.fTrue);
+ CHECK_EQ(test.dSor, test.dTrue);
+ CHECK_EQ(test.fSor, test.fTrue);
+ CHECK_EQ(test.dSune, test.dTrue);
+ CHECK_EQ(test.fSune, test.fTrue);*/
+
+ test.dOp1 = std::numeric_limits<double>::max();
+ test.dOp2 = std::numeric_limits<double>::min();
+ test.fOp1 = std::numeric_limits<float>::min();
+ test.fOp2 = -std::numeric_limits<float>::max();
+ f.Call(&test, 0, 0, 0, 0);
+
+ CHECK_EQ(test.dCaf, test.dFalse);
+ CHECK_EQ(test.fCaf, test.fFalse);
+ CHECK_EQ(test.dCun, test.dFalse);
+ CHECK_EQ(test.fCun, test.fFalse);
+ CHECK_EQ(test.dCeq, test.dFalse);
+ CHECK_EQ(test.fCeq, test.fFalse);
+ CHECK_EQ(test.dCueq, test.dFalse);
+ CHECK_EQ(test.fCueq, test.fFalse);
+ CHECK_EQ(test.dClt, test.dFalse);
+ CHECK_EQ(test.fClt, test.fFalse);
+ CHECK_EQ(test.dCult, test.dFalse);
+ CHECK_EQ(test.fCult, test.fFalse);
+ CHECK_EQ(test.dCle, test.dFalse);
+ CHECK_EQ(test.fCle, test.fFalse);
+ CHECK_EQ(test.dCule, test.dFalse);
+ CHECK_EQ(test.fCule, test.fFalse);
+ CHECK_EQ(test.dCne, test.dTrue);
+ CHECK_EQ(test.fCne, test.fTrue);
+ CHECK_EQ(test.dCor, test.dTrue);
+ CHECK_EQ(test.fCor, test.fTrue);
+ CHECK_EQ(test.dCune, test.dTrue);
+ CHECK_EQ(test.fCune, test.fTrue);
+ /* CHECK_EQ(test.dSaf, test.dFalse);
+ CHECK_EQ(test.fSaf, test.fFalse);
+ CHECK_EQ(test.dSun, test.dFalse);
+ CHECK_EQ(test.fSun, test.fFalse);
+ CHECK_EQ(test.dSeq, test.dFalse);
+ CHECK_EQ(test.fSeq, test.fFalse);
+ CHECK_EQ(test.dSueq, test.dFalse);
+ CHECK_EQ(test.fSueq, test.fFalse);
+ CHECK_EQ(test.dSlt, test.dFalse);
+ CHECK_EQ(test.fSlt, test.fFalse);
+ CHECK_EQ(test.dSult, test.dFalse);
+ CHECK_EQ(test.fSult, test.fFalse);
+ CHECK_EQ(test.dSle, test.dFalse);
+ CHECK_EQ(test.fSle, test.fFalse);
+ CHECK_EQ(test.dSule, test.dFalse);
+ CHECK_EQ(test.fSule, test.fFalse);
+ CHECK_EQ(test.dSne, test.dTrue);
+ CHECK_EQ(test.fSne, test.fTrue);
+ CHECK_EQ(test.dSor, test.dTrue);
+ CHECK_EQ(test.fSor, test.fTrue);
+ CHECK_EQ(test.dSune, test.dTrue);
+ CHECK_EQ(test.fSune, test.fTrue);*/
+
+ test.dOp1 = std::numeric_limits<double>::quiet_NaN();
+ test.dOp2 = 0.0;
+ test.fOp1 = std::numeric_limits<float>::quiet_NaN();
+ test.fOp2 = 0.0;
+ f.Call(&test, 0, 0, 0, 0);
+
+ CHECK_EQ(test.dCaf, test.dFalse);
+ CHECK_EQ(test.fCaf, test.fFalse);
+ CHECK_EQ(test.dCun, test.dTrue);
+ CHECK_EQ(test.fCun, test.fTrue);
+ CHECK_EQ(test.dCeq, test.dFalse);
+ CHECK_EQ(test.fCeq, test.fFalse);
+ CHECK_EQ(test.dCueq, test.dTrue);
+ CHECK_EQ(test.fCueq, test.fTrue);
+ CHECK_EQ(test.dClt, test.dFalse);
+ CHECK_EQ(test.fClt, test.fFalse);
+ CHECK_EQ(test.dCult, test.dTrue);
+ CHECK_EQ(test.fCult, test.fTrue);
+ CHECK_EQ(test.dCle, test.dFalse);
+ CHECK_EQ(test.fCle, test.fFalse);
+ CHECK_EQ(test.dCule, test.dTrue);
+ CHECK_EQ(test.fCule, test.fTrue);
+ CHECK_EQ(test.dCne, test.dFalse);
+ CHECK_EQ(test.fCne, test.fFalse);
+ CHECK_EQ(test.dCor, test.dFalse);
+ CHECK_EQ(test.fCor, test.fFalse);
+ CHECK_EQ(test.dCune, test.dTrue);
+ CHECK_EQ(test.fCune, test.fTrue);
+ /* CHECK_EQ(test.dSaf, test.dTrue);
+ CHECK_EQ(test.fSaf, test.fTrue);
+ CHECK_EQ(test.dSun, test.dTrue);
+ CHECK_EQ(test.fSun, test.fTrue);
+ CHECK_EQ(test.dSeq, test.dFalse);
+ CHECK_EQ(test.fSeq, test.fFalse);
+ CHECK_EQ(test.dSueq, test.dTrue);
+ CHECK_EQ(test.fSueq, test.fTrue);
+ CHECK_EQ(test.dSlt, test.dFalse);
+ CHECK_EQ(test.fSlt, test.fFalse);
+ CHECK_EQ(test.dSult, test.dTrue);
+ CHECK_EQ(test.fSult, test.fTrue);
+ CHECK_EQ(test.dSle, test.dFalse);
+ CHECK_EQ(test.fSle, test.fFalse);
+ CHECK_EQ(test.dSule, test.dTrue);
+ CHECK_EQ(test.fSule, test.fTrue);
+ CHECK_EQ(test.dSne, test.dFalse);
+ CHECK_EQ(test.fSne, test.fFalse);
+ CHECK_EQ(test.dSor, test.dFalse);
+ CHECK_EQ(test.fSor, test.fFalse);
+ CHECK_EQ(test.dSune, test.dTrue);
+ CHECK_EQ(test.fSune, test.fTrue);*/
+}
+
+TEST(FCVT) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ struct TestFloat {
+ float fcvt_d_s_in;
+ double fcvt_s_d_in;
+ double fcvt_d_s_out;
+ float fcvt_s_d_out;
+ int fcsr;
+ };
+ TestFloat test;
+ __ xor_(a4, a4, a4);
+ __ xor_(a5, a5, a5);
+ __ Ld_w(a4, MemOperand(a0, offsetof(TestFloat, fcsr)));
+ __ movfcsr2gr(a5);
+ __ movgr2fcsr(a4);
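+ // a5 now holds the original FCSR and the rounding mode from test.fcsr has
+ // been installed; the original value is restored below before returning.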
+ __ Fld_s(f8, MemOperand(a0, offsetof(TestFloat, fcvt_d_s_in)));
+ __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, fcvt_s_d_in)));
+ __ fcvt_d_s(f10, f8);
+ __ fcvt_s_d(f11, f9);
+ __ Fst_d(f10, MemOperand(a0, offsetof(TestFloat, fcvt_d_s_out)));
+ __ Fst_s(f11, MemOperand(a0, offsetof(TestFloat, fcvt_s_d_out)));
+ __ movgr2fcsr(a5);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ test.fcsr = kRoundToNearest;
+
+ test.fcvt_d_s_in = -0.51;
+ test.fcvt_s_d_in = -0.51;
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.fcvt_d_s_out, static_cast<double>(test.fcvt_d_s_in));
+ CHECK_EQ(test.fcvt_s_d_out, static_cast<float>(test.fcvt_s_d_in));
+
+ test.fcvt_d_s_in = 0.49;
+ test.fcvt_s_d_in = 0.49;
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.fcvt_d_s_out, static_cast<double>(test.fcvt_d_s_in));
+ CHECK_EQ(test.fcvt_s_d_out, static_cast<float>(test.fcvt_s_d_in));
+
+ test.fcvt_d_s_in = std::numeric_limits<float>::max();
+ test.fcvt_s_d_in = std::numeric_limits<double>::max();
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.fcvt_d_s_out, static_cast<double>(test.fcvt_d_s_in));
+ CHECK_EQ(test.fcvt_s_d_out, static_cast<float>(test.fcvt_s_d_in));
+
+ test.fcvt_d_s_in = -std::numeric_limits<float>::max();
+ test.fcvt_s_d_in = -std::numeric_limits<double>::max();
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.fcvt_d_s_out, static_cast<double>(test.fcvt_d_s_in));
+ CHECK_EQ(test.fcvt_s_d_out, static_cast<float>(test.fcvt_s_d_in));
+
+ test.fcvt_d_s_in = std::numeric_limits<float>::min();
+ test.fcvt_s_d_in = std::numeric_limits<double>::min();
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.fcvt_d_s_out, static_cast<double>(test.fcvt_d_s_in));
+ CHECK_EQ(test.fcvt_s_d_out, static_cast<float>(test.fcvt_s_d_in));
+}
+
+TEST(FFINT) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ struct TestFloat {
+ int32_t ffint_s_w_in;
+ int64_t ffint_s_l_in;
+ int32_t ffint_d_w_in;
+ int64_t ffint_d_l_in;
+ float ffint_s_w_out;
+ float ffint_s_l_out;
+ double ffint_d_w_out;
+ double ffint_d_l_out;
+ int fcsr;
+ };
+ TestFloat test;
+ __ xor_(a4, a4, a4);
+ __ xor_(a5, a5, a5);
+ __ Ld_w(a4, MemOperand(a0, offsetof(TestFloat, fcsr)));
+ __ movfcsr2gr(a5);
+ __ movgr2fcsr(a4);
+ __ Fld_s(f8, MemOperand(a0, offsetof(TestFloat, ffint_s_w_in)));
+ __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, ffint_s_l_in)));
+ __ Fld_s(f10, MemOperand(a0, offsetof(TestFloat, ffint_d_w_in)));
+ __ Fld_d(f11, MemOperand(a0, offsetof(TestFloat, ffint_d_l_in)));
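+ // The loads above move raw integer bit patterns into the FPRs;
+ // ffint_{s,d}_{w,l} reinterpret them as signed 32/64-bit integers and
+ // convert to float/double.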
+ __ ffint_s_w(f12, f8);
+ __ ffint_s_l(f13, f9);
+ __ ffint_d_w(f14, f10);
+ __ ffint_d_l(f15, f11);
+ __ Fst_s(f12, MemOperand(a0, offsetof(TestFloat, ffint_s_w_out)));
+ __ Fst_s(f13, MemOperand(a0, offsetof(TestFloat, ffint_s_l_out)));
+ __ Fst_d(f14, MemOperand(a0, offsetof(TestFloat, ffint_d_w_out)));
+ __ Fst_d(f15, MemOperand(a0, offsetof(TestFloat, ffint_d_l_out)));
+ __ movgr2fcsr(a5);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ test.fcsr = kRoundToNearest;
+
+ test.ffint_s_w_in = -1;
+ test.ffint_s_l_in = -1;
+ test.ffint_d_w_in = -1;
+ test.ffint_d_l_in = -1;
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.ffint_s_w_out, static_cast<float>(test.ffint_s_w_in));
+ CHECK_EQ(test.ffint_s_l_out, static_cast<float>(test.ffint_s_l_in));
+ CHECK_EQ(test.ffint_d_w_out, static_cast<double>(test.ffint_d_w_in));
+ CHECK_EQ(test.ffint_d_l_out, static_cast<double>(test.ffint_d_l_in));
+
+ test.ffint_s_w_in = 1;
+ test.ffint_s_l_in = 1;
+ test.ffint_d_w_in = 1;
+ test.ffint_d_l_in = 1;
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.ffint_s_w_out, static_cast<float>(test.ffint_s_w_in));
+ CHECK_EQ(test.ffint_s_l_out, static_cast<float>(test.ffint_s_l_in));
+ CHECK_EQ(test.ffint_d_w_out, static_cast<double>(test.ffint_d_w_in));
+ CHECK_EQ(test.ffint_d_l_out, static_cast<double>(test.ffint_d_l_in));
+
+ test.ffint_s_w_in = std::numeric_limits<int32_t>::max();
+ test.ffint_s_l_in = std::numeric_limits<int64_t>::max();
+ test.ffint_d_w_in = std::numeric_limits<int32_t>::max();
+ test.ffint_d_l_in = std::numeric_limits<int64_t>::max();
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.ffint_s_w_out, static_cast<float>(test.ffint_s_w_in));
+ CHECK_EQ(test.ffint_s_l_out, static_cast<float>(test.ffint_s_l_in));
+ CHECK_EQ(test.ffint_d_w_out, static_cast<double>(test.ffint_d_w_in));
+ CHECK_EQ(test.ffint_d_l_out, static_cast<double>(test.ffint_d_l_in));
+
+ test.ffint_s_w_in = std::numeric_limits<int32_t>::min();
+ test.ffint_s_l_in = std::numeric_limits<int64_t>::min();
+ test.ffint_d_w_in = std::numeric_limits<int32_t>::min();
+ test.ffint_d_l_in = std::numeric_limits<int64_t>::min();
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.ffint_s_w_out, static_cast<float>(test.ffint_s_w_in));
+ CHECK_EQ(test.ffint_s_l_out, static_cast<float>(test.ffint_s_l_in));
+ CHECK_EQ(test.ffint_d_w_out, static_cast<double>(test.ffint_d_w_in));
+ CHECK_EQ(test.ffint_d_l_out, static_cast<double>(test.ffint_d_l_in));
+}
+
+TEST(FTINT) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ struct Test {
+ double a;
+ float b;
+ int32_t c;
+ int32_t d;
+ int64_t e;
+ int64_t f;
+ int fcsr;
+ };
+ Test test;
+
+ const int kTableLength = 9;
+ // clang-format off
+ double inputs_d[kTableLength] = {
+ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+ std::numeric_limits<double>::quiet_NaN(),
+ std::numeric_limits<double>::infinity()
+ };
+ float inputs_s[kTableLength] = {
+ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+    std::numeric_limits<float>::quiet_NaN(),
+    std::numeric_limits<float>::infinity()
+ };
+ double outputs_RN_W[kTableLength] = {
+ 3.0, 4.0, 4.0, -3.0, -4.0, -4.0,
+ kFPUInvalidResult, 0,
+ kFPUInvalidResult};
+ double outputs_RN_L[kTableLength] = {
+ 3.0, 4.0, 4.0, -3.0, -4.0, -4.0,
+ 2147483648.0, 0,
+ static_cast<double>(kFPU64InvalidResult)};
+ double outputs_RZ_W[kTableLength] = {
+ 3.0, 3.0, 3.0, -3.0, -3.0, -3.0,
+ kFPUInvalidResult, 0,
+ kFPUInvalidResult};
+ double outputs_RZ_L[kTableLength] = {
+ 3.0, 3.0, 3.0, -3.0, -3.0, -3.0,
+ 2147483648.0, 0,
+ static_cast<double>(kFPU64InvalidResult)};
+ double outputs_RP_W[kTableLength] = {
+ 4.0, 4.0, 4.0, -3.0, -3.0, -3.0,
+ kFPUInvalidResult, 0,
+ kFPUInvalidResult};
+ double outputs_RP_L[kTableLength] = {
+ 4.0, 4.0, 4.0, -3.0, -3.0, -3.0,
+ 2147483648.0, 0,
+ static_cast<double>(kFPU64InvalidResult)};
+ double outputs_RM_W[kTableLength] = {
+ 3.0, 3.0, 3.0, -4.0, -4.0, -4.0,
+ kFPUInvalidResult, 0,
+ kFPUInvalidResult};
+ double outputs_RM_L[kTableLength] = {
+ 3.0, 3.0, 3.0, -4.0, -4.0, -4.0,
+ 2147483648.0, 0,
+ static_cast<double>(kFPU64InvalidResult)};
+ // clang-format on
+
+ int fcsr_inputs[4] = {kRoundToNearest, kRoundToZero, kRoundToPlusInf,
+ kRoundToMinusInf};
+ double* outputs[8] = {
+ outputs_RN_W, outputs_RN_L, outputs_RZ_W, outputs_RZ_L,
+ outputs_RP_W, outputs_RP_L, outputs_RM_W, outputs_RM_L,
+ };
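+ // outputs[2 * j] holds the expected word (W) results and outputs[2 * j + 1]
+ // the doubleword (L) results for rounding mode fcsr_inputs[j].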
+
+ __ Fld_d(f8, MemOperand(a0, offsetof(Test, a)));
+ __ Fld_s(f9, MemOperand(a0, offsetof(Test, b)));
+ __ xor_(a5, a5, a5);
+ __ Ld_w(a5, MemOperand(a0, offsetof(Test, fcsr)));
+ __ movfcsr2gr(a4);
+ __ movgr2fcsr(a5);
+ __ ftint_w_d(f10, f8);
+ __ ftint_w_s(f11, f9);
+ __ ftint_l_d(f12, f8);
+ __ ftint_l_s(f13, f9);
+ __ Fst_s(f10, MemOperand(a0, offsetof(Test, c)));
+ __ Fst_s(f11, MemOperand(a0, offsetof(Test, d)));
+ __ Fst_d(f12, MemOperand(a0, offsetof(Test, e)));
+ __ Fst_d(f13, MemOperand(a0, offsetof(Test, f)));
+ __ movgr2fcsr(a4);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ for (int j = 0; j < 4; j++) {
+ test.fcsr = fcsr_inputs[j];
+ for (int i = 0; i < kTableLength; i++) {
+ test.a = inputs_d[i];
+ test.b = inputs_s[i];
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.c, outputs[2 * j][i]);
+ CHECK_EQ(test.d, outputs[2 * j][i]);
+ CHECK_EQ(test.e, outputs[2 * j + 1][i]);
+ CHECK_EQ(test.f, outputs[2 * j + 1][i]);
+ }
+ }
+}
+
+TEST(FTINTRM) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ struct Test {
+ double a;
+ float b;
+ int32_t c;
+ int32_t d;
+ int64_t e;
+ int64_t f;
+ };
+ Test test;
+
+ const int kTableLength = 9;
+
+ // clang-format off
+ double inputs_d[kTableLength] = {
+ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+ std::numeric_limits<double>::quiet_NaN(),
+ std::numeric_limits<double>::infinity()
+ };
+ float inputs_s[kTableLength] = {
+ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+    std::numeric_limits<float>::quiet_NaN(),
+    std::numeric_limits<float>::infinity()
+ };
+ double outputs_w[kTableLength] = {
+ 3.0, 3.0, 3.0, -4.0, -4.0, -4.0,
+ kFPUInvalidResult, 0,
+ kFPUInvalidResult};
+ double outputs_l[kTableLength] = {
+ 3.0, 3.0, 3.0, -4.0, -4.0, -4.0,
+ 2147483648.0, 0,
+ static_cast<double>(kFPU64InvalidResult)};
+ // clang-format on
+
+ __ Fld_d(f8, MemOperand(a0, offsetof(Test, a)));
+ __ Fld_s(f9, MemOperand(a0, offsetof(Test, b)));
+ __ ftintrm_w_d(f10, f8);
+ __ ftintrm_w_s(f11, f9);
+ __ ftintrm_l_d(f12, f8);
+ __ ftintrm_l_s(f13, f9);
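+ // ftintrm_* convert with rounding toward minus infinity regardless of the
+ // FCSR rounding mode; the RP, RZ and RNE variants below round toward plus
+ // infinity, zero and nearest-even respectively.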
+ __ Fst_s(f10, MemOperand(a0, offsetof(Test, c)));
+ __ Fst_s(f11, MemOperand(a0, offsetof(Test, d)));
+ __ Fst_d(f12, MemOperand(a0, offsetof(Test, e)));
+ __ Fst_d(f13, MemOperand(a0, offsetof(Test, f)));
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ for (int i = 0; i < kTableLength; i++) {
+ test.a = inputs_d[i];
+ test.b = inputs_s[i];
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.c, outputs_w[i]);
+ CHECK_EQ(test.d, outputs_w[i]);
+ CHECK_EQ(test.e, outputs_l[i]);
+ CHECK_EQ(test.f, outputs_l[i]);
+ }
+}
+
+TEST(FTINTRP) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ struct Test {
+ double a;
+ float b;
+ int32_t c;
+ int32_t d;
+ int64_t e;
+ int64_t f;
+ };
+ Test test;
+
+ const int kTableLength = 9;
+
+ // clang-format off
+ double inputs_d[kTableLength] = {
+ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+ std::numeric_limits<double>::quiet_NaN(),
+ std::numeric_limits<double>::infinity()
+ };
+ float inputs_s[kTableLength] = {
+ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+    std::numeric_limits<float>::quiet_NaN(),
+    std::numeric_limits<float>::infinity()
+ };
+ double outputs_w[kTableLength] = {
+ 4.0, 4.0, 4.0, -3.0, -3.0, -3.0,
+ kFPUInvalidResult, 0,
+ kFPUInvalidResult};
+ double outputs_l[kTableLength] = {
+ 4.0, 4.0, 4.0, -3.0, -3.0, -3.0,
+ 2147483648.0, 0,
+ static_cast<double>(kFPU64InvalidResult)};
+ // clang-format on
+
+ __ Fld_d(f8, MemOperand(a0, offsetof(Test, a)));
+ __ Fld_s(f9, MemOperand(a0, offsetof(Test, b)));
+ __ ftintrp_w_d(f10, f8);
+ __ ftintrp_w_s(f11, f9);
+ __ ftintrp_l_d(f12, f8);
+ __ ftintrp_l_s(f13, f9);
+ __ Fst_s(f10, MemOperand(a0, offsetof(Test, c)));
+ __ Fst_s(f11, MemOperand(a0, offsetof(Test, d)));
+ __ Fst_d(f12, MemOperand(a0, offsetof(Test, e)));
+ __ Fst_d(f13, MemOperand(a0, offsetof(Test, f)));
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ for (int i = 0; i < kTableLength; i++) {
+ test.a = inputs_d[i];
+ test.b = inputs_s[i];
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.c, outputs_w[i]);
+ CHECK_EQ(test.d, outputs_w[i]);
+ CHECK_EQ(test.e, outputs_l[i]);
+ CHECK_EQ(test.f, outputs_l[i]);
+ }
+}
+
+TEST(FTINTRZ) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ struct Test {
+ double a;
+ float b;
+ int32_t c;
+ int32_t d;
+ int64_t e;
+ int64_t f;
+ };
+ Test test;
+
+ const int kTableLength = 9;
+
+ // clang-format off
+ double inputs_d[kTableLength] = {
+ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+ std::numeric_limits<double>::quiet_NaN(),
+ std::numeric_limits<double>::infinity()
+ };
+ float inputs_s[kTableLength] = {
+ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+    std::numeric_limits<float>::quiet_NaN(),
+    std::numeric_limits<float>::infinity()
+ };
+ double outputs_w[kTableLength] = {
+ 3.0, 3.0, 3.0, -3.0, -3.0, -3.0,
+ kFPUInvalidResult, 0,
+ kFPUInvalidResult};
+ double outputs_l[kTableLength] = {
+ 3.0, 3.0, 3.0, -3.0, -3.0, -3.0,
+ 2147483648.0, 0,
+ static_cast<double>(kFPU64InvalidResult)};
+ // clang-format on
+
+ __ Fld_d(f8, MemOperand(a0, offsetof(Test, a)));
+ __ Fld_s(f9, MemOperand(a0, offsetof(Test, b)));
+ __ ftintrz_w_d(f10, f8);
+ __ ftintrz_w_s(f11, f9);
+ __ ftintrz_l_d(f12, f8);
+ __ ftintrz_l_s(f13, f9);
+ __ Fst_s(f10, MemOperand(a0, offsetof(Test, c)));
+ __ Fst_s(f11, MemOperand(a0, offsetof(Test, d)));
+ __ Fst_d(f12, MemOperand(a0, offsetof(Test, e)));
+ __ Fst_d(f13, MemOperand(a0, offsetof(Test, f)));
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ for (int i = 0; i < kTableLength; i++) {
+ test.a = inputs_d[i];
+ test.b = inputs_s[i];
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.c, outputs_w[i]);
+ CHECK_EQ(test.d, outputs_w[i]);
+ CHECK_EQ(test.e, outputs_l[i]);
+ CHECK_EQ(test.f, outputs_l[i]);
+ }
+}
+
+TEST(FTINTRNE) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ struct Test {
+ double a;
+ float b;
+ int32_t c;
+ int32_t d;
+ int64_t e;
+ int64_t f;
+ };
+ Test test;
+
+ const int kTableLength = 9;
+
+ // clang-format off
+ double inputs_d[kTableLength] = {
+ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+ std::numeric_limits<double>::quiet_NaN(),
+ std::numeric_limits<double>::infinity()
+ };
+ float inputs_s[kTableLength] = {
+ 3.1, 3.6, 3.5, -3.1, -3.6, -3.5,
+ 2147483648.0,
+    std::numeric_limits<float>::quiet_NaN(),
+    std::numeric_limits<float>::infinity()
+ };
+ double outputs_w[kTableLength] = {
+ 3.0, 4.0, 4.0, -3.0, -4.0, -4.0,
+ kFPUInvalidResult, 0,
+ kFPUInvalidResult};
+ double outputs_l[kTableLength] = {
+ 3.0, 4.0, 4.0, -3.0, -4.0, -4.0,
+ 2147483648.0, 0,
+ static_cast<double>(kFPU64InvalidResult)};
+ // clang-format on
+
+ __ Fld_d(f8, MemOperand(a0, offsetof(Test, a)));
+ __ Fld_s(f9, MemOperand(a0, offsetof(Test, b)));
+ __ ftintrne_w_d(f10, f8);
+ __ ftintrne_w_s(f11, f9);
+ __ ftintrne_l_d(f12, f8);
+ __ ftintrne_l_s(f13, f9);
+ __ Fst_s(f10, MemOperand(a0, offsetof(Test, c)));
+ __ Fst_s(f11, MemOperand(a0, offsetof(Test, d)));
+ __ Fst_d(f12, MemOperand(a0, offsetof(Test, e)));
+ __ Fst_d(f13, MemOperand(a0, offsetof(Test, f)));
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ for (int i = 0; i < kTableLength; i++) {
+ test.a = inputs_d[i];
+ test.b = inputs_s[i];
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.c, outputs_w[i]);
+ CHECK_EQ(test.d, outputs_w[i]);
+ CHECK_EQ(test.e, outputs_l[i]);
+ CHECK_EQ(test.f, outputs_l[i]);
+ }
+}
+
+TEST(FRINT) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ struct Test {
+ double a;
+ float b;
+ double c;
+ float d;
+ int fcsr;
+ };
+ Test test;
+
+ const int kTableLength = 32;
+
+ // clang-format off
+ double inputs_d[kTableLength] = {
+ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
+ 1.7976931348623157E+308, 6.27463370218383111104242366943E-307,
+ 309485009821345068724781056.89,
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 37778931862957161709568.0, 37778931862957161709569.0,
+ 37778931862957161709580.0, 37778931862957161709581.0,
+ 37778931862957161709582.0, 37778931862957161709583.0,
+ 37778931862957161709584.0, 37778931862957161709585.0,
+ 37778931862957161709586.0, 37778931862957161709587.0,
+ std::numeric_limits<double>::max() - 0.1,
+ std::numeric_limits<double>::infinity()
+ };
+ float inputs_s[kTableLength] = {
+ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
+ 1.7976931348623157E+38, 6.27463370218383111104242366943E-37,
+ 309485009821345068724781056.89,
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 37778931862957161709568.0, 37778931862957161709569.0,
+ 37778931862957161709580.0, 37778931862957161709581.0,
+ 37778931862957161709582.0, 37778931862957161709583.0,
+ 37778931862957161709584.0, 37778931862957161709585.0,
+ 37778931862957161709586.0, 37778931862957161709587.0,
+ std::numeric_limits<float>::lowest() + 0.6,
+ std::numeric_limits<float>::infinity()
+ };
+ float outputs_RN_S[kTableLength] = {
+ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
+ 1.7976931348623157E38, 0,
+ 309485009821345068724781057.0,
+ 2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
+ -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
+ 37778931862957161709568.0, 37778931862957161709569.0,
+ 37778931862957161709580.0, 37778931862957161709581.0,
+ 37778931862957161709582.0, 37778931862957161709583.0,
+ 37778931862957161709584.0, 37778931862957161709585.0,
+ 37778931862957161709586.0, 37778931862957161709587.0,
+ std::numeric_limits<float>::lowest() + 1,
+ std::numeric_limits<float>::infinity()
+ };
+ double outputs_RN_D[kTableLength] = {
+ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
+ 1.7976931348623157E308, 0,
+ 309485009821345068724781057.0,
+ 2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
+ -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
+ 37778931862957161709568.0, 37778931862957161709569.0,
+ 37778931862957161709580.0, 37778931862957161709581.0,
+ 37778931862957161709582.0, 37778931862957161709583.0,
+ 37778931862957161709584.0, 37778931862957161709585.0,
+ 37778931862957161709586.0, 37778931862957161709587.0,
+ std::numeric_limits<double>::max(),
+ std::numeric_limits<double>::infinity()
+ };
+ float outputs_RZ_S[kTableLength] = {
+ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
+ 1.7976931348623157E38, 0,
+ 309485009821345068724781057.0,
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 37778931862957161709568.0, 37778931862957161709569.0,
+ 37778931862957161709580.0, 37778931862957161709581.0,
+ 37778931862957161709582.0, 37778931862957161709583.0,
+ 37778931862957161709584.0, 37778931862957161709585.0,
+ 37778931862957161709586.0, 37778931862957161709587.0,
+ std::numeric_limits<float>::lowest() + 1,
+ std::numeric_limits<float>::infinity()
+ };
+ double outputs_RZ_D[kTableLength] = {
+ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
+ 1.7976931348623157E308, 0,
+ 309485009821345068724781057.0,
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 37778931862957161709568.0, 37778931862957161709569.0,
+ 37778931862957161709580.0, 37778931862957161709581.0,
+ 37778931862957161709582.0, 37778931862957161709583.0,
+ 37778931862957161709584.0, 37778931862957161709585.0,
+ 37778931862957161709586.0, 37778931862957161709587.0,
+ std::numeric_limits<double>::max() - 1,
+ std::numeric_limits<double>::infinity()
+ };
+ float outputs_RP_S[kTableLength] = {
+ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
+ 1.7976931348623157E38, 1,
+ 309485009821345068724781057.0,
+ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 37778931862957161709568.0, 37778931862957161709569.0,
+ 37778931862957161709580.0, 37778931862957161709581.0,
+ 37778931862957161709582.0, 37778931862957161709583.0,
+ 37778931862957161709584.0, 37778931862957161709585.0,
+ 37778931862957161709586.0, 37778931862957161709587.0,
+ std::numeric_limits<float>::lowest() + 1,
+ std::numeric_limits<float>::infinity()
+ };
+ double outputs_RP_D[kTableLength] = {
+ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
+ 1.7976931348623157E308, 1,
+ 309485009821345068724781057.0,
+ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 37778931862957161709568.0, 37778931862957161709569.0,
+ 37778931862957161709580.0, 37778931862957161709581.0,
+ 37778931862957161709582.0, 37778931862957161709583.0,
+ 37778931862957161709584.0, 37778931862957161709585.0,
+ 37778931862957161709586.0, 37778931862957161709587.0,
+ std::numeric_limits<double>::max(),
+ std::numeric_limits<double>::infinity()
+ };
+ float outputs_RM_S[kTableLength] = {
+ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E37,
+ 1.7976931348623157E38, 0,
+ 309485009821345068724781057.0,
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
+ 37778931862957161709568.0, 37778931862957161709569.0,
+ 37778931862957161709580.0, 37778931862957161709581.0,
+ 37778931862957161709582.0, 37778931862957161709583.0,
+ 37778931862957161709584.0, 37778931862957161709585.0,
+ 37778931862957161709586.0, 37778931862957161709587.0,
+ std::numeric_limits<float>::lowest() + 1,
+ std::numeric_limits<float>::infinity()
+ };
+ double outputs_RM_D[kTableLength] = {
+ 18446744073709551617.0, 4503599627370496.0, -4503599627370496.0,
+ 1.26782468584154733584017312973E30, 1.44860108245951772690707170478E147,
+ 1.7976931348623157E308, 0,
+ 309485009821345068724781057.0,
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
+ 37778931862957161709568.0, 37778931862957161709569.0,
+ 37778931862957161709580.0, 37778931862957161709581.0,
+ 37778931862957161709582.0, 37778931862957161709583.0,
+ 37778931862957161709584.0, 37778931862957161709585.0,
+ 37778931862957161709586.0, 37778931862957161709587.0,
+ std::numeric_limits<double>::max(),
+ std::numeric_limits<double>::infinity()
+ };
+ // clang-format on
+
+ int fcsr_inputs[4] = {kRoundToNearest, kRoundToZero, kRoundToPlusInf,
+ kRoundToMinusInf};
+ double* outputs_d[4] = {outputs_RN_D, outputs_RZ_D, outputs_RP_D,
+ outputs_RM_D};
+ float* outputs_s[4] = {outputs_RN_S, outputs_RZ_S, outputs_RP_S,
+ outputs_RM_S};
+
+ __ Fld_d(f8, MemOperand(a0, offsetof(Test, a)));
+ __ Fld_s(f9, MemOperand(a0, offsetof(Test, b)));
+ __ xor_(a5, a5, a5);
+ __ Ld_w(a5, MemOperand(a0, offsetof(Test, fcsr)));
+ __ movfcsr2gr(a4);
+ __ movgr2fcsr(a5);
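+ // frint_{d,s} round to an integral value in floating-point format using
+ // the rounding mode just installed in the FCSR.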
+ __ frint_d(f10, f8);
+ __ frint_s(f11, f9);
+ __ Fst_d(f10, MemOperand(a0, offsetof(Test, c)));
+ __ Fst_s(f11, MemOperand(a0, offsetof(Test, d)));
+ __ movgr2fcsr(a4);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ for (int j = 0; j < 4; j++) {
+ test.fcsr = fcsr_inputs[j];
+ for (int i = 0; i < kTableLength; i++) {
+ test.a = inputs_d[i];
+ test.b = inputs_s[i];
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.c, outputs_d[j][i]);
+ CHECK_EQ(test.d, outputs_s[j][i]);
+ }
+ }
+}
+
+TEST(FMOV) {
+ const int kTableLength = 7;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ struct TestFloat {
+ double a;
+ float b;
+ double c;
+ float d;
+ };
+
+ TestFloat test;
+
+ // clang-format off
+ double inputs_D[kTableLength] = {
+ 5.3, -5.3, 0.29, -0.29, 0,
+ std::numeric_limits<double>::max(),
+ -std::numeric_limits<double>::max()
+ };
+ float inputs_S[kTableLength] = {
+ 4.8, -4.8, 0.29, -0.29, 0,
+ std::numeric_limits<float>::max(),
+ -std::numeric_limits<float>::max()
+ };
+
+ double outputs_D[kTableLength] = {
+ 5.3, -5.3, 0.29, -0.29, 0,
+ std::numeric_limits<double>::max(),
+ -std::numeric_limits<double>::max()
+ };
+
+ float outputs_S[kTableLength] = {
+ 4.8, -4.8, 0.29, -0.29, 0,
+ std::numeric_limits<float>::max(),
+ -std::numeric_limits<float>::max()
+ };
+ // clang-format on
+
+ __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, a)));
+ __ Fld_s(f9, MemOperand(a0, offsetof(TestFloat, b)));
+ __ fmov_d(f10, f8);
+ __ fmov_s(f11, f9);
+ __ Fst_d(f10, MemOperand(a0, offsetof(TestFloat, c)));
+ __ Fst_s(f11, MemOperand(a0, offsetof(TestFloat, d)));
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ for (int i = 0; i < kTableLength; i++) {
+ test.a = inputs_D[i];
+ test.b = inputs_S[i];
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.c, outputs_D[i]);
+ CHECK_EQ(test.d, outputs_S[i]);
+ }
+}
+
+TEST(LA14) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ struct T {
+ double a;
+ double b;
+ double c;
+ double d;
+ int64_t high;
+ int64_t low;
+ };
+ T t;
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ __ Fld_d(f8, MemOperand(a0, offsetof(T, a)));
+ __ Fld_d(f9, MemOperand(a0, offsetof(T, b)));
+
+ __ movfr2gr_s(a4, f8);
+ __ movfrh2gr_s(a5, f8);
+ __ movfr2gr_d(a6, f9);
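+ // movfr2gr_s/movfrh2gr_s copy the low/high 32 bits of an FPR into a GPR
+ // with sign extension (checked via the 0xFFFFFFFF... expectations below);
+ // movfr2gr_d moves all 64 bits.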
+
+ __ movgr2fr_w(f9, a4);
+ __ movgr2frh_w(f9, a5);
+ __ movgr2fr_d(f8, a6);
+
+ __ Fst_d(f8, MemOperand(a0, offsetof(T, a)));
+ __ Fst_d(f9, MemOperand(a0, offsetof(T, c)));
+
+ __ Fld_d(f8, MemOperand(a0, offsetof(T, d)));
+ __ movfrh2gr_s(a4, f8);
+ __ movfr2gr_s(a5, f8);
+
+ __ St_d(a4, MemOperand(a0, offsetof(T, high)));
+ __ St_d(a5, MemOperand(a0, offsetof(T, low)));
+
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+
+ t.a = 1.5e22;
+ t.b = 2.75e11;
+ t.c = 17.17;
+ t.d = -2.75e11;
+ f.Call(&t, 0, 0, 0, 0);
+ CHECK_EQ(2.75e11, t.a);
+ CHECK_EQ(2.75e11, t.b);
+ CHECK_EQ(1.5e22, t.c);
+ CHECK_EQ(static_cast<int64_t>(0xFFFFFFFFC25001D1L), t.high);
+ CHECK_EQ(static_cast<int64_t>(0xFFFFFFFFBF800000L), t.low);
+
+ t.a = -1.5e22;
+ t.b = -2.75e11;
+ t.c = 17.17;
+ t.d = 274999868928.0;
+ f.Call(&t, 0, 0, 0, 0);
+ CHECK_EQ(-2.75e11, t.a);
+ CHECK_EQ(-2.75e11, t.b);
+ CHECK_EQ(-1.5e22, t.c);
+ CHECK_EQ(static_cast<int64_t>(0x425001D1L), t.high);
+ CHECK_EQ(static_cast<int64_t>(0x3F800000L), t.low);
+}
+
+uint64_t run_bceqz(int fcc_value, int32_t offset) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ Label main_block, L;
+ __ li(a2, 0);
+ __ li(t0, fcc_value);
+ __ b(&main_block);
+ // Block 1
+ for (int32_t i = -104; i <= -55; ++i) {
+ __ addi_d(a2, a2, 0x1);
+ }
+ __ b(&L);
+
+ // Block 2
+ for (int32_t i = -53; i <= -4; ++i) {
+ __ addi_d(a2, a2, 0x10);
+ }
+ __ b(&L);
+
+ // Block 3 (Main)
+ __ bind(&main_block);
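+ // Save the original FCC0 in t1 and install fcc_value; bceqz branches by
+ // `offset` instructions when FCC0 is clear. FCC0 is restored at L.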
+ __ movcf2gr(t1, FCC0);
+ __ movgr2cf(FCC0, t0);
+ __ bceqz(FCC0, offset);
+ __ bind(&L);
+ __ movgr2cf(FCC0, t1);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ // Block 4
+ for (int32_t i = 4; i <= 53; ++i) {
+ __ addi_d(a2, a2, 0x100);
+ }
+ __ b(&L);
+
+ // Block 5
+ for (int32_t i = 55; i <= 104; ++i) {
+ __ addi_d(a2, a2, 0x300);
+ }
+ __ b(&L);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
+
+ return res;
+}
+
+TEST(BCEQZ) {
+ CcTest::InitializeVM();
+ struct TestCaseBceqz {
+ int fcc;
+ int32_t offset;
+ uint64_t expected_res;
+ };
+
+ // clang-format off
+ struct TestCaseBceqz tc[] = {
+ // fcc, offset, expected_res
+ { 0, -90, 0x24 },
+ { 0, -27, 0x180 },
+ { 0, 47, 0x700 },
+ { 0, 70, 0x6900 },
+ { 1, -27, 0 },
+ { 1, 47, 0 },
+ };
+ // clang-format on
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBceqz);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint64_t res = run_bceqz(tc[i].fcc, tc[i].offset);
+ CHECK_EQ(tc[i].expected_res, res);
+ }
+}
+
+uint64_t run_bcnez(int fcc_value, int32_t offset) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ Label main_block, L;
+ __ li(a2, 0);
+ __ li(t0, fcc_value);
+ __ b(&main_block);
+ // Block 1
+ for (int32_t i = -104; i <= -55; ++i) {
+ __ addi_d(a2, a2, 0x1);
+ }
+ __ b(&L);
+
+ // Block 2
+ for (int32_t i = -53; i <= -4; ++i) {
+ __ addi_d(a2, a2, 0x10);
+ }
+ __ b(&L);
+
+ // Block 3 (Main)
+ __ bind(&main_block);
+ __ movcf2gr(t1, FCC0);
+ __ movgr2cf(FCC0, t0);
+ __ bcnez(FCC0, offset);
+ __ bind(&L);
+ __ movgr2cf(FCC0, t1);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ // Block 4
+ for (int32_t i = 4; i <= 53; ++i) {
+ __ addi_d(a2, a2, 0x100);
+ }
+ __ b(&L);
+
+ // Block 5
+ for (int32_t i = 55; i <= 104; ++i) {
+ __ addi_d(a2, a2, 0x300);
+ }
+ __ b(&L);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
+
+ return res;
+}
+
+TEST(BCNEZ) {
+ CcTest::InitializeVM();
+ struct TestCaseBcnez {
+ int fcc;
+ int32_t offset;
+ uint64_t expected_res;
+ };
+
+ // clang-format off
+ struct TestCaseBcnez tc[] = {
+ // fcc, offset, expected_res
+ { 1, -90, 0x24 },
+ { 1, -27, 0x180 },
+ { 1, 47, 0x700 },
+ { 1, 70, 0x6900 },
+ { 0, -27, 0 },
+ { 0, 47, 0 },
+ };
+ // clang-format on
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseBcnez);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint64_t res = run_bcnez(tc[i].fcc, tc[i].offset);
+ CHECK_EQ(tc[i].expected_res, res);
+ }
+}
+
+TEST(jump_tables1) {
+ // Test jump tables with forward jumps.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ const int kNumCases = 512;
+ int values[kNumCases];
+ isolate->random_number_generator()->NextBytes(values, sizeof(values));
+ Label labels[kNumCases];
+
+ __ addi_d(sp, sp, -8);
+ __ St_d(ra, MemOperand(sp, 0));
+ __ Align(8);
+
+ Label done;
+ {
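+    // Each dd() emits one 8-byte label address (two instruction slots), so
+    // the trampoline pool is blocked for kNumCases * 2 slots plus the six
+    // dispatch instructions. pcaddi sets ra two instructions past itself,
+    // which is 4 * kInstrSize before the table, hence the load offset below.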
+ __ BlockTrampolinePoolFor(kNumCases * 2 + 6);
+ __ pcaddi(ra, 2);
+ __ slli_d(t7, a0, 3);
+ __ add_d(t7, t7, ra);
+ __ Ld_d(t7, MemOperand(t7, 4 * kInstrSize));
+ __ jirl(zero_reg, t7, 0);
+ __ nop();
+ for (int i = 0; i < kNumCases; ++i) {
+ __ dd(&labels[i]);
+ }
+ }
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ bind(&labels[i]);
+ __ lu12i_w(a2, (values[i] >> 12) & 0xFFFFF);
+ __ ori(a2, a2, values[i] & 0xFFF);
+ __ b(&done);
+ __ nop();
+ }
+
+ __ bind(&done);
+ __ Ld_d(ra, MemOperand(sp, 0));
+ __ addi_d(sp, sp, 8);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ CHECK_EQ(0, assm.UnboundLabelsCount());
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ auto f = GeneratedCode<F1>::FromCode(*code);
+ for (int i = 0; i < kNumCases; ++i) {
+ int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
+ ::printf("f(%d) = %" PRId64 "\n", i, res);
+ CHECK_EQ((values[i]), static_cast<int>(res));
+ }
+}
+
+TEST(jump_tables2) {
+ // Test jump tables with backward jumps.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ const int kNumCases = 512;
+ int values[kNumCases];
+ isolate->random_number_generator()->NextBytes(values, sizeof(values));
+ Label labels[kNumCases];
+
+ __ addi_d(sp, sp, -8);
+ __ St_d(ra, MemOperand(sp, 0));
+
+ Label done, dispatch;
+ __ b(&dispatch);
+ __ nop();
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ bind(&labels[i]);
+ __ lu12i_w(a2, (values[i] >> 12) & 0xFFFFF);
+ __ ori(a2, a2, values[i] & 0xFFF);
+ __ b(&done);
+ __ nop();
+ }
+
+ __ Align(8);
+ __ bind(&dispatch);
+ {
+ __ BlockTrampolinePoolFor(kNumCases * 2 + 6);
+ __ pcaddi(ra, 2);
+ __ slli_d(t7, a0, 3);
+ __ add_d(t7, t7, ra);
+ __ Ld_d(t7, MemOperand(t7, 4 * kInstrSize));
+ __ jirl(zero_reg, t7, 0);
+ __ nop();
+ for (int i = 0; i < kNumCases; ++i) {
+ __ dd(&labels[i]);
+ }
+ }
+
+ __ bind(&done);
+ __ Ld_d(ra, MemOperand(sp, 0));
+ __ addi_d(sp, sp, 8);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ auto f = GeneratedCode<F1>::FromCode(*code);
+ for (int i = 0; i < kNumCases; ++i) {
+ int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
+ ::printf("f(%d) = %" PRId64 "\n", i, res);
+ CHECK_EQ(values[i], res);
+ }
+}
+
+TEST(jump_tables3) {
+ // Test jump tables with backward jumps and embedded heap objects.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ const int kNumCases = 512;
+ Handle<Object> values[kNumCases];
+ for (int i = 0; i < kNumCases; ++i) {
+ double value = isolate->random_number_generator()->NextDouble();
+ values[i] = isolate->factory()->NewHeapNumber<AllocationType::kOld>(value);
+ }
+ Label labels[kNumCases];
+ Object obj;
+ int64_t imm64;
+
+ __ addi_d(sp, sp, -8);
+ __ St_d(ra, MemOperand(sp, 0));
+
+ Label done, dispatch;
+ __ b(&dispatch);
+ __ nop();
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ bind(&labels[i]);
+ obj = *values[i];
+ imm64 = obj.ptr();
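+    // Materialize the full 64-bit tagged pointer: lu12i_w + ori set bits
+    // 0..31, lu32i_d sets bits 32..51, and lu52i_d sets the top twelve bits.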
+ __ lu12i_w(a2, (imm64 >> 12) & 0xFFFFF);
+ __ ori(a2, a2, imm64 & 0xFFF);
+ __ lu32i_d(a2, (imm64 >> 32) & 0xFFFFF);
+ __ lu52i_d(a2, a2, (imm64 >> 52) & 0xFFF);
+ __ b(&done);
+ }
+
+ __ Align(8);
+ __ bind(&dispatch);
+ {
+ __ BlockTrampolinePoolFor(kNumCases * 2 + 6);
+ __ pcaddi(ra, 2);
+    __ slli_d(t7, a0, 3);
+ __ add_d(t7, t7, ra);
+ __ Ld_d(t7, MemOperand(t7, 4 * kInstrSize));
+ __ jirl(zero_reg, t7, 0);
+ __ nop();
+ for (int i = 0; i < kNumCases; ++i) {
+ __ dd(&labels[i]);
+ }
+ }
+ __ bind(&done);
+ __ Ld_d(ra, MemOperand(sp, 0));
+ __ addi_d(sp, sp, 8);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ auto f = GeneratedCode<F1>::FromCode(*code);
+ for (int i = 0; i < kNumCases; ++i) {
+ Handle<Object> result(
+ Object(reinterpret_cast<Address>(f.Call(i, 0, 0, 0, 0))), isolate);
+#ifdef OBJECT_PRINT
+ ::printf("f(%d) = ", i);
+ result->Print(std::cout);
+ ::printf("\n");
+#endif
+ CHECK(values[i].is_identical_to(result));
+ }
+}
+
+uint64_t run_li_macro(int64_t imm, LiFlags mode, int32_t num_instr = 0) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ Label code_start;
+ __ bind(&code_start);
+ __ li(a2, imm, mode);
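+  // When the caller passes an expected length, check both the instructions
+  // actually emitted and the assembler's static estimate for this immediate.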
+ if (num_instr > 0) {
+ CHECK_EQ(assm.InstructionsGeneratedSince(&code_start), num_instr);
+ CHECK_EQ(__ InstrCountForLi64Bit(imm), num_instr);
+ }
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ auto f = GeneratedCode<F2>::FromCode(*code);
+
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
+
+ return res;
+}
+
+TEST(li_macro) {
+ CcTest::InitializeVM();
+
+ // Test li macro-instruction for border cases.
+
+ struct TestCase_li {
+ uint64_t imm;
+ int32_t num_instr;
+ };
+ // clang-format off
+ struct TestCase_li tc[] = {
+ // imm, num_instr
+    {0xFFFFFFFFFFFFF800, 1},  // min_int12
+    // The test case above generates an addi_d instruction.
+    // This is an int12 value, so a single addi_d loads it.
+    {             0x800, 1},  // max_int12 + 1
+    // Generates ori
+    // max_int12 + 1 does not fit in int12, but it does fit in uint12, so a
+    // single ori suffices.
+    {0xFFFFFFFFFFFFF7FF, 2},  // min_int12 - 1
+    // Generates lu12i_w + ori
+    // An int32 value is loaded with lu12i_w + ori.
+    {             0x801, 1},  // max_int12 + 2
+    // Generates ori
+    // Also a uint12 value, so ori suffices.
+    {      0x00001000, 1},  // max_uint12 + 1
+    // Generates lu12i_w
+    // The low 12 bits are 0, so lu12i_w alone loads the value.
+    {      0x00001001, 2},  // max_uint12 + 2
+    // Generates lu12i_w + ori
+    // Both halves are set, so two instructions are needed.
+ {0x00000000FFFFFFFF, 2}, // max_uint32
+ // addi_w + lu32i_d
+ {0x00000000FFFFFFFE, 2}, // max_uint32 - 1
+ // addi_w + lu32i_d
+ {0xFFFFFFFF80000000, 1}, // min_int32
+ // lu12i_w
+ {0x0000000080000000, 2}, // max_int32 + 1
+ // lu12i_w + lu32i_d
+ {0xFFFF0000FFFF8765, 3},
+ // lu12i_w + ori + lu32i_d
+ {0x1234ABCD87654321, 4},
+ // lu12i_w + ori + lu32i_d + lu52i_d
+ {0xFFFF789100000000, 2},
+ // xor + lu32i_d
+ {0xF12F789100000000, 3},
+ // xor + lu32i_d + lu52i_d
+ {0xF120000000000800, 2},
+ // ori + lu52i_d
+ {0xFFF0000000000000, 1},
+ // lu52i_d
+ {0xF100000000000000, 1},
+ {0x0122000000000000, 2},
+ {0x1234FFFF77654321, 4},
+ {0x1230000077654321, 3},
+ };
+ // clang-format on
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCase_li);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ CHECK_EQ(tc[i].imm,
+ run_li_macro(tc[i].imm, OPTIMIZE_SIZE, tc[i].num_instr));
+ CHECK_EQ(tc[i].imm, run_li_macro(tc[i].imm, CONSTANT_SIZE));
+ if (is_int48(tc[i].imm)) {
+ CHECK_EQ(tc[i].imm, run_li_macro(tc[i].imm, ADDRESS_LOAD));
+ }
+ }
+}
+
+TEST(FMIN_FMAX) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ struct TestFloat {
+ double a;
+ double b;
+ float c;
+ float d;
+ double e;
+ double f;
+ float g;
+ float h;
+ };
+
+ TestFloat test;
+ const double dnan = std::numeric_limits<double>::quiet_NaN();
+ const double dinf = std::numeric_limits<double>::infinity();
+ const double dminf = -std::numeric_limits<double>::infinity();
+ const float fnan = std::numeric_limits<float>::quiet_NaN();
+ const float finf = std::numeric_limits<float>::infinity();
+ const float fminf = -std::numeric_limits<float>::infinity();
+ const int kTableLength = 13;
+
+ // clang-format off
+ double inputsa[kTableLength] = {2.0, 3.0, dnan, 3.0, -0.0, 0.0, dinf,
+ dnan, 42.0, dinf, dminf, dinf, dnan};
+ double inputsb[kTableLength] = {3.0, 2.0, 3.0, dnan, 0.0, -0.0, dnan,
+ dinf, dinf, 42.0, dinf, dminf, dnan};
+ double outputsdmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, -0.0,
+ -0.0, dinf, dinf, 42.0, 42.0,
+ dminf, dminf, dnan};
+ double outputsdmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, 0.0, 0.0, dinf,
+ dinf, dinf, dinf, dinf, dinf, dnan};
+
+ float inputsc[kTableLength] = {2.0, 3.0, fnan, 3.0, -0.0, 0.0, finf,
+ fnan, 42.0, finf, fminf, finf, fnan};
+ float inputsd[kTableLength] = {3.0, 2.0, 3.0, fnan, 0.0, -0.0, fnan,
+ finf, finf, 42.0, finf, fminf, fnan};
+ float outputsfmin[kTableLength] = {2.0, 2.0, 3.0, 3.0, -0.0,
+ -0.0, finf, finf, 42.0, 42.0,
+ fminf, fminf, fnan};
+ float outputsfmax[kTableLength] = {3.0, 3.0, 3.0, 3.0, 0.0, 0.0, finf,
+ finf, finf, finf, finf, finf, fnan};
+ // clang-format on
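+  // Per IEEE 754-2008 minNum/maxNum semantics, a quiet NaN operand yields
+  // the other operand; only NaN/NaN pairs produce NaN, as the tables above
+  // expect.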
+
+ __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, a)));
+ __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, b)));
+ __ Fld_s(f10, MemOperand(a0, offsetof(TestFloat, c)));
+ __ Fld_s(f11, MemOperand(a0, offsetof(TestFloat, d)));
+ __ fmin_d(f12, f8, f9);
+ __ fmax_d(f13, f8, f9);
+ __ fmin_s(f14, f10, f11);
+ __ fmax_s(f15, f10, f11);
+ __ Fst_d(f12, MemOperand(a0, offsetof(TestFloat, e)));
+ __ Fst_d(f13, MemOperand(a0, offsetof(TestFloat, f)));
+ __ Fst_s(f14, MemOperand(a0, offsetof(TestFloat, g)));
+ __ Fst_s(f15, MemOperand(a0, offsetof(TestFloat, h)));
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ for (int i = 4; i < kTableLength; i++) {
+ test.a = inputsa[i];
+ test.b = inputsb[i];
+ test.c = inputsc[i];
+ test.d = inputsd[i];
+
+ f.Call(&test, 0, 0, 0, 0);
+
+ CHECK_EQ(0, memcmp(&test.e, &outputsdmin[i], sizeof(test.e)));
+ CHECK_EQ(0, memcmp(&test.f, &outputsdmax[i], sizeof(test.f)));
+ CHECK_EQ(0, memcmp(&test.g, &outputsfmin[i], sizeof(test.g)));
+ CHECK_EQ(0, memcmp(&test.h, &outputsfmax[i], sizeof(test.h)));
+ }
+}
+
+TEST(FMINA_FMAXA) {
+ const int kTableLength = 23;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+ const double dnan = std::numeric_limits<double>::quiet_NaN();
+ const double dinf = std::numeric_limits<double>::infinity();
+ const double dminf = -std::numeric_limits<double>::infinity();
+ const float fnan = std::numeric_limits<float>::quiet_NaN();
+ const float finf = std::numeric_limits<float>::infinity();
+  const float fminf = -std::numeric_limits<float>::infinity();
+
+ struct TestFloat {
+ double a;
+ double b;
+ double resd1;
+ double resd2;
+ float c;
+ float d;
+ float resf1;
+ float resf2;
+ };
+
+ TestFloat test;
+ // clang-format off
+ double inputsa[kTableLength] = {
+ 5.3, 4.8, 6.1, 9.8, 9.8, 9.8, -10.0, -8.9, -9.8, -10.0, -8.9, -9.8,
+ dnan, 3.0, -0.0, 0.0, dinf, dnan, 42.0, dinf, dminf, dinf, dnan};
+ double inputsb[kTableLength] = {
+ 4.8, 5.3, 6.1, -10.0, -8.9, -9.8, 9.8, 9.8, 9.8, -9.8, -11.2, -9.8,
+ 3.0, dnan, 0.0, -0.0, dnan, dinf, dinf, 42.0, dinf, dminf, dnan};
+ double resd1[kTableLength] = {
+ 4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9, -9.8, -9.8, -8.9, -9.8,
+ 3.0, 3.0, -0.0, -0.0, dinf, dinf, 42.0, 42.0, dminf, dminf, dnan};
+ double resd2[kTableLength] = {
+ 5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8, 9.8, -10.0, -11.2, -9.8,
+ 3.0, 3.0, 0.0, 0.0, dinf, dinf, dinf, dinf, dinf, dinf, dnan};
+ float inputsc[kTableLength] = {
+ 5.3, 4.8, 6.1, 9.8, 9.8, 9.8, -10.0, -8.9, -9.8, -10.0, -8.9, -9.8,
+ fnan, 3.0, -0.0, 0.0, finf, fnan, 42.0, finf, fminf, finf, fnan};
+  float inputsd[kTableLength] = {
+      4.8, 5.3, 6.1, -10.0, -8.9, -9.8, 9.8, 9.8, 9.8, -9.8, -11.2, -9.8,
+      3.0, fnan, 0.0, -0.0, fnan, finf, finf, 42.0, finf, fminf, fnan};
+ float resf1[kTableLength] = {
+ 4.8, 4.8, 6.1, 9.8, -8.9, -9.8, 9.8, -8.9, -9.8, -9.8, -8.9, -9.8,
+ 3.0, 3.0, -0.0, -0.0, finf, finf, 42.0, 42.0, fminf, fminf, fnan};
+ float resf2[kTableLength] = {
+ 5.3, 5.3, 6.1, -10.0, 9.8, 9.8, -10.0, 9.8, 9.8, -10.0, -11.2, -9.8,
+ 3.0, 3.0, 0.0, 0.0, finf, finf, finf, finf, finf, finf, fnan};
+ // clang-format on
+
+ __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, a)));
+ __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, b)));
+ __ Fld_s(f10, MemOperand(a0, offsetof(TestFloat, c)));
+ __ Fld_s(f11, MemOperand(a0, offsetof(TestFloat, d)));
+ __ fmina_d(f12, f8, f9);
+ __ fmaxa_d(f13, f8, f9);
+ __ fmina_s(f14, f10, f11);
+ __ fmaxa_s(f15, f10, f11);
+ __ Fst_d(f12, MemOperand(a0, offsetof(TestFloat, resd1)));
+ __ Fst_d(f13, MemOperand(a0, offsetof(TestFloat, resd2)));
+ __ Fst_s(f14, MemOperand(a0, offsetof(TestFloat, resf1)));
+ __ Fst_s(f15, MemOperand(a0, offsetof(TestFloat, resf2)));
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ for (int i = 0; i < kTableLength; i++) {
+ test.a = inputsa[i];
+ test.b = inputsb[i];
+ test.c = inputsc[i];
+ test.d = inputsd[i];
+ f.Call(&test, 0, 0, 0, 0);
+ if (i < kTableLength - 1) {
+ CHECK_EQ(test.resd1, resd1[i]);
+ CHECK_EQ(test.resd2, resd2[i]);
+ CHECK_EQ(test.resf1, resf1[i]);
+ CHECK_EQ(test.resf2, resf2[i]);
+ } else {
+ CHECK(std::isnan(test.resd1));
+ CHECK(std::isnan(test.resd2));
+ CHECK(std::isnan(test.resf1));
+ CHECK(std::isnan(test.resf2));
+ }
+ }
+}
+
+TEST(FADD) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ struct TestFloat {
+ double a;
+ double b;
+ double c;
+ float d;
+ float e;
+ float f;
+ };
+
+ TestFloat test;
+
+ __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, a)));
+ __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, b)));
+ __ fadd_d(f10, f8, f9);
+ __ Fst_d(f10, MemOperand(a0, offsetof(TestFloat, c)));
+
+ __ Fld_s(f11, MemOperand(a0, offsetof(TestFloat, d)));
+ __ Fld_s(f12, MemOperand(a0, offsetof(TestFloat, e)));
+ __ fadd_s(f13, f11, f12);
+ __ Fst_s(f13, MemOperand(a0, offsetof(TestFloat, f)));
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ test.a = 2.0;
+ test.b = 3.0;
+ test.d = 2.0;
+ test.e = 3.0;
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.c, 5.0);
+ CHECK_EQ(test.f, 5.0);
+
+ test.a = std::numeric_limits<double>::max();
+ test.b = -std::numeric_limits<double>::max(); // lowest()
+ test.d = std::numeric_limits<float>::max();
+ test.e = -std::numeric_limits<float>::max(); // lowest()
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.c, 0.0);
+ CHECK_EQ(test.f, 0.0);
+
+ test.a = std::numeric_limits<double>::max();
+ test.b = std::numeric_limits<double>::max();
+ test.d = std::numeric_limits<float>::max();
+ test.e = std::numeric_limits<float>::max();
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK(!std::isfinite(test.c));
+ CHECK(!std::isfinite(test.f));
+
+ test.a = 5.0;
+ test.b = std::numeric_limits<double>::signaling_NaN();
+ test.d = 5.0;
+ test.e = std::numeric_limits<float>::signaling_NaN();
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK(std::isnan(test.c));
+ CHECK(std::isnan(test.f));
+}
+
+TEST(FSUB) {
+ const int kTableLength = 12;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ struct TestFloat {
+ float a;
+ float b;
+ float resultS;
+ double c;
+ double d;
+ double resultD;
+ };
+
+ TestFloat test;
+
+ // clang-format off
+ double inputfs_D[kTableLength] = {
+ 5.3, 4.8, 2.9, -5.3, -4.8, -2.9,
+ 5.3, 4.8, 2.9, -5.3, -4.8, -2.9
+ };
+ double inputft_D[kTableLength] = {
+ 4.8, 5.3, 2.9, 4.8, 5.3, 2.9,
+ -4.8, -5.3, -2.9, -4.8, -5.3, -2.9
+ };
+ double outputs_D[kTableLength] = {
+ 0.5, -0.5, 0.0, -10.1, -10.1, -5.8,
+ 10.1, 10.1, 5.8, -0.5, 0.5, 0.0
+ };
+ float inputfs_S[kTableLength] = {
+ 5.3, 4.8, 2.9, -5.3, -4.8, -2.9,
+ 5.3, 4.8, 2.9, -5.3, -4.8, -2.9
+ };
+ float inputft_S[kTableLength] = {
+ 4.8, 5.3, 2.9, 4.8, 5.3, 2.9,
+ -4.8, -5.3, -2.9, -4.8, -5.3, -2.9
+ };
+ float outputs_S[kTableLength] = {
+ 0.5, -0.5, 0.0, -10.1, -10.1, -5.8,
+ 10.1, 10.1, 5.8, -0.5, 0.5, 0.0
+ };
+ // clang-format on
+
+ __ Fld_s(f8, MemOperand(a0, offsetof(TestFloat, a)));
+ __ Fld_s(f9, MemOperand(a0, offsetof(TestFloat, b)));
+ __ Fld_d(f10, MemOperand(a0, offsetof(TestFloat, c)));
+ __ Fld_d(f11, MemOperand(a0, offsetof(TestFloat, d)));
+ __ fsub_s(f12, f8, f9);
+ __ fsub_d(f13, f10, f11);
+ __ Fst_s(f12, MemOperand(a0, offsetof(TestFloat, resultS)));
+ __ Fst_d(f13, MemOperand(a0, offsetof(TestFloat, resultD)));
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ for (int i = 0; i < kTableLength; i++) {
+ test.a = inputfs_S[i];
+ test.b = inputft_S[i];
+ test.c = inputfs_D[i];
+ test.d = inputft_D[i];
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.resultS, outputs_S[i]);
+ CHECK_EQ(test.resultD, outputs_D[i]);
+ }
+}
+
+TEST(FMUL) {
+ const int kTableLength = 4;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ struct TestFloat {
+ float a;
+ float b;
+ float resultS;
+ double c;
+ double d;
+ double resultD;
+ };
+
+ TestFloat test;
+ // clang-format off
+ double inputfs_D[kTableLength] = {
+ 5.3, -5.3, 5.3, -2.9
+ };
+ double inputft_D[kTableLength] = {
+ 4.8, 4.8, -4.8, -0.29
+ };
+
+ float inputfs_S[kTableLength] = {
+ 5.3, -5.3, 5.3, -2.9
+ };
+ float inputft_S[kTableLength] = {
+ 4.8, 4.8, -4.8, -0.29
+ };
+ // clang-format on
+ __ Fld_s(f8, MemOperand(a0, offsetof(TestFloat, a)));
+ __ Fld_s(f9, MemOperand(a0, offsetof(TestFloat, b)));
+ __ Fld_d(f10, MemOperand(a0, offsetof(TestFloat, c)));
+ __ Fld_d(f11, MemOperand(a0, offsetof(TestFloat, d)));
+ __ fmul_s(f12, f8, f9);
+ __ fmul_d(f13, f10, f11);
+ __ Fst_s(f12, MemOperand(a0, offsetof(TestFloat, resultS)));
+ __ Fst_d(f13, MemOperand(a0, offsetof(TestFloat, resultD)));
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ for (int i = 0; i < kTableLength; i++) {
+ test.a = inputfs_S[i];
+ test.b = inputft_S[i];
+ test.c = inputfs_D[i];
+ test.d = inputft_D[i];
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.resultS, inputfs_S[i] * inputft_S[i]);
+ CHECK_EQ(test.resultD, inputfs_D[i] * inputft_D[i]);
+ }
+}
+
+TEST(FDIV) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ struct Test {
+ double dOp1;
+ double dOp2;
+ double dRes;
+ float fOp1;
+ float fOp2;
+ float fRes;
+ };
+
+ Test test;
+
+ __ movfcsr2gr(a4);
+ __ movgr2fcsr(zero_reg);
+
+ __ Fld_d(f8, MemOperand(a0, offsetof(Test, dOp1)));
+ __ Fld_d(f9, MemOperand(a0, offsetof(Test, dOp2)));
+ __ Fld_s(f10, MemOperand(a0, offsetof(Test, fOp1)));
+ __ Fld_s(f11, MemOperand(a0, offsetof(Test, fOp2)));
+ __ fdiv_d(f12, f8, f9);
+ __ fdiv_s(f13, f10, f11);
+ __ Fst_d(f12, MemOperand(a0, offsetof(Test, dRes)));
+ __ Fst_s(f13, MemOperand(a0, offsetof(Test, fRes)));
+
+ __ movgr2fcsr(a4);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+  const int test_size = 3;
+ // clang-format off
+ double dOp1[test_size] = {
+ 5.0, DBL_MAX, DBL_MAX};
+
+ double dOp2[test_size] = {
+ 2.0, 2.0, -DBL_MAX};
+
+ double dRes[test_size] = {
+ 2.5, DBL_MAX / 2.0, -1.0};
+
+ float fOp1[test_size] = {
+ 5.0, FLT_MAX, FLT_MAX};
+
+ float fOp2[test_size] = {
+ 2.0, 2.0, -FLT_MAX};
+
+ float fRes[test_size] = {
+ 2.5, FLT_MAX / 2.0, -1.0};
+ // clang-format on
+
+ for (int i = 0; i < test_size; i++) {
+ test.dOp1 = dOp1[i];
+ test.dOp2 = dOp2[i];
+ test.fOp1 = fOp1[i];
+ test.fOp2 = fOp2[i];
+
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.dRes, dRes[i]);
+ CHECK_EQ(test.fRes, fRes[i]);
+ }
+
+ test.dOp1 = DBL_MAX;
+ test.dOp2 = -0.0;
+ test.fOp1 = FLT_MAX;
+ test.fOp2 = -0.0;
+
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK(!std::isfinite(test.dRes));
+ CHECK(!std::isfinite(test.fRes));
+
+ test.dOp1 = 0.0;
+ test.dOp2 = -0.0;
+ test.fOp1 = 0.0;
+ test.fOp2 = -0.0;
+
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK(std::isnan(test.dRes));
+ CHECK(std::isnan(test.fRes));
+
+ test.dOp1 = std::numeric_limits<double>::quiet_NaN();
+ test.dOp2 = -5.0;
+ test.fOp1 = std::numeric_limits<float>::quiet_NaN();
+ test.fOp2 = -5.0;
+
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK(std::isnan(test.dRes));
+ CHECK(std::isnan(test.fRes));
+}
+
+TEST(FABS) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ struct TestFloat {
+ double a;
+ float b;
+ };
+
+ TestFloat test;
+
+ __ movfcsr2gr(a4);
+ __ movgr2fcsr(zero_reg);
+
+ __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, a)));
+ __ Fld_s(f9, MemOperand(a0, offsetof(TestFloat, b)));
+ __ fabs_d(f10, f8);
+ __ fabs_s(f11, f9);
+ __ Fst_d(f10, MemOperand(a0, offsetof(TestFloat, a)));
+ __ Fst_s(f11, MemOperand(a0, offsetof(TestFloat, b)));
+
+ __ movgr2fcsr(a4);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ test.a = -2.0;
+ test.b = -2.0;
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.a, 2.0);
+ CHECK_EQ(test.b, 2.0);
+
+ test.a = 2.0;
+ test.b = 2.0;
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.a, 2.0);
+ CHECK_EQ(test.b, 2.0);
+
+ // Testing biggest positive number
+ test.a = std::numeric_limits<double>::max();
+ test.b = std::numeric_limits<float>::max();
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.a, std::numeric_limits<double>::max());
+ CHECK_EQ(test.b, std::numeric_limits<float>::max());
+
+  // Testing the most negative number, i.e. lowest()
+ test.a = -std::numeric_limits<double>::max(); // lowest()
+ test.b = -std::numeric_limits<float>::max(); // lowest()
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.a, std::numeric_limits<double>::max());
+ CHECK_EQ(test.b, std::numeric_limits<float>::max());
+
+  // Testing the smallest positive normal number (fed in negated)
+ test.a = -std::numeric_limits<double>::min();
+ test.b = -std::numeric_limits<float>::min();
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.a, std::numeric_limits<double>::min());
+ CHECK_EQ(test.b, std::numeric_limits<float>::min());
+
+ // Testing infinity
+ test.a =
+ -std::numeric_limits<double>::max() / std::numeric_limits<double>::min();
+ test.b =
+ -std::numeric_limits<float>::max() / std::numeric_limits<float>::min();
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.a, std::numeric_limits<double>::max() /
+ std::numeric_limits<double>::min());
+ CHECK_EQ(test.b, std::numeric_limits<float>::max() /
+ std::numeric_limits<float>::min());
+
+ test.a = std::numeric_limits<double>::quiet_NaN();
+ test.b = std::numeric_limits<float>::quiet_NaN();
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK(std::isnan(test.a));
+ CHECK(std::isnan(test.b));
+
+ test.a = std::numeric_limits<double>::signaling_NaN();
+ test.b = std::numeric_limits<float>::signaling_NaN();
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK(std::isnan(test.a));
+ CHECK(std::isnan(test.b));
+}
+
+template <class T>
+struct TestCaseMaddMsub {
+ T fj, fk, fa, fd_fmadd, fd_fmsub, fd_fnmadd, fd_fnmsub;
+};
+
+template <typename T, typename F>
+void helper_fmadd_fmsub_fnmadd_fnmsub(F func) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ T x = std::sqrt(static_cast<T>(2.0));
+ T y = std::sqrt(static_cast<T>(3.0));
+ T z = std::sqrt(static_cast<T>(5.0));
+ T x2 = 11.11, y2 = 22.22, z2 = 33.33;
+ // clang-format off
+ TestCaseMaddMsub<T> test_cases[] = {
+ {x, y, z, 0.0, 0.0, 0.0, 0.0},
+ {x, y, -z, 0.0, 0.0, 0.0, 0.0},
+ {x, -y, z, 0.0, 0.0, 0.0, 0.0},
+ {x, -y, -z, 0.0, 0.0, 0.0, 0.0},
+ {-x, y, z, 0.0, 0.0, 0.0, 0.0},
+ {-x, y, -z, 0.0, 0.0, 0.0, 0.0},
+ {-x, -y, z, 0.0, 0.0, 0.0, 0.0},
+ {-x, -y, -z, 0.0, 0.0, 0.0, 0.0},
+ {-3.14, 0.2345, -123.000056, 0.0, 0.0, 0.0, 0.0},
+ {7.3, -23.257, -357.1357, 0.0, 0.0, 0.0, 0.0},
+ {x2, y2, z2, 0.0, 0.0, 0.0, 0.0},
+ {x2, y2, -z2, 0.0, 0.0, 0.0, 0.0},
+ {x2, -y2, z2, 0.0, 0.0, 0.0, 0.0},
+ {x2, -y2, -z2, 0.0, 0.0, 0.0, 0.0},
+ {-x2, y2, z2, 0.0, 0.0, 0.0, 0.0},
+ {-x2, y2, -z2, 0.0, 0.0, 0.0, 0.0},
+ {-x2, -y2, z2, 0.0, 0.0, 0.0, 0.0},
+ {-x2, -y2, -z2, 0.0, 0.0, 0.0, 0.0},
+ };
+ // clang-format on
+ if (std::is_same<T, float>::value) {
+ __ Fld_s(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fj)));
+ __ Fld_s(f9, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fk)));
+ __ Fld_s(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fa)));
+ } else if (std::is_same<T, double>::value) {
+ __ Fld_d(f8, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fj)));
+ __ Fld_d(f9, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fk)));
+ __ Fld_d(f10, MemOperand(a0, offsetof(TestCaseMaddMsub<T>, fa)));
+ } else {
+ UNREACHABLE();
+ }
+
+ func(assm);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+
+ const size_t kTableLength = sizeof(test_cases) / sizeof(TestCaseMaddMsub<T>);
+ TestCaseMaddMsub<T> tc;
+ for (size_t i = 0; i < kTableLength; i++) {
+ tc.fj = test_cases[i].fj;
+ tc.fk = test_cases[i].fk;
+ tc.fa = test_cases[i].fa;
+
+ f.Call(&tc, 0, 0, 0, 0);
+
+ T res_fmadd;
+ T res_fmsub;
+ T res_fnmadd;
+ T res_fnmsub;
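+    // Expected values follow the fused multiply-add identities:
+    //   fmadd  = fj * fk + fa      fmsub  = fj * fk - fa
+    //   fnmadd = -(fj * fk + fa)   fnmsub = -(fj * fk - fa)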
+ res_fmadd = std::fma(tc.fj, tc.fk, tc.fa);
+ res_fmsub = std::fma(tc.fj, tc.fk, -tc.fa);
+ res_fnmadd = -std::fma(tc.fj, tc.fk, tc.fa);
+ res_fnmsub = -std::fma(tc.fj, tc.fk, -tc.fa);
+
+ CHECK_EQ(tc.fd_fmadd, res_fmadd);
+ CHECK_EQ(tc.fd_fmsub, res_fmsub);
+ CHECK_EQ(tc.fd_fnmadd, res_fnmadd);
+ CHECK_EQ(tc.fd_fnmsub, res_fnmsub);
+ }
+}
+
+TEST(FMADD_FMSUB_FNMADD_FNMSUB_S) {
+ helper_fmadd_fmsub_fnmadd_fnmsub<float>([](MacroAssembler& assm) {
+ __ fmadd_s(f11, f8, f9, f10);
+ __ Fst_s(f11, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_fmadd)));
+ __ fmsub_s(f12, f8, f9, f10);
+ __ Fst_s(f12, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_fmsub)));
+ __ fnmadd_s(f13, f8, f9, f10);
+ __ Fst_s(f13, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_fnmadd)));
+ __ fnmsub_s(f14, f8, f9, f10);
+ __ Fst_s(f14, MemOperand(a0, offsetof(TestCaseMaddMsub<float>, fd_fnmsub)));
+ });
+}
+
+TEST(FMADD_FMSUB_FNMADD_FNMSUB_D) {
+ helper_fmadd_fmsub_fnmadd_fnmsub<double>([](MacroAssembler& assm) {
+ __ fmadd_d(f11, f8, f9, f10);
+ __ Fst_d(f11, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_fmadd)));
+ __ fmsub_d(f12, f8, f9, f10);
+ __ Fst_d(f12, MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_fmsub)));
+ __ fnmadd_d(f13, f8, f9, f10);
+ __ Fst_d(f13,
+ MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_fnmadd)));
+ __ fnmsub_d(f14, f8, f9, f10);
+ __ Fst_d(f14,
+ MemOperand(a0, offsetof(TestCaseMaddMsub<double>, fd_fnmsub)));
+ });
+}
+
+/*
+TEST(FSQRT_FRSQRT_FRECIP) {
+ const int kTableLength = 4;
+ const double deltaDouble = 2E-15;
+ const float deltaFloat = 2E-7;
+ const float sqrt2_s = sqrt(2);
+ const double sqrt2_d = sqrt(2);
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ struct TestFloat {
+ float a;
+ float resultS1;
+ float resultS2;
+ float resultS3;
+ double b;
+ double resultD1;
+ double resultD2;
+ double resultD3;
+ };
+ TestFloat test;
+ // clang-format off
+ double inputs_D[kTableLength] = {
+ 0.0L, 4.0L, 2.0L, 4e-28L
+ };
+
+ double outputs_D[kTableLength] = {
+ 0.0L, 2.0L, sqrt2_d, 2e-14L
+ };
+ float inputs_S[kTableLength] = {
+ 0.0, 4.0, 2.0, 4e-28
+ };
+
+ float outputs_S[kTableLength] = {
+ 0.0, 2.0, sqrt2_s, 2e-14
+ };
+ // clang-format on
+ __ Fld_s(f8, MemOperand(a0, offsetof(TestFloat, a)));
+ __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, b)));
+ __ fsqrt_s(f10, f8);
+ __ fsqrt_d(f11, f9);
+ __ frsqrt_s(f12, f8);
+ __ frsqrt_d(f13, f9);
+ __ frecip_s(f14, f8);
+ __ frecip_d(f15, f9);
+ __ Fst_s(f10, MemOperand(a0, offsetof(TestFloat, resultS1)));
+ __ Fst_d(f11, MemOperand(a0, offsetof(TestFloat, resultD1)));
+ __ Fst_s(f12, MemOperand(a0, offsetof(TestFloat, resultS2)));
+ __ Fst_d(f13, MemOperand(a0, offsetof(TestFloat, resultD2)));
+ __ Fst_s(f14, MemOperand(a0, offsetof(TestFloat, resultS3)));
+ __ Fst_d(f15, MemOperand(a0, offsetof(TestFloat, resultD3)));
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+  Handle<Code> code =
+      Factory::CodeBuilder(isolate, desc, CodeKind::STUB).Build();
+  auto f = GeneratedCode<F3>::FromCode(*code);
+
+ for (int i = 0; i < kTableLength; i++) {
+ float f1;
+ double d1;
+ test.a = inputs_S[i];
+ test.b = inputs_D[i];
+
+ f.Call(&test, 0, 0, 0, 0);
+
+ CHECK_EQ(test.resultS1, outputs_S[i]);
+ CHECK_EQ(test.resultD1, outputs_D[i]);
+
+ if (i != 0) {
+ f1 = test.resultS2 - 1.0F/outputs_S[i];
+ f1 = (f1 < 0) ? f1 : -f1;
+ CHECK(f1 <= deltaFloat);
+ d1 = test.resultD2 - 1.0L/outputs_D[i];
+ d1 = (d1 < 0) ? d1 : -d1;
+ CHECK(d1 <= deltaDouble);
+ f1 = test.resultS3 - 1.0F/inputs_S[i];
+ f1 = (f1 < 0) ? f1 : -f1;
+ CHECK(f1 <= deltaFloat);
+ d1 = test.resultD3 - 1.0L/inputs_D[i];
+ d1 = (d1 < 0) ? d1 : -d1;
+ CHECK(d1 <= deltaDouble);
+ } else {
+ CHECK_EQ(test.resultS2, 1.0F/outputs_S[i]);
+ CHECK_EQ(test.resultD2, 1.0L/outputs_D[i]);
+ CHECK_EQ(test.resultS3, 1.0F/inputs_S[i]);
+ CHECK_EQ(test.resultD3, 1.0L/inputs_D[i]);
+ }
+ }
+}*/
+
+TEST(LA15) {
+ // Test chaining of label usages within instructions (issue 1644).
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ Assembler assm(AssemblerOptions{});
+
+ Label target;
+ __ beq(a0, a1, &target);
+ __ nop();
+ __ bne(a0, a1, &target);
+ __ nop();
+ __ bind(&target);
+ __ nop();
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ f.Call(1, 1, 0, 0, 0);
+}
+
+TEST(Trampoline) {
+ static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
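+  // Short branches encode an 18-bit signed byte offset; emitting more code
+  // than that range covers forces the branch to go through a trampoline.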
+
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+ Label done;
+ size_t nr_calls = kMaxBranchOffset / kInstrSize + 5;
+
+ __ xor_(a2, a2, a2);
+ __ BranchShort(&done, eq, a0, Operand(a1));
+ for (size_t i = 0; i < nr_calls; ++i) {
+ __ addi_d(a2, a2, 1);
+ }
+ __ bind(&done);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F2>::FromCode(*code);
+
+ int64_t res = reinterpret_cast<int64_t>(f.Call(42, 42, 0, 0, 0));
+ CHECK_EQ(0, res);
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-assembler-ppc.cc b/deps/v8/test/cctest/test-assembler-ppc.cc
index 871df63cce..d7bdd7e2cd 100644
--- a/deps/v8/test/cctest/test-assembler-ppc.cc
+++ b/deps/v8/test/cctest/test-assembler-ppc.cc
@@ -1038,6 +1038,51 @@ TEST(12) {
}
#endif
+TEST(WordSizedVectorInstructions) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ Assembler assm(AssemblerOptions{});
+ // Testing word sized vector operations.
+  __ li(r0, Operand(5));
+  __ mtvsrd(v0, r0);
+  __ vspltw(v0, v0, Operand(1));  // v0 = {5, 5, 5, 5}
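+  // mtvsrd places r0 in the high doubleword of v0; vspltw then broadcasts
+  // word lane 1 (the low word of that doubleword) to all four lanes.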
+
+ // Integer
+ __ vadduwm(v1, v0, v0); // v1 = {10, 10, 10, 10}
+ __ vmuluwm(v2, v0, v1); // v2 = {50, 50, 50, 50}
+ __ vsubuhm(v3, v2, v0); // v3 = {45, 45, 45, 45}
+ __ vslw(v4, v2, v0); // v4 = {1600, 1600, 1600, 1600}
+ __ vsrw(v5, v2, v0); // v5 = {1, 1, 1, 1}
+ __ vmaxsw(v4, v5, v4); // v4 = unchanged
+ __ vcmpgtuw(v5, v2, v3); // v5 = all 1s
+ __ vand(v4, v4, v5); // v4 = unchanged
+ // FP
+ __ xvcvsxwsp(v1, v1); // v1 = Converted to SP
+ __ xvcvsxwsp(v4, v4); // v4 = Converted to SP
+ __ xvdivsp(v4, v4, v1); // v4 = {160, 160, 160, 160}
+ // Integer
+ __ xvcvspuxws(v4, v4); // v4 = Converted to Int
+ __ vor(v0, v4, v3); // v0 = {173, 173, 173, 173}
+
+ __ vupkhsw(v0, v0); // v0 = {173, 173}
+ __ mfvsrd(r3, v0);
+ __ blr();
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+#ifdef DEBUG
+ code->Print();
+#endif
+ auto f = GeneratedCode<F_iiiii>::FromCode(*code);
+ intptr_t res = reinterpret_cast<intptr_t>(f.Call(0, 0, 0, 0, 0));
+ ::printf("f() = %" V8PRIdPTR "\n", res);
+ CHECK_EQ(173, static_cast<int>(res));
+}
+
#undef __
} // namespace internal
diff --git a/deps/v8/test/cctest/test-assembler-riscv64.cc b/deps/v8/test/cctest/test-assembler-riscv64.cc
index 29f8503c09..0cbdc06c1b 100644
--- a/deps/v8/test/cctest/test-assembler-riscv64.cc
+++ b/deps/v8/test/cctest/test-assembler-riscv64.cc
@@ -1823,8 +1823,7 @@ TEST(jump_tables1) {
for (int i = 0; i < kNumCases; ++i) {
__ bind(&labels[i]);
- __ lui(a0, (values[i] + 0x800) >> 12);
- __ addi(a0, a0, (values[i] << 20 >> 20));
+ __ RV_li(a0, values[i]);
__ j(&done);
}
@@ -1860,8 +1859,7 @@ TEST(jump_tables2) {
for (int i = 0; i < kNumCases; ++i) {
__ bind(&labels[i]);
- __ lui(a0, (values[i] + 0x800) >> 12);
- __ addi(a0, a0, (values[i] << 20 >> 20));
+ __ RV_li(a0, values[i]);
__ j(&done);
}
@@ -1926,11 +1924,10 @@ TEST(jump_tables3) {
__ j(&done);
}
- __ Align(8);
__ bind(&dispatch);
{
__ BlockTrampolinePoolFor(kNumCases * 2 + 6);
-
+ __ Align(8);
__ auipc(ra, 0);
__ slli(t3, a0, 3);
__ add(t3, t3, ra);
@@ -1980,6 +1977,39 @@ TEST(li_estimate) {
}
}
+#define UTEST_LOAD_STORE_RVV(ldname, stname, SEW, arg...) \
+ TEST(RISCV_UTEST_##stname##ldname##SEW) { \
+ CcTest::InitializeVM(); \
+ Isolate* isolate = CcTest::i_isolate(); \
+ HandleScope scope(isolate); \
+ int8_t src[16] = {arg}; \
+ int8_t dst[16]; \
+ auto fn = [](MacroAssembler& assm) { \
+ __ VU.set(t0, SEW, Vlmul::m1); \
+ __ vl(v2, a0, 0, VSew::E8); \
+ __ vs(v2, a1, 0, VSew::E8); \
+ }; \
+ GenAndRunTest<int32_t, int64_t>((int64_t)src, (int64_t)dst, fn); \
+ CHECK(!memcmp(src, dst, sizeof(src))); \
+ }
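+// Each instantiation round-trips the 16 bytes: vl loads src (a0) into v2 and
+// vs stores v2 back out to dst (a1), then the buffers are compared.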
+
+#ifdef CAN_USE_RVV_INSTRUCTIONS
+UTEST_LOAD_STORE_RVV(vl, vs, E8, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ 15, 16)
+// UTEST_LOAD_STORE_RVV(vl, vs, E8, 127, 127, 127, 127, 127, 127, 127)
+
+TEST(RVV_VSETIVLI) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ auto fn = [](MacroAssembler& assm) {
+ __ VU.set(t0, VSew::E8, Vlmul::m1);
+ __ vsetivli(t0, 16, VSew::E128, Vlmul::m1);
+ };
+ GenAndRunTest(fn);
+}
+#endif
+
#undef __
} // namespace internal
diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc
index dc50bea651..b811450edf 100644
--- a/deps/v8/test/cctest/test-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-assembler-x64.cc
@@ -26,8 +26,10 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <cstdlib>
+#include <cstring>
#include <iostream>
+#include "include/v8-function.h"
#include "src/base/numbers/double.h"
#include "src/base/platform/platform.h"
#include "src/base/utils/random-number-generator.h"
@@ -2520,6 +2522,65 @@ TEST(AssemblerX64vmovups) {
CHECK_EQ(-1.5, f.Call(1.5, -1.5));
}
+TEST(AssemblerX64Regmove256bit) {
+ if (!CpuFeatures::IsSupported(AVX)) return;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ auto buffer = AllocateAssemblerBuffer();
+ Isolate* isolate = CcTest::i_isolate();
+ Assembler masm(AssemblerOptions{}, buffer->CreateView());
+ CpuFeatureScope fscope(&masm, AVX);
+
+ __ vmovdqa(ymm0, ymm1);
+ __ vmovdqu(ymm10, ymm11);
+
+ CodeDesc desc;
+ masm.GetCode(isolate, &desc);
+#ifdef OBJECT_PRINT
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ StdoutStream os;
+ code->Print(os);
+#endif
+
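+  // ymm0/ymm1 fit the compact two-byte VEX prefix (0xC5); ymm10/ymm11 need
+  // the three-byte form (0xC4) to encode the extended-register bits.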
+ byte expected[] = {// VMOVDQA
+ 0xC5, 0xFD, 0x6F, 0xC1,
+ // VMOVDQU
+ 0xC4, 0x41, 0x7E, 0x7F, 0xDA};
+ CHECK_EQ(0, memcmp(expected, desc.buffer, sizeof(expected)));
+}
+
+TEST(AssemblerX64FloatingPoint256bit) {
+ if (!CpuFeatures::IsSupported(AVX)) return;
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
+ auto buffer = AllocateAssemblerBuffer();
+ Isolate* isolate = CcTest::i_isolate();
+ Assembler masm(AssemblerOptions{}, buffer->CreateView());
+ CpuFeatureScope fscope(&masm, AVX);
+
+ __ vsqrtps(ymm0, ymm1);
+ __ vunpcklps(ymm2, ymm3, ymm14);
+ __ vsubps(ymm10, ymm11, ymm12);
+
+ CodeDesc desc;
+ masm.GetCode(isolate, &desc);
+#ifdef OBJECT_PRINT
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ StdoutStream os;
+ code->Print(os);
+#endif
+
+ byte expected[] = {// VSQRTPS
+ 0xC5, 0xFC, 0x51, 0xC1,
+ // VUNPCKLPS
+ 0xC4, 0xC1, 0x64, 0x14, 0xD6,
+ // VSUBPS
+ 0xC4, 0x41, 0x24, 0x5C, 0xD4};
+ CHECK_EQ(0, memcmp(expected, desc.buffer, sizeof(expected)));
+}
+
TEST(CpuFeatures_ProbeImpl) {
// Support for a newer extension implies support for the older extensions.
CHECK_IMPLIES(CpuFeatures::IsSupported(FMA3), CpuFeatures::IsSupported(AVX));
diff --git a/deps/v8/test/cctest/test-code-pages.cc b/deps/v8/test/cctest/test-code-pages.cc
index fec21c275e..d6d5c63251 100644
--- a/deps/v8/test/cctest/test-code-pages.cc
+++ b/deps/v8/test/cctest/test-code-pages.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/codegen/code-desc.h"
#include "src/common/globals.h"
@@ -203,7 +204,7 @@ TEST(OptimizedCodeWithCodePages) {
// If there is baseline code, check that it's only due to
// --always-sparkplug (if this check fails, we'll have to re-think this
// test).
- if (foo->shared().HasBaselineData()) {
+ if (foo->shared().HasBaselineCode()) {
CHECK(FLAG_always_sparkplug);
return;
}
diff --git a/deps/v8/test/cctest/test-code-stub-assembler.cc b/deps/v8/test/cctest/test-code-stub-assembler.cc
index 5a393184bd..dce6dda1e9 100644
--- a/deps/v8/test/cctest/test-code-stub-assembler.cc
+++ b/deps/v8/test/cctest/test-code-stub-assembler.cc
@@ -2121,15 +2121,18 @@ TEST(PopAndReturnConstant) {
using Descriptor = JSTrampolineDescriptor;
CodeAssemblerTester asm_tester(isolate, Descriptor());
- const int kNumParams = 4; // Not including receiver
+ const int kNumParams = 4 + kJSArgcReceiverSlots;
{
CodeStubAssembler m(asm_tester.state());
TNode<Int32T> argc =
m.UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
CSA_CHECK(&m, m.Word32Equal(argc, m.Int32Constant(kNumParams)));
- m.PopAndReturn(m.IntPtrConstant(kNumParams + 1), // Include receiver.
- m.SmiConstant(1234));
+ int pop_count = kNumParams;
+ if (!kJSArgcIncludesReceiver) {
+ pop_count += 1; // Include receiver.
+ }
+ m.PopAndReturn(m.IntPtrConstant(pop_count), m.SmiConstant(1234));
}
FunctionTester ft(asm_tester.GenerateCode(), 0);
@@ -2155,16 +2158,18 @@ TEST(PopAndReturnVariable) {
using Descriptor = JSTrampolineDescriptor;
CodeAssemblerTester asm_tester(isolate, Descriptor());
- const int kNumParams = 4; // Not including receiver
+ const int kNumParams = 4 + kJSArgcReceiverSlots;
{
CodeStubAssembler m(asm_tester.state());
TNode<Int32T> argc =
m.UncheckedParameter<Int32T>(Descriptor::kActualArgumentsCount);
CSA_CHECK(&m, m.Word32Equal(argc, m.Int32Constant(kNumParams)));
- TNode<Int32T> argc_with_receiver = m.Int32Add(argc, m.Int32Constant(1));
- m.PopAndReturn(m.ChangeInt32ToIntPtr(argc_with_receiver),
- m.SmiConstant(1234));
+ int pop_count = kNumParams;
+ if (!kJSArgcIncludesReceiver) {
+ pop_count += 1; // Include receiver.
+ }
+ m.PopAndReturn(m.IntPtrConstant(pop_count), m.SmiConstant(1234));
}
FunctionTester ft(asm_tester.GenerateCode(), 0);
@@ -2557,7 +2562,8 @@ class AppendJSArrayCodeStubAssembler : public CodeStubAssembler {
Object::SetElement(isolate, array, 1, Handle<Smi>(Smi::FromInt(2), isolate),
kDontThrow)
.Check();
- CodeStubArguments args(this, IntPtrConstant(kNumParams));
+ CodeStubArguments args(this,
+ IntPtrConstant(kNumParams + kJSArgcReceiverSlots));
TVariable<IntPtrT> arg_index(this);
Label bailout(this);
arg_index = IntPtrConstant(0);
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index ee255d7b1f..d74299b7f4 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -30,8 +30,10 @@
#include <memory>
+#include "include/v8-function.h"
+#include "include/v8-local-handle.h"
#include "include/v8-profiler.h"
-#include "include/v8.h"
+#include "include/v8-script.h"
#include "src/api/api-inl.h"
#include "src/codegen/compilation-cache.h"
#include "src/codegen/compiler.h"
@@ -73,7 +75,7 @@ static Handle<JSFunction> Compile(const char* source) {
.ToHandleChecked();
Handle<SharedFunctionInfo> shared =
Compiler::GetSharedFunctionInfoForScript(
- isolate, source_code, ScriptDetails(), nullptr, nullptr,
+ isolate, source_code, ScriptDetails(),
v8::ScriptCompiler::kNoCompileOptions,
ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE)
.ToHandleChecked();
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 30086e6ec9..00ac78f629 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -32,6 +32,8 @@
#include "include/libplatform/v8-tracing.h"
#include "include/v8-fast-api-calls.h"
+#include "include/v8-function.h"
+#include "include/v8-locker.h"
#include "include/v8-profiler.h"
#include "src/api/api-inl.h"
#include "src/base/platform/platform.h"
@@ -4319,10 +4321,17 @@ TEST(ClearUnusedWithEagerLogging) {
i::Isolate* isolate = CcTest::i_isolate();
i::HandleScope scope(isolate);
- CpuProfiler profiler(isolate, kDebugNaming, kEagerLogging);
+ CodeEntryStorage storage;
+ CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
+ ProfilerCodeObserver* code_observer =
+ new ProfilerCodeObserver(isolate, storage);
+
+ CpuProfiler profiler(isolate, kDebugNaming, kEagerLogging, profiles, nullptr,
+ nullptr, code_observer);
CodeMap* code_map = profiler.code_map_for_test();
size_t initial_size = code_map->size();
+ size_t profiler_size = code_observer->GetEstimatedMemoryUsage();
{
// Create and run a new script and function, generating 2 code objects.
@@ -4334,6 +4343,7 @@ TEST(ClearUnusedWithEagerLogging) {
"function some_func() {}"
"some_func();");
CHECK_GT(code_map->size(), initial_size);
+ CHECK_GT(code_observer->GetEstimatedMemoryUsage(), profiler_size);
}
// Clear the compilation cache so that there are no more references to the
@@ -4344,6 +4354,33 @@ TEST(ClearUnusedWithEagerLogging) {
// Verify that the CodeMap's size is unchanged post-GC.
CHECK_EQ(code_map->size(), initial_size);
+ CHECK_EQ(code_observer->GetEstimatedMemoryUsage(), profiler_size);
+}
+
+// Ensure that ProfilerCodeObserver doesn't compute the estimated memory
+// usage while profiling is active, since doing so could race with the
+// profiler thread.
+TEST(SkipEstimatedSizeWhenActiveProfiling) {
+ ManualGCScope manual_gc;
+ TestSetup test_setup;
+ i::Isolate* isolate = CcTest::i_isolate();
+ i::HandleScope scope(isolate);
+
+ CodeEntryStorage storage;
+ CpuProfilesCollection* profiles = new CpuProfilesCollection(isolate);
+ ProfilerCodeObserver* code_observer =
+ new ProfilerCodeObserver(isolate, storage);
+
+ CpuProfiler profiler(isolate, kDebugNaming, kEagerLogging, profiles, nullptr,
+ nullptr, code_observer);
+
+ CHECK_GT(code_observer->GetEstimatedMemoryUsage(), 0);
+
+ profiler.StartProfiling("");
+ CHECK_EQ(code_observer->GetEstimatedMemoryUsage(), 0);
+
+ profiler.StopProfiling("");
+
+ CHECK_GT(code_observer->GetEstimatedMemoryUsage(), 0);
}
} // namespace test_cpu_profiler
diff --git a/deps/v8/test/cctest/test-debug-helper.cc b/deps/v8/test/cctest/test-debug-helper.cc
index 5601bce42f..7d9b7a9971 100644
--- a/deps/v8/test/cctest/test-debug-helper.cc
+++ b/deps/v8/test/cctest/test-debug-helper.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "include/v8-template.h"
#include "src/api/api-inl.h"
#include "src/debug/debug.h"
#include "src/execution/frames-inl.h"
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 2effdabc1d..e62fef4b1c 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -27,6 +27,10 @@
#include <stdlib.h>
+#include "include/v8-extension.h"
+#include "include/v8-function.h"
+#include "include/v8-json.h"
+#include "include/v8-locker.h"
#include "src/api/api-inl.h"
#include "src/base/strings.h"
#include "src/codegen/compilation-cache.h"
@@ -938,7 +942,6 @@ TEST(BreakPointInlinedConstructorBuiltin) {
TEST(BreakPointBuiltinConcurrentOpt) {
i::FLAG_allow_natives_syntax = true;
- i::FLAG_block_concurrent_recompilation = true;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -952,19 +955,20 @@ TEST(BreakPointBuiltinConcurrentOpt) {
break_point_hit_count = 0;
builtin = CompileRun("Math.sin").As<v8::Function>();
CompileRun("function test(x) { return 1 + Math.sin(x) }");
- // Trigger concurrent compile job. It is suspended until unblock.
CompileRun(
"%PrepareFunctionForOptimization(test);"
"test(0.5); test(0.6);"
- "%OptimizeFunctionOnNextCall(test, 'concurrent'); test(0.7);");
+ "%DisableOptimizationFinalization();"
+ "%OptimizeFunctionOnNextCall(test, 'concurrent');"
+ "test(0.7);");
CHECK_EQ(0, break_point_hit_count);
// Run with breakpoint.
bp = SetBreakPoint(builtin, 0);
// Have the concurrent compile job finish now.
CompileRun(
- "%UnblockConcurrentRecompilation();"
- "%GetOptimizationStatus(test, 'sync');");
+ "%FinalizeOptimization();"
+ "%GetOptimizationStatus(test);");
CompileRun("test(0.2);");
CHECK_EQ(1, break_point_hit_count);
@@ -1446,7 +1450,6 @@ TEST(BreakPointInlineApiFunction) {
// Test that a break point can be set at a return store location.
TEST(BreakPointConditionBuiltin) {
i::FLAG_allow_natives_syntax = true;
- i::FLAG_block_concurrent_recompilation = true;
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
diff --git a/deps/v8/test/cctest/test-decls.cc b/deps/v8/test/cctest/test-decls.cc
index e2b5772654..bdd642cf81 100644
--- a/deps/v8/test/cctest/test-decls.cc
+++ b/deps/v8/test/cctest/test-decls.cc
@@ -27,8 +27,10 @@
#include <stdlib.h>
+#include "include/v8-external.h"
+#include "include/v8-initialization.h"
+#include "include/v8-template.h"
#include "src/init/v8.h"
-
#include "test/cctest/cctest.h"
namespace v8 {
diff --git a/deps/v8/test/cctest/test-deoptimization.cc b/deps/v8/test/cctest/test-deoptimization.cc
index ac79a41d3e..8b235e1dbc 100644
--- a/deps/v8/test/cctest/test-deoptimization.cc
+++ b/deps/v8/test/cctest/test-deoptimization.cc
@@ -27,6 +27,7 @@
#include <stdlib.h>
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/base/platform/platform.h"
#include "src/base/strings.h"
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index 3d8d662e18..7acdc0b493 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -740,6 +740,8 @@ TEST(DisasmIa320) {
__ vcmpunordps(xmm5, xmm4, Operand(ebx, ecx, times_4, 10000));
__ vcmpneqps(xmm5, xmm4, xmm1);
__ vcmpneqps(xmm5, xmm4, Operand(ebx, ecx, times_4, 10000));
+ __ vcmpgeps(xmm5, xmm4, xmm1);
+ __ vcmpgeps(xmm5, xmm4, Operand(ebx, ecx, times_4, 10000));
__ vandpd(xmm0, xmm1, xmm2);
__ vandpd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
@@ -825,6 +827,7 @@ TEST(DisasmIa320) {
__ vmovshdup(xmm1, xmm2);
__ vbroadcastss(xmm1, Operand(ebx, ecx, times_4, 10000));
__ vmovdqa(xmm0, Operand(ebx, ecx, times_4, 10000));
+ __ vmovdqa(xmm0, xmm7);
__ vmovdqu(xmm0, Operand(ebx, ecx, times_4, 10000));
__ vmovdqu(Operand(ebx, ecx, times_4, 10000), xmm0);
__ vmovd(xmm0, edi);
@@ -865,6 +868,18 @@ TEST(DisasmIa320) {
}
}
+ // AVX2 instructions.
+ {
+ if (CpuFeatures::IsSupported(AVX2)) {
+ CpuFeatureScope scope(&assm, AVX2);
+#define EMIT_AVX2_BROADCAST(instruction, notUsed1, notUsed2, notUsed3, \
+ notUsed4) \
+ __ instruction(xmm0, xmm1); \
+ __ instruction(xmm0, Operand(ebx, ecx, times_4, 10000));
+ AVX2_BROADCAST_LIST(EMIT_AVX2_BROADCAST)
+ }
+ }
+
// FMA3 instruction
{
if (CpuFeatures::IsSupported(FMA3)) {
diff --git a/deps/v8/test/cctest/test-disasm-loong64.cc b/deps/v8/test/cctest/test-disasm-loong64.cc
new file mode 100644
index 0000000000..51549e76d1
--- /dev/null
+++ b/deps/v8/test/cctest/test-disasm-loong64.cc
@@ -0,0 +1,895 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "src/codegen/macro-assembler.h"
+#include "src/debug/debug.h"
+#include "src/diagnostics/disasm.h"
+#include "src/diagnostics/disassembler.h"
+#include "src/execution/frames-inl.h"
+#include "src/init/v8.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+
+bool DisassembleAndCompare(byte* pc, const char* compare_string) {
+ disasm::NameConverter converter;
+ disasm::Disassembler disasm(converter);
+ base::EmbeddedVector<char, 128> disasm_buffer;
+
+ disasm.InstructionDecode(disasm_buffer, pc);
+
+ if (strcmp(compare_string, disasm_buffer.begin()) != 0) {
+ fprintf(stderr,
+ "expected: \n"
+ "%s\n"
+ "disassembled: \n"
+ "%s\n\n",
+ compare_string, disasm_buffer.begin());
+ return false;
+ }
+ return true;
+}
+
+// Set up V8 to a state where we can at least run the assembler and
+// disassembler. Declare the variables and allocate the data structures used
+// in the rest of the macros.
+#define SET_UP() \
+ CcTest::InitializeVM(); \
+ Isolate* isolate = CcTest::i_isolate(); \
+ HandleScope scope(isolate); \
+ byte* buffer = reinterpret_cast<byte*>(malloc(4 * 1024)); \
+ Assembler assm(AssemblerOptions{}, \
+ ExternalAssemblerBuffer(buffer, 4 * 1024)); \
+ bool failure = false;
+
+// This macro assembles one instruction using the preallocated assembler and
+// disassembles the generated instruction, comparing the output to the expected
+// value. If the comparison fails an error message is printed, but the test
+// continues to run until the end.
+#define COMPARE(asm_, compare_string) \
+ { \
+ int pc_offset = assm.pc_offset(); \
+ byte* progcounter = &buffer[pc_offset]; \
+ assm.asm_; \
+ if (!DisassembleAndCompare(progcounter, compare_string)) failure = true; \
+ }
+
+// Verify that all invocations of the COMPARE macro passed successfully.
+// Exit with a failure if at least one of the tests failed.
+#define VERIFY_RUN() \
+ if (failure) { \
+ FATAL("LOONG64 Disassembler tests failed.\n"); \
+ }
+
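+// Like COMPARE, but for PC-relative instructions: the disassembler appends
+// the resolved absolute target, so the expected string is extended with
+// " -> <address>" computed from the instruction's own address.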
+#define COMPARE_PC_REL(asm_, compare_string, offset) \
+ { \
+ int pc_offset = assm.pc_offset(); \
+ byte* progcounter = &buffer[pc_offset]; \
+ char str_with_address[100]; \
+ printf("%p\n", static_cast<void*>(progcounter)); \
+ snprintf(str_with_address, sizeof(str_with_address), "%s -> %p", \
+ compare_string, static_cast<void*>(progcounter + (offset * 4))); \
+ assm.asm_; \
+ if (!DisassembleAndCompare(progcounter, str_with_address)) failure = true; \
+ }
+
+TEST(TypeOp6) {
+ SET_UP();
+
+ COMPARE(jirl(ra, t7, 0), "4c000261 jirl ra, t7, 0");
+ COMPARE(jirl(ra, t7, 32767), "4dfffe61 jirl ra, t7, 32767");
+ COMPARE(jirl(ra, t7, -32768), "4e000261 jirl ra, t7, -32768");
+
+ VERIFY_RUN();
+}
+
+TEST(TypeOp6PC) {
+ SET_UP();
+
+ COMPARE_PC_REL(beqz(t7, 1048575), "43fffe6f beqz t7, 1048575",
+ 1048575);
+ COMPARE_PC_REL(beqz(t0, -1048576), "40000190 beqz t0, -1048576",
+ -1048576);
+ COMPARE_PC_REL(beqz(t1, 0), "400001a0 beqz t1, 0", 0);
+
+ COMPARE_PC_REL(bnez(a2, 1048575), "47fffccf bnez a2, 1048575",
+ 1048575);
+ COMPARE_PC_REL(bnez(s3, -1048576), "44000350 bnez s3, -1048576",
+ -1048576);
+ COMPARE_PC_REL(bnez(t8, 0), "44000280 bnez t8, 0", 0);
+
+ COMPARE_PC_REL(bceqz(FCC0, 1048575), "4bfffc0f bceqz fcc0, 1048575",
+ 1048575);
+ COMPARE_PC_REL(bceqz(FCC0, -1048576),
+ "48000010 bceqz fcc0, -1048576", -1048576);
+ COMPARE_PC_REL(bceqz(FCC0, 0), "48000000 bceqz fcc0, 0", 0);
+
+ COMPARE_PC_REL(bcnez(FCC0, 1048575), "4bfffd0f bcnez fcc0, 1048575",
+ 1048575);
+ COMPARE_PC_REL(bcnez(FCC0, -1048576),
+ "48000110 bcnez fcc0, -1048576", -1048576);
+ COMPARE_PC_REL(bcnez(FCC0, 0), "48000100 bcnez fcc0, 0", 0);
+
+ COMPARE_PC_REL(b(33554431), "53fffdff b 33554431", 33554431);
+ COMPARE_PC_REL(b(-33554432), "50000200 b -33554432", -33554432);
+ COMPARE_PC_REL(b(0), "50000000 b 0", 0);
+
+ COMPARE_PC_REL(beq(t0, a6, 32767), "59fffd8a beq t0, a6, 32767",
+ 32767);
+ COMPARE_PC_REL(beq(t1, a0, -32768), "5a0001a4 beq t1, a0, -32768",
+ -32768);
+ COMPARE_PC_REL(beq(a4, t1, 0), "5800010d beq a4, t1, 0", 0);
+
+ COMPARE_PC_REL(bne(a3, a4, 32767), "5dfffce8 bne a3, a4, 32767",
+ 32767);
+ COMPARE_PC_REL(bne(a6, a5, -32768), "5e000149 bne a6, a5, -32768",
+ -32768);
+ COMPARE_PC_REL(bne(a4, a5, 0), "5c000109 bne a4, a5, 0", 0);
+
+ COMPARE_PC_REL(blt(a4, a6, 32767), "61fffd0a blt a4, a6, 32767",
+ 32767);
+ COMPARE_PC_REL(blt(a4, a5, -32768), "62000109 blt a4, a5, -32768",
+ -32768);
+ COMPARE_PC_REL(blt(a4, a6, 0), "6000010a blt a4, a6, 0", 0);
+
+ COMPARE_PC_REL(bge(s7, a5, 32767), "65ffffc9 bge s7, a5, 32767",
+ 32767);
+ COMPARE_PC_REL(bge(a1, a3, -32768), "660000a7 bge a1, a3, -32768",
+ -32768);
+ COMPARE_PC_REL(bge(a5, s3, 0), "6400013a bge a5, s3, 0", 0);
+
+ COMPARE_PC_REL(bltu(a5, s7, 32767), "69fffd3e bltu a5, s7, 32767",
+ 32767);
+ COMPARE_PC_REL(bltu(a4, a5, -32768), "6a000109 bltu a4, a5, -32768",
+ -32768);
+ COMPARE_PC_REL(bltu(a4, t6, 0), "68000112 bltu a4, t6, 0", 0);
+
+ COMPARE_PC_REL(bgeu(a7, a6, 32767), "6dfffd6a bgeu a7, a6, 32767",
+ 32767);
+ COMPARE_PC_REL(bgeu(a5, a3, -32768), "6e000127 bgeu a5, a3, -32768",
+ -32768);
+ COMPARE_PC_REL(bgeu(t2, t1, 0), "6c0001cd bgeu t2, t1, 0", 0);
+
+ VERIFY_RUN();
+}
+
+TEST(TypeOp7) {
+ SET_UP();
+
+ COMPARE(lu12i_w(a4, 524287), "14ffffe8 lu12i.w a4, 524287");
+ COMPARE(lu12i_w(a5, -524288), "15000009 lu12i.w a5, -524288");
+ COMPARE(lu12i_w(a6, 0), "1400000a lu12i.w a6, 0");
+
+ COMPARE(lu32i_d(a7, 524287), "16ffffeb lu32i.d a7, 524287");
+  COMPARE(lu32i_d(t0, -524288), "1700000c lu32i.d t0, -524288");
+ COMPARE(lu32i_d(t1, 0), "1600000d lu32i.d t1, 0");
+
+ COMPARE(pcaddi(t1, 1), "1800002d pcaddi t1, 1");
+ COMPARE(pcaddi(t2, 524287), "18ffffee pcaddi t2, 524287");
+ COMPARE(pcaddi(t3, -524288), "1900000f pcaddi t3, -524288");
+ COMPARE(pcaddi(t4, 0), "18000010 pcaddi t4, 0");
+
+ COMPARE(pcalau12i(t5, 524287), "1afffff1 pcalau12i t5, 524287");
+ COMPARE(pcalau12i(t6, -524288), "1b000012 pcalau12i t6, -524288");
+ COMPARE(pcalau12i(a4, 0), "1a000008 pcalau12i a4, 0");
+
+ COMPARE(pcaddu12i(a5, 524287), "1cffffe9 pcaddu12i a5, 524287");
+ COMPARE(pcaddu12i(a6, -524288), "1d00000a pcaddu12i a6, -524288");
+ COMPARE(pcaddu12i(a7, 0), "1c00000b pcaddu12i a7, 0");
+
+ COMPARE(pcaddu18i(t0, 524287), "1effffec pcaddu18i t0, 524287");
+ COMPARE(pcaddu18i(t1, -524288), "1f00000d pcaddu18i t1, -524288");
+ COMPARE(pcaddu18i(t2, 0), "1e00000e pcaddu18i t2, 0");
+
+ VERIFY_RUN();
+}
+
+TEST(TypeOp8) {
+ SET_UP();
+
+ COMPARE(ll_w(t2, t3, 32764), "207ffdee ll.w t2, t3, 32764");
+ COMPARE(ll_w(t3, t4, -32768), "2080020f ll.w t3, t4, -32768");
+ COMPARE(ll_w(t5, t6, 0), "20000251 ll.w t5, t6, 0");
+
+ COMPARE(sc_w(a6, a7, 32764), "217ffd6a sc.w a6, a7, 32764");
+ COMPARE(sc_w(t0, t1, -32768), "218001ac sc.w t0, t1, -32768");
+ COMPARE(sc_w(t2, t3, 0), "210001ee sc.w t2, t3, 0");
+
+ COMPARE(ll_d(a0, a1, 32764), "227ffca4 ll.d a0, a1, 32764");
+ COMPARE(ll_d(a2, a3, -32768), "228000e6 ll.d a2, a3, -32768");
+ COMPARE(ll_d(a4, a5, 0), "22000128 ll.d a4, a5, 0");
+
+ COMPARE(sc_d(t4, t5, 32764), "237ffe30 sc.d t4, t5, 32764");
+ COMPARE(sc_d(t6, a0, -32768), "23800092 sc.d t6, a0, -32768");
+ COMPARE(sc_d(a1, a2, 0), "230000c5 sc.d a1, a2, 0");
+
+ COMPARE(ldptr_w(a4, a5, 32764), "247ffd28 ldptr.w a4, a5, 32764");
+ COMPARE(ldptr_w(a6, a7, -32768), "2480016a ldptr.w a6, a7, -32768");
+ COMPARE(ldptr_w(t0, t1, 0), "240001ac ldptr.w t0, t1, 0");
+
+ COMPARE(stptr_w(a4, a5, 32764), "257ffd28 stptr.w a4, a5, 32764");
+ COMPARE(stptr_w(a6, a7, -32768), "2580016a stptr.w a6, a7, -32768");
+ COMPARE(stptr_w(t0, t1, 0), "250001ac stptr.w t0, t1, 0");
+
+ COMPARE(ldptr_d(t2, t3, 32764), "267ffdee ldptr.d t2, t3, 32764");
+ COMPARE(ldptr_d(t4, t5, -32768), "26800230 ldptr.d t4, t5, -32768");
+ COMPARE(ldptr_d(t6, a4, 0), "26000112 ldptr.d t6, a4, 0");
+
+ COMPARE(stptr_d(a5, a6, 32764), "277ffd49 stptr.d a5, a6, 32764");
+ COMPARE(stptr_d(a7, t0, -32768), "2780018b stptr.d a7, t0, -32768");
+ COMPARE(stptr_d(t1, t2, 0), "270001cd stptr.d t1, t2, 0");
+
+ VERIFY_RUN();
+}
+
+TEST(TypeOp10) {
+ SET_UP();
+
+ COMPARE(bstrins_w(a4, a5, 31, 16),
+ "007f4128 bstrins.w a4, a5, 31, 16");
+ COMPARE(bstrins_w(a6, a7, 5, 0), "0065016a bstrins.w a6, a7, 5, 0");
+
+ COMPARE(bstrins_d(a3, zero_reg, 17, 0),
+ "00910007 bstrins.d a3, zero_reg, 17, 0");
+ COMPARE(bstrins_d(t1, zero_reg, 17, 0),
+ "0091000d bstrins.d t1, zero_reg, 17, 0");
+
+ COMPARE(bstrpick_w(t0, t1, 31, 29),
+ "007ff5ac bstrpick.w t0, t1, 31, 29");
+ COMPARE(bstrpick_w(a4, a5, 16, 0),
+ "00708128 bstrpick.w a4, a5, 16, 0");
+
+ COMPARE(bstrpick_d(a5, a5, 31, 0),
+ "00df0129 bstrpick.d a5, a5, 31, 0");
+ COMPARE(bstrpick_d(a4, a4, 25, 2),
+ "00d90908 bstrpick.d a4, a4, 25, 2");
+
+ COMPARE(slti(t2, a5, 2047), "021ffd2e slti t2, a5, 2047");
+ COMPARE(slti(a7, a1, -2048), "022000ab slti a7, a1, -2048");
+
+ COMPARE(sltui(a7, a7, 2047), "025ffd6b sltui a7, a7, 2047");
+ COMPARE(sltui(t1, t1, -2048), "026001ad sltui t1, t1, -2048");
+
+ COMPARE(addi_w(t0, t2, 2047), "029ffdcc addi.w t0, t2, 2047");
+ COMPARE(addi_w(a0, a0, -2048), "02a00084 addi.w a0, a0, -2048");
+
+ COMPARE(addi_d(a0, zero_reg, 2047),
+ "02dffc04 addi.d a0, zero_reg, 2047");
+ COMPARE(addi_d(t7, t7, -2048), "02e00273 addi.d t7, t7, -2048");
+
+ COMPARE(lu52i_d(a0, a0, 2047), "031ffc84 lu52i.d a0, a0, 2047");
+ COMPARE(lu52i_d(a1, a1, -2048), "032000a5 lu52i.d a1, a1, -2048");
+
+ COMPARE(andi(s3, a3, 0xfff), "037ffcfa andi s3, a3, 0xfff");
+ COMPARE(andi(a4, a4, 0), "03400108 andi a4, a4, 0x0");
+
+ COMPARE(ori(t6, t6, 0xfff), "03bffe52 ori t6, t6, 0xfff");
+ COMPARE(ori(t6, t6, 0), "03800252 ori t6, t6, 0x0");
+
+ COMPARE(xori(t1, t1, 0xfff), "03fffdad xori t1, t1, 0xfff");
+ COMPARE(xori(a3, a3, 0x0), "03c000e7 xori a3, a3, 0x0");
+
+ COMPARE(ld_b(a1, a1, 2047), "281ffca5 ld.b a1, a1, 2047");
+ COMPARE(ld_b(a4, a4, -2048), "28200108 ld.b a4, a4, -2048");
+
+ COMPARE(ld_h(a4, a0, 2047), "285ffc88 ld.h a4, a0, 2047");
+ COMPARE(ld_h(a4, a3, -2048), "286000e8 ld.h a4, a3, -2048");
+
+ COMPARE(ld_w(a6, a6, 2047), "289ffd4a ld.w a6, a6, 2047");
+ COMPARE(ld_w(a5, a4, -2048), "28a00109 ld.w a5, a4, -2048");
+
+ COMPARE(ld_d(a0, a3, 2047), "28dffce4 ld.d a0, a3, 2047");
+ COMPARE(ld_d(a6, fp, -2048), "28e002ca ld.d a6, fp, -2048");
+ COMPARE(ld_d(a0, a6, 0), "28c00144 ld.d a0, a6, 0");
+
+ COMPARE(st_b(a4, a0, 2047), "291ffc88 st.b a4, a0, 2047");
+ COMPARE(st_b(a6, a5, -2048), "2920012a st.b a6, a5, -2048");
+
+ COMPARE(st_h(a4, a0, 2047), "295ffc88 st.h a4, a0, 2047");
+ COMPARE(st_h(t1, t2, -2048), "296001cd st.h t1, t2, -2048");
+
+ COMPARE(st_w(t3, a4, 2047), "299ffd0f st.w t3, a4, 2047");
+ COMPARE(st_w(a3, t2, -2048), "29a001c7 st.w a3, t2, -2048");
+
+ COMPARE(st_d(s3, sp, 2047), "29dffc7a st.d s3, sp, 2047");
+ COMPARE(st_d(fp, s6, -2048), "29e003b6 st.d fp, s6, -2048");
+
+ COMPARE(ld_bu(a6, a0, 2047), "2a1ffc8a ld.bu a6, a0, 2047");
+ COMPARE(ld_bu(a7, a7, -2048), "2a20016b ld.bu a7, a7, -2048");
+
+ COMPARE(ld_hu(a7, a7, 2047), "2a5ffd6b ld.hu a7, a7, 2047");
+ COMPARE(ld_hu(a3, a3, -2048), "2a6000e7 ld.hu a3, a3, -2048");
+
+ COMPARE(ld_wu(a3, a0, 2047), "2a9ffc87 ld.wu a3, a0, 2047");
+ COMPARE(ld_wu(a3, a5, -2048), "2aa00127 ld.wu a3, a5, -2048");
+
+ COMPARE(fld_s(f0, a3, 2047), "2b1ffce0 fld.s f0, a3, 2047");
+ COMPARE(fld_s(f0, a1, -2048), "2b2000a0 fld.s f0, a1, -2048");
+
+ COMPARE(fld_d(f0, a0, 2047), "2b9ffc80 fld.d f0, a0, 2047");
+ COMPARE(fld_d(f0, fp, -2048), "2ba002c0 fld.d f0, fp, -2048");
+
+ COMPARE(fst_d(f0, fp, 2047), "2bdffec0 fst.d f0, fp, 2047");
+ COMPARE(fst_d(f0, a0, -2048), "2be00080 fst.d f0, a0, -2048");
+
+ COMPARE(fst_s(f0, a5, 2047), "2b5ffd20 fst.s f0, a5, 2047");
+ COMPARE(fst_s(f0, a3, -2048), "2b6000e0 fst.s f0, a3, -2048");
+
+ VERIFY_RUN();
+}
+
+TEST(TypeOp12) {
+ SET_UP();
+
+ COMPARE(fmadd_s(f0, f1, f2, f3), "08118820 fmadd.s f0, f1, f2, f3");
+ COMPARE(fmadd_s(f4, f5, f6, f7), "081398a4 fmadd.s f4, f5, f6, f7");
+
+ COMPARE(fmadd_d(f8, f9, f10, f11),
+ "0825a928 fmadd.d f8, f9, f10, f11");
+ COMPARE(fmadd_d(f12, f13, f14, f15),
+ "0827b9ac fmadd.d f12, f13, f14, f15");
+
+ COMPARE(fmsub_s(f0, f1, f2, f3), "08518820 fmsub.s f0, f1, f2, f3");
+ COMPARE(fmsub_s(f4, f5, f6, f7), "085398a4 fmsub.s f4, f5, f6, f7");
+
+ COMPARE(fmsub_d(f8, f9, f10, f11),
+ "0865a928 fmsub.d f8, f9, f10, f11");
+ COMPARE(fmsub_d(f12, f13, f14, f15),
+ "0867b9ac fmsub.d f12, f13, f14, f15");
+
+ COMPARE(fnmadd_s(f0, f1, f2, f3),
+ "08918820 fnmadd.s f0, f1, f2, f3");
+ COMPARE(fnmadd_s(f4, f5, f6, f7),
+ "089398a4 fnmadd.s f4, f5, f6, f7");
+
+ COMPARE(fnmadd_d(f8, f9, f10, f11),
+ "08a5a928 fnmadd.d f8, f9, f10, f11");
+ COMPARE(fnmadd_d(f12, f13, f14, f15),
+ "08a7b9ac fnmadd.d f12, f13, f14, f15");
+
+ COMPARE(fnmsub_s(f0, f1, f2, f3),
+ "08d18820 fnmsub.s f0, f1, f2, f3");
+ COMPARE(fnmsub_s(f4, f5, f6, f7),
+ "08d398a4 fnmsub.s f4, f5, f6, f7");
+
+ COMPARE(fnmsub_d(f8, f9, f10, f11),
+ "08e5a928 fnmsub.d f8, f9, f10, f11");
+ COMPARE(fnmsub_d(f12, f13, f14, f15),
+ "08e7b9ac fnmsub.d f12, f13, f14, f15");
+
+ COMPARE(fcmp_cond_s(CAF, f1, f2, FCC0),
+ "0c100820 fcmp.caf.s fcc0, f1, f2");
+ COMPARE(fcmp_cond_s(CUN, f5, f6, FCC0),
+ "0c1418a0 fcmp.cun.s fcc0, f5, f6");
+ COMPARE(fcmp_cond_s(CEQ, f9, f10, FCC0),
+ "0c122920 fcmp.ceq.s fcc0, f9, f10");
+ COMPARE(fcmp_cond_s(CUEQ, f13, f14, FCC0),
+ "0c1639a0 fcmp.cueq.s fcc0, f13, f14");
+
+ COMPARE(fcmp_cond_s(CLT, f1, f2, FCC0),
+ "0c110820 fcmp.clt.s fcc0, f1, f2");
+ COMPARE(fcmp_cond_s(CULT, f5, f6, FCC0),
+ "0c1518a0 fcmp.cult.s fcc0, f5, f6");
+ COMPARE(fcmp_cond_s(CLE, f9, f10, FCC0),
+ "0c132920 fcmp.cle.s fcc0, f9, f10");
+ COMPARE(fcmp_cond_s(CULE, f13, f14, FCC0),
+ "0c1739a0 fcmp.cule.s fcc0, f13, f14");
+
+ COMPARE(fcmp_cond_s(CNE, f1, f2, FCC0),
+ "0c180820 fcmp.cne.s fcc0, f1, f2");
+ COMPARE(fcmp_cond_s(COR, f5, f6, FCC0),
+ "0c1a18a0 fcmp.cor.s fcc0, f5, f6");
+ COMPARE(fcmp_cond_s(CUNE, f9, f10, FCC0),
+ "0c1c2920 fcmp.cune.s fcc0, f9, f10");
+ COMPARE(fcmp_cond_s(SAF, f13, f14, FCC0),
+ "0c10b9a0 fcmp.saf.s fcc0, f13, f14");
+
+ COMPARE(fcmp_cond_s(SUN, f1, f2, FCC0),
+ "0c148820 fcmp.sun.s fcc0, f1, f2");
+ COMPARE(fcmp_cond_s(SEQ, f5, f6, FCC0),
+ "0c1298a0 fcmp.seq.s fcc0, f5, f6");
+ COMPARE(fcmp_cond_s(SUEQ, f9, f10, FCC0),
+ "0c16a920 fcmp.sueq.s fcc0, f9, f10");
+ // COMPARE(fcmp_cond_s(SLT, f13, f14, FCC0),
+ // "0c11b9a0 fcmp.slt.s fcc0, f13, f14");
+
+ COMPARE(fcmp_cond_s(SULT, f1, f2, FCC0),
+ "0c158820 fcmp.sult.s fcc0, f1, f2");
+ COMPARE(fcmp_cond_s(SLE, f5, f6, FCC0),
+ "0c1398a0 fcmp.sle.s fcc0, f5, f6");
+ COMPARE(fcmp_cond_s(SULE, f9, f10, FCC0),
+ "0c17a920 fcmp.sule.s fcc0, f9, f10");
+ COMPARE(fcmp_cond_s(SNE, f13, f14, FCC0),
+ "0c18b9a0 fcmp.sne.s fcc0, f13, f14");
+ COMPARE(fcmp_cond_s(SOR, f13, f14, FCC0),
+ "0c1ab9a0 fcmp.sor.s fcc0, f13, f14");
+ COMPARE(fcmp_cond_s(SUNE, f1, f2, FCC0),
+ "0c1c8820 fcmp.sune.s fcc0, f1, f2");
+
+ COMPARE(fcmp_cond_d(CAF, f1, f2, FCC0),
+ "0c200820 fcmp.caf.d fcc0, f1, f2");
+ COMPARE(fcmp_cond_d(CUN, f5, f6, FCC0),
+ "0c2418a0 fcmp.cun.d fcc0, f5, f6");
+ COMPARE(fcmp_cond_d(CEQ, f9, f10, FCC0),
+ "0c222920 fcmp.ceq.d fcc0, f9, f10");
+ COMPARE(fcmp_cond_d(CUEQ, f13, f14, FCC0),
+ "0c2639a0 fcmp.cueq.d fcc0, f13, f14");
+
+ COMPARE(fcmp_cond_d(CLT, f1, f2, FCC0),
+ "0c210820 fcmp.clt.d fcc0, f1, f2");
+ COMPARE(fcmp_cond_d(CULT, f5, f6, FCC0),
+ "0c2518a0 fcmp.cult.d fcc0, f5, f6");
+ COMPARE(fcmp_cond_d(CLE, f9, f10, FCC0),
+ "0c232920 fcmp.cle.d fcc0, f9, f10");
+ COMPARE(fcmp_cond_d(CULE, f13, f14, FCC0),
+ "0c2739a0 fcmp.cule.d fcc0, f13, f14");
+
+ COMPARE(fcmp_cond_d(CNE, f1, f2, FCC0),
+ "0c280820 fcmp.cne.d fcc0, f1, f2");
+ COMPARE(fcmp_cond_d(COR, f5, f6, FCC0),
+ "0c2a18a0 fcmp.cor.d fcc0, f5, f6");
+ COMPARE(fcmp_cond_d(CUNE, f9, f10, FCC0),
+ "0c2c2920 fcmp.cune.d fcc0, f9, f10");
+ COMPARE(fcmp_cond_d(SAF, f13, f14, FCC0),
+ "0c20b9a0 fcmp.saf.d fcc0, f13, f14");
+
+ COMPARE(fcmp_cond_d(SUN, f1, f2, FCC0),
+ "0c248820 fcmp.sun.d fcc0, f1, f2");
+ COMPARE(fcmp_cond_d(SEQ, f5, f6, FCC0),
+ "0c2298a0 fcmp.seq.d fcc0, f5, f6");
+ COMPARE(fcmp_cond_d(SUEQ, f9, f10, FCC0),
+ "0c26a920 fcmp.sueq.d fcc0, f9, f10");
+ // COMPARE(fcmp_cond_d(SLT, f13, f14, FCC0),
+ // "0c21b9a0 fcmp.slt.d fcc0, f13, f14");
+
+ COMPARE(fcmp_cond_d(SULT, f1, f2, FCC0),
+ "0c258820 fcmp.sult.d fcc0, f1, f2");
+ COMPARE(fcmp_cond_d(SLE, f5, f6, FCC0),
+ "0c2398a0 fcmp.sle.d fcc0, f5, f6");
+ COMPARE(fcmp_cond_d(SULE, f9, f10, FCC0),
+ "0c27a920 fcmp.sule.d fcc0, f9, f10");
+ COMPARE(fcmp_cond_d(SNE, f13, f14, FCC0),
+ "0c28b9a0 fcmp.sne.d fcc0, f13, f14");
+ COMPARE(fcmp_cond_d(SOR, f13, f14, FCC0),
+ "0c2ab9a0 fcmp.sor.d fcc0, f13, f14");
+ COMPARE(fcmp_cond_d(SUNE, f1, f2, FCC0),
+ "0c2c8820 fcmp.sune.d fcc0, f1, f2");
+
+ VERIFY_RUN();
+}
+
+TEST(TypeOp14) {
+ SET_UP();
+
+ COMPARE(alsl_w(a0, a1, a2, 1), "000418a4 alsl.w a0, a1, a2, 1");
+ COMPARE(alsl_w(a3, a4, a5, 3), "00052507 alsl.w a3, a4, a5, 3");
+ COMPARE(alsl_w(a6, a7, t0, 4), "0005b16a alsl.w a6, a7, t0, 4");
+
+ COMPARE(alsl_wu(t1, t2, t3, 1), "00063dcd alsl.wu t1, t2, t3, 1");
+ COMPARE(alsl_wu(t4, t5, t6, 3), "00074a30 alsl.wu t4, t5, t6, 3");
+ COMPARE(alsl_wu(a0, a1, a2, 4), "000798a4 alsl.wu a0, a1, a2, 4");
+
+ COMPARE(alsl_d(a3, a4, a5, 1), "002c2507 alsl.d a3, a4, a5, 1");
+ COMPARE(alsl_d(a6, a7, t0, 3), "002d316a alsl.d a6, a7, t0, 3");
+ COMPARE(alsl_d(t1, t2, t3, 4), "002dbdcd alsl.d t1, t2, t3, 4");
+
+ COMPARE(bytepick_w(t4, t5, t6, 0),
+ "00084a30 bytepick.w t4, t5, t6, 0");
+ COMPARE(bytepick_w(a0, a1, a2, 3),
+ "000998a4 bytepick.w a0, a1, a2, 3");
+
+ COMPARE(bytepick_d(a6, a7, t0, 0),
+ "000c316a bytepick.d a6, a7, t0, 0");
+ COMPARE(bytepick_d(t4, t5, t6, 7),
+ "000fca30 bytepick.d t4, t5, t6, 7");
+
+ COMPARE(slli_w(a3, a3, 31), "0040fce7 slli.w a3, a3, 31");
+ COMPARE(slli_w(a6, a6, 1), "0040854a slli.w a6, a6, 1");
+
+ COMPARE(slli_d(t3, t2, 63), "0041fdcf slli.d t3, t2, 63");
+ COMPARE(slli_d(t4, a6, 1), "00410550 slli.d t4, a6, 1");
+
+ COMPARE(srli_w(a7, a7, 31), "0044fd6b srli.w a7, a7, 31");
+ COMPARE(srli_w(a4, a4, 1), "00448508 srli.w a4, a4, 1");
+
+ COMPARE(srli_d(a4, a3, 63), "0045fce8 srli.d a4, a3, 63");
+ COMPARE(srli_d(a4, a4, 1), "00450508 srli.d a4, a4, 1");
+
+ COMPARE(srai_d(a0, a0, 63), "0049fc84 srai.d a0, a0, 63");
+ COMPARE(srai_d(a4, a1, 1), "004904a8 srai.d a4, a1, 1");
+
+ COMPARE(srai_w(s4, a3, 31), "0048fcfb srai.w s4, a3, 31");
+ COMPARE(srai_w(s4, a5, 1), "0048853b srai.w s4, a5, 1");
+
+ COMPARE(rotri_d(t7, t6, 1), "004d0653 rotri.d t7, t6, 1");
+
+ VERIFY_RUN();
+}
+
+TEST(TypeOp17) {
+ SET_UP();
+
+ COMPARE(sltu(t5, t4, a4), "0012a211 sltu t5, t4, a4");
+ COMPARE(sltu(t4, zero_reg, t4), "0012c010 sltu t4, zero_reg, t4");
+
+ COMPARE(add_w(a4, a4, a6), "00102908 add.w a4, a4, a6");
+ COMPARE(add_w(a5, a6, t3), "00103d49 add.w a5, a6, t3");
+
+ COMPARE(add_d(a4, t0, t1), "0010b588 add.d a4, t0, t1");
+ COMPARE(add_d(a6, a3, t1), "0010b4ea add.d a6, a3, t1");
+
+ COMPARE(sub_w(a7, a7, a2), "0011196b sub.w a7, a7, a2");
+ COMPARE(sub_w(a2, a2, s3), "001168c6 sub.w a2, a2, s3");
+
+ COMPARE(sub_d(s3, ra, s3), "0011e83a sub.d s3, ra, s3");
+ COMPARE(sub_d(a0, a1, a2), "001198a4 sub.d a0, a1, a2");
+
+ COMPARE(slt(a5, a5, a6), "00122929 slt a5, a5, a6");
+ COMPARE(slt(a6, t3, t4), "001241ea slt a6, t3, t4");
+
+ COMPARE(masknez(a5, a5, a3), "00131d29 masknez a5, a5, a3");
+ COMPARE(masknez(a3, a4, a5), "00132507 masknez a3, a4, a5");
+
+ COMPARE(maskeqz(a6, a7, t0), "0013b16a maskeqz a6, a7, t0");
+ COMPARE(maskeqz(t1, t2, t3), "0013bdcd maskeqz t1, t2, t3");
+
+ COMPARE(or_(s3, sp, zero_reg), "0015007a or s3, sp, zero_reg");
+ COMPARE(or_(a4, a0, zero_reg), "00150088 or a4, a0, zero_reg");
+
+ COMPARE(and_(sp, sp, t6), "0014c863 and sp, sp, t6");
+ COMPARE(and_(a3, a3, a7), "0014ace7 and a3, a3, a7");
+
+ COMPARE(nor(a7, a7, a7), "00142d6b nor a7, a7, a7");
+ COMPARE(nor(t4, t5, t6), "00144a30 nor t4, t5, t6");
+
+ COMPARE(xor_(a0, a1, a2), "001598a4 xor a0, a1, a2");
+ COMPARE(xor_(a3, a4, a5), "0015a507 xor a3, a4, a5");
+
+ COMPARE(orn(a6, a7, t0), "0016316a orn a6, a7, t0");
+ COMPARE(orn(t1, t2, t3), "00163dcd orn t1, t2, t3");
+
+ COMPARE(andn(t4, t5, t6), "0016ca30 andn t4, t5, t6");
+ COMPARE(andn(a0, a1, a2), "001698a4 andn a0, a1, a2");
+
+ COMPARE(sll_w(a3, t0, a7), "00172d87 sll.w a3, t0, a7");
+ COMPARE(sll_w(a3, a4, a3), "00171d07 sll.w a3, a4, a3");
+
+ COMPARE(srl_w(a3, a4, a3), "00179d07 srl.w a3, a4, a3");
+ COMPARE(srl_w(a3, t1, t4), "0017c1a7 srl.w a3, t1, t4");
+
+ COMPARE(sra_w(a4, t4, a4), "00182208 sra.w a4, t4, a4");
+ COMPARE(sra_w(a3, t1, a6), "001829a7 sra.w a3, t1, a6");
+
+ COMPARE(sll_d(a3, a1, a3), "00189ca7 sll.d a3, a1, a3");
+ COMPARE(sll_d(a7, a4, t0), "0018b10b sll.d a7, a4, t0");
+
+ COMPARE(srl_d(a7, a7, t0), "0019316b srl.d a7, a7, t0");
+ COMPARE(srl_d(t0, a6, t0), "0019314c srl.d t0, a6, t0");
+
+ COMPARE(sra_d(a3, a4, a5), "0019a507 sra.d a3, a4, a5");
+ COMPARE(sra_d(a6, a7, t0), "0019b16a sra.d a6, a7, t0");
+
+ COMPARE(rotr_d(t1, t2, t3), "001bbdcd rotr.d t1, t2, t3");
+ COMPARE(rotr_d(t4, t5, t6), "001bca30 rotr.d t4, t5, t6");
+
+ COMPARE(rotr_w(a0, a1, a2), "001b18a4 rotr.w a0, a1, a2");
+ COMPARE(rotr_w(a3, a4, a5), "001b2507 rotr.w a3, a4, a5");
+
+ COMPARE(mul_w(t8, a5, t7), "001c4d34 mul.w t8, a5, t7");
+ COMPARE(mul_w(t4, t5, t6), "001c4a30 mul.w t4, t5, t6");
+
+ COMPARE(mulh_w(s3, a3, t7), "001cccfa mulh.w s3, a3, t7");
+ COMPARE(mulh_w(a0, a1, a2), "001c98a4 mulh.w a0, a1, a2");
+
+ COMPARE(mulh_wu(a6, a7, t0), "001d316a mulh.wu a6, a7, t0");
+ COMPARE(mulh_wu(t1, t2, t3), "001d3dcd mulh.wu t1, t2, t3");
+
+ COMPARE(mul_d(t2, a5, t1), "001db52e mul.d t2, a5, t1");
+ COMPARE(mul_d(a4, a4, a5), "001da508 mul.d a4, a4, a5");
+
+ COMPARE(mulh_d(a3, a4, a5), "001e2507 mulh.d a3, a4, a5");
+ COMPARE(mulh_d(a6, a7, t0), "001e316a mulh.d a6, a7, t0");
+
+ COMPARE(mulh_du(t1, t2, t3), "001ebdcd mulh.du t1, t2, t3");
+ COMPARE(mulh_du(t4, t5, t6), "001eca30 mulh.du t4, t5, t6");
+
+ COMPARE(mulw_d_w(a0, a1, a2), "001f18a4 mulw.d.w a0, a1, a2");
+ COMPARE(mulw_d_w(a3, a4, a5), "001f2507 mulw.d.w a3, a4, a5");
+
+ COMPARE(mulw_d_wu(a6, a7, t0), "001fb16a mulw.d.wu a6, a7, t0");
+ COMPARE(mulw_d_wu(t1, t2, t3), "001fbdcd mulw.d.wu t1, t2, t3");
+
+ COMPARE(div_w(a5, a5, a3), "00201d29 div.w a5, a5, a3");
+ COMPARE(div_w(t4, t5, t6), "00204a30 div.w t4, t5, t6");
+
+ COMPARE(mod_w(a6, t3, a6), "0020a9ea mod.w a6, t3, a6");
+ COMPARE(mod_w(a3, a4, a3), "00209d07 mod.w a3, a4, a3");
+
+ COMPARE(div_wu(t1, t2, t3), "00213dcd div.wu t1, t2, t3");
+ COMPARE(div_wu(t4, t5, t6), "00214a30 div.wu t4, t5, t6");
+
+ COMPARE(mod_wu(a0, a1, a2), "002198a4 mod.wu a0, a1, a2");
+ COMPARE(mod_wu(a3, a4, a5), "0021a507 mod.wu a3, a4, a5");
+
+ COMPARE(div_d(t0, t0, a6), "0022298c div.d t0, t0, a6");
+ COMPARE(div_d(a7, a7, a5), "0022256b div.d a7, a7, a5");
+
+ COMPARE(mod_d(a6, a7, t0), "0022b16a mod.d a6, a7, t0");
+ COMPARE(mod_d(t1, t2, t3), "0022bdcd mod.d t1, t2, t3");
+
+ COMPARE(div_du(t4, t5, t6), "00234a30 div.du t4, t5, t6");
+ COMPARE(div_du(a0, a1, a2), "002318a4 div.du a0, a1, a2");
+
+ COMPARE(mod_du(a3, a4, a5), "0023a507 mod.du a3, a4, a5");
+ COMPARE(mod_du(a6, a7, t0), "0023b16a mod.du a6, a7, t0");
+
+ COMPARE(fadd_s(f3, f4, f5), "01009483 fadd.s f3, f4, f5");
+ COMPARE(fadd_s(f6, f7, f8), "0100a0e6 fadd.s f6, f7, f8");
+
+ COMPARE(fadd_d(f0, f1, f0), "01010020 fadd.d f0, f1, f0");
+ COMPARE(fadd_d(f0, f1, f2), "01010820 fadd.d f0, f1, f2");
+
+ COMPARE(fsub_s(f9, f10, f11), "0102ad49 fsub.s f9, f10, f11");
+ COMPARE(fsub_s(f12, f13, f14), "0102b9ac fsub.s f12, f13, f14");
+
+ COMPARE(fsub_d(f30, f0, f30), "0103781e fsub.d f30, f0, f30");
+ COMPARE(fsub_d(f0, f0, f1), "01030400 fsub.d f0, f0, f1");
+
+ COMPARE(fmul_s(f15, f16, f17), "0104c60f fmul.s f15, f16, f17");
+ COMPARE(fmul_s(f18, f19, f20), "0104d272 fmul.s f18, f19, f20");
+
+ COMPARE(fmul_d(f0, f0, f1), "01050400 fmul.d f0, f0, f1");
+ COMPARE(fmul_d(f0, f0, f0), "01050000 fmul.d f0, f0, f0");
+
+ COMPARE(fdiv_s(f0, f1, f2), "01068820 fdiv.s f0, f1, f2");
+ COMPARE(fdiv_s(f3, f4, f5), "01069483 fdiv.s f3, f4, f5");
+
+ COMPARE(fdiv_d(f0, f0, f1), "01070400 fdiv.d f0, f0, f1");
+ COMPARE(fdiv_d(f0, f1, f0), "01070020 fdiv.d f0, f1, f0");
+
+ COMPARE(fmax_s(f9, f10, f11), "0108ad49 fmax.s f9, f10, f11");
+ COMPARE(fmin_s(f6, f7, f8), "010aa0e6 fmin.s f6, f7, f8");
+
+ COMPARE(fmax_d(f0, f1, f0), "01090020 fmax.d f0, f1, f0");
+ COMPARE(fmin_d(f0, f1, f0), "010b0020 fmin.d f0, f1, f0");
+
+ COMPARE(fmaxa_s(f12, f13, f14), "010cb9ac fmaxa.s f12, f13, f14");
+ COMPARE(fmina_s(f15, f16, f17), "010ec60f fmina.s f15, f16, f17");
+
+ COMPARE(fmaxa_d(f18, f19, f20), "010d5272 fmaxa.d f18, f19, f20");
+ COMPARE(fmina_d(f0, f1, f2), "010f0820 fmina.d f0, f1, f2");
+
+ COMPARE(ldx_b(a0, a1, a2), "380018a4 ldx.b a0, a1, a2");
+ COMPARE(ldx_h(a3, a4, a5), "38042507 ldx.h a3, a4, a5");
+ COMPARE(ldx_w(a6, a7, t0), "3808316a ldx.w a6, a7, t0");
+
+ COMPARE(stx_b(t1, t2, t3), "38103dcd stx.b t1, t2, t3");
+ COMPARE(stx_h(t4, t5, t6), "38144a30 stx.h t4, t5, t6");
+ COMPARE(stx_w(a0, a1, a2), "381818a4 stx.w a0, a1, a2");
+
+ COMPARE(ldx_bu(a3, a4, a5), "38202507 ldx.bu a3, a4, a5");
+ COMPARE(ldx_hu(a6, a7, t0), "3824316a ldx.hu a6, a7, t0");
+ COMPARE(ldx_wu(t1, t2, t3), "38283dcd ldx.wu t1, t2, t3");
+
+ COMPARE(ldx_d(a2, s6, t6), "380c4ba6 ldx.d a2, s6, t6");
+ COMPARE(ldx_d(t7, s6, t6), "380c4bb3 ldx.d t7, s6, t6");
+
+ COMPARE(stx_d(a4, a3, t6), "381c48e8 stx.d a4, a3, t6");
+ COMPARE(stx_d(a0, a3, t6), "381c48e4 stx.d a0, a3, t6");
+
+ COMPARE(dbar(0), "38720000 dbar 0x0(0)");
+ COMPARE(ibar(5555), "387295b3 ibar 0x15b3(5555)");
+
+ COMPARE(break_(0), "002a0000 break code: 0x0(0)");
+ COMPARE(break_(0x3fc0), "002a3fc0 break code: 0x3fc0(16320)");
+
+ COMPARE(fldx_s(f3, a4, a5), "38302503 fldx.s f3, a4, a5");
+ COMPARE(fldx_d(f6, a7, t0), "38343166 fldx.d f6, a7, t0");
+
+ COMPARE(fstx_s(f1, t2, t3), "38383dc1 fstx.s f1, t2, t3");
+ COMPARE(fstx_d(f4, t5, t6), "383c4a24 fstx.d f4, t5, t6");
+
+ COMPARE(amswap_w(a4, a5, a6), "38602548 amswap.w a4, a5, a6");
+ COMPARE(amswap_d(a7, t0, t1), "3860b1ab amswap.d a7, t0, t1");
+
+ COMPARE(amadd_w(t2, t3, t4), "38613e0e amadd.w t2, t3, t4");
+ COMPARE(amadd_d(t5, t6, a0), "3861c891 amadd.d t5, t6, a0");
+
+ COMPARE(amand_w(a1, a2, a3), "386218e5 amand.w a1, a2, a3");
+ COMPARE(amand_d(a4, a5, a6), "3862a548 amand.d a4, a5, a6");
+
+ COMPARE(amor_w(a7, t0, t1), "386331ab amor.w a7, t0, t1");
+ COMPARE(amor_d(t2, t3, t4), "3863be0e amor.d t2, t3, t4");
+
+ COMPARE(amxor_w(t5, t6, a0), "38644891 amxor.w t5, t6, a0");
+ COMPARE(amxor_d(a1, a2, a3), "386498e5 amxor.d a1, a2, a3");
+
+ COMPARE(ammax_w(a4, a5, a6), "38652548 ammax.w a4, a5, a6");
+ COMPARE(ammax_d(a7, t0, t1), "3865b1ab ammax.d a7, t0, t1");
+
+ COMPARE(ammin_w(t2, t3, t4), "38663e0e ammin.w t2, t3, t4");
+ COMPARE(ammin_d(t5, t6, a0), "3866c891 ammin.d t5, t6, a0");
+
+ COMPARE(ammax_wu(a1, a2, a3), "386718e5 ammax.wu a1, a2, a3");
+ COMPARE(ammax_du(a4, a5, a6), "3867a548 ammax.du a4, a5, a6");
+
+ COMPARE(ammin_wu(a7, t0, t1), "386831ab ammin.wu a7, t0, t1");
+ COMPARE(ammin_du(t2, t3, t4), "3868be0e ammin.du t2, t3, t4");
+
+ COMPARE(ammax_db_d(a0, a1, a2), "386e94c4 ammax_db.d a0, a1, a2");
+ COMPARE(ammax_db_du(a3, a4, a5), "3870a127 ammax_db.du a3, a4, a5");
+
+ COMPARE(ammax_db_w(a6, a7, t0), "386e2d8a ammax_db.w a6, a7, t0");
+ COMPARE(ammax_db_wu(t1, t2, t3), "387039ed ammax_db.wu t1, t2, t3");
+
+ COMPARE(ammin_db_d(t4, t5, t6), "386fc650 ammin_db.d t4, t5, t6");
+ COMPARE(ammin_db_du(a0, a1, a2), "387194c4 ammin_db.du a0, a1, a2");
+
+ COMPARE(ammin_db_wu(a3, a4, a5), "38712127 ammin_db.wu a3, a4, a5");
+ COMPARE(ammin_db_w(a6, a7, t0), "386f2d8a ammin_db.w a6, a7, t0");
+
+ COMPARE(fscaleb_s(f0, f1, f2), "01108820 fscaleb.s f0, f1, f2");
+ COMPARE(fscaleb_d(f3, f4, f5), "01111483 fscaleb.d f3, f4, f5");
+
+ COMPARE(fcopysign_s(f6, f7, f8), "0112a0e6 fcopysign.s f6, f7, f8");
+ COMPARE(fcopysign_d(f9, f10, f12),
+ "01133149 fcopysign.d f9, f10, f12");
+
+ VERIFY_RUN();
+}
+
+TEST(TypeOp22) {
+ SET_UP();
+
+ COMPARE(clz_w(a3, a0), "00001487 clz.w a3, a0");
+ COMPARE(ctz_w(a0, a1), "00001ca4 ctz.w a0, a1");
+ COMPARE(clz_d(a2, a3), "000024e6 clz.d a2, a3");
+ COMPARE(ctz_d(a4, a5), "00002d28 ctz.d a4, a5");
+
+ COMPARE(clo_w(a0, a1), "000010a4 clo.w a0, a1");
+ COMPARE(cto_w(a2, a3), "000018e6 cto.w a2, a3");
+ COMPARE(clo_d(a4, a5), "00002128 clo.d a4, a5");
+ COMPARE(cto_d(a6, a7), "0000296a cto.d a6, a7");
+
+ COMPARE(revb_2h(a6, a7), "0000316a revb.2h a6, a7");
+ COMPARE(revb_4h(t0, t1), "000035ac revb.4h t0, t1");
+ COMPARE(revb_2w(t2, t3), "000039ee revb.2w t2, t3");
+ COMPARE(revb_d(t4, t5), "00003e30 revb.d t4, t5");
+
+ COMPARE(revh_2w(a0, a1), "000040a4 revh.2w a0, a1");
+ COMPARE(revh_d(a2, a3), "000044e6 revh.d a2, a3");
+
+ COMPARE(bitrev_4b(a4, a5), "00004928 bitrev.4b a4, a5");
+ COMPARE(bitrev_8b(a6, a7), "00004d6a bitrev.8b a6, a7");
+ COMPARE(bitrev_w(t0, t1), "000051ac bitrev.w t0, t1");
+ COMPARE(bitrev_d(t2, t3), "000055ee bitrev.d t2, t3");
+
+ COMPARE(ext_w_b(t4, t5), "00005e30 ext.w.b t4, t5");
+ COMPARE(ext_w_h(a0, a1), "000058a4 ext.w.h a0, a1");
+
+ COMPARE(fabs_s(f2, f3), "01140462 fabs.s f2, f3");
+ COMPARE(fabs_d(f0, f0), "01140800 fabs.d f0, f0");
+
+ COMPARE(fneg_s(f0, f1), "01141420 fneg.s f0, f1");
+ COMPARE(fneg_d(f0, f0), "01141800 fneg.d f0, f0");
+
+ COMPARE(fsqrt_s(f4, f5), "011444a4 fsqrt.s f4, f5");
+ COMPARE(fsqrt_d(f0, f0), "01144800 fsqrt.d f0, f0");
+
+ COMPARE(fmov_s(f6, f7), "011494e6 fmov.s f6, f7");
+ COMPARE(fmov_d(f0, f1), "01149820 fmov.d f0, f1");
+ COMPARE(fmov_d(f1, f0), "01149801 fmov.d f1, f0");
+
+ COMPARE(movgr2fr_d(f0, t6), "0114aa40 movgr2fr.d f0, t6");
+ COMPARE(movgr2fr_d(f1, t6), "0114aa41 movgr2fr.d f1, t6");
+
+ COMPARE(movgr2fr_w(f30, a3), "0114a4fe movgr2fr.w f30, a3");
+ COMPARE(movgr2fr_w(f30, a0), "0114a49e movgr2fr.w f30, a0");
+
+ COMPARE(movgr2frh_w(f30, t6), "0114ae5e movgr2frh.w f30, t6");
+ COMPARE(movgr2frh_w(f0, a3), "0114ace0 movgr2frh.w f0, a3");
+
+ COMPARE(movfr2gr_s(a3, f30), "0114b7c7 movfr2gr.s a3, f30");
+
+ COMPARE(movfr2gr_d(a6, f30), "0114bbca movfr2gr.d a6, f30");
+ COMPARE(movfr2gr_d(t7, f30), "0114bbd3 movfr2gr.d t7, f30");
+
+ COMPARE(movfrh2gr_s(a5, f0), "0114bc09 movfrh2gr.s a5, f0");
+ COMPARE(movfrh2gr_s(a4, f0), "0114bc08 movfrh2gr.s a4, f0");
+
+ COMPARE(movgr2fcsr(a2), "0114c0c0 movgr2fcsr fcsr, a2");
+ COMPARE(movfcsr2gr(a4), "0114c808 movfcsr2gr a4, fcsr");
+
+ COMPARE(movfr2cf(FCC0, f0), "0114d000 movfr2cf fcc0, f0");
+ COMPARE(movcf2fr(f1, FCC1), "0114d421 movcf2fr f1, fcc1");
+
+ COMPARE(movgr2cf(FCC2, a0), "0114d882 movgr2cf fcc2, a0");
+ COMPARE(movcf2gr(a1, FCC3), "0114dc65 movcf2gr a1, fcc3");
+
+ COMPARE(fcvt_s_d(f0, f0), "01191800 fcvt.s.d f0, f0");
+ COMPARE(fcvt_d_s(f0, f0), "01192400 fcvt.d.s f0, f0");
+
+ COMPARE(ftintrm_w_s(f8, f9), "011a0528 ftintrm.w.s f8, f9");
+ COMPARE(ftintrm_w_d(f10, f11), "011a096a ftintrm.w.d f10, f11");
+ COMPARE(ftintrm_l_s(f12, f13), "011a25ac ftintrm.l.s f12, f13");
+ COMPARE(ftintrm_l_d(f14, f15), "011a29ee ftintrm.l.d f14, f15");
+
+ COMPARE(ftintrp_w_s(f16, f17), "011a4630 ftintrp.w.s f16, f17");
+ COMPARE(ftintrp_w_d(f18, f19), "011a4a72 ftintrp.w.d f18, f19");
+ COMPARE(ftintrp_l_s(f20, f21), "011a66b4 ftintrp.l.s f20, f21");
+ COMPARE(ftintrp_l_d(f0, f1), "011a6820 ftintrp.l.d f0, f1");
+
+ COMPARE(ftintrz_w_s(f30, f4), "011a849e ftintrz.w.s f30, f4");
+ COMPARE(ftintrz_w_d(f30, f4), "011a889e ftintrz.w.d f30, f4");
+ COMPARE(ftintrz_l_s(f30, f0), "011aa41e ftintrz.l.s f30, f0");
+ COMPARE(ftintrz_l_d(f30, f30), "011aabde ftintrz.l.d f30, f30");
+
+ COMPARE(ftintrne_w_s(f2, f3), "011ac462 ftintrne.w.s f2, f3");
+ COMPARE(ftintrne_w_d(f4, f5), "011ac8a4 ftintrne.w.d f4, f5");
+ COMPARE(ftintrne_l_s(f6, f7), "011ae4e6 ftintrne.l.s f6, f7");
+ COMPARE(ftintrne_l_d(f8, f9), "011ae928 ftintrne.l.d f8, f9");
+
+ COMPARE(ftint_w_s(f10, f11), "011b056a ftint.w.s f10, f11");
+ COMPARE(ftint_w_d(f12, f13), "011b09ac ftint.w.d f12, f13");
+ COMPARE(ftint_l_s(f14, f15), "011b25ee ftint.l.s f14, f15");
+ COMPARE(ftint_l_d(f16, f17), "011b2a30 ftint.l.d f16, f17");
+
+ COMPARE(ffint_s_w(f18, f19), "011d1272 ffint.s.w f18, f19");
+ COMPARE(ffint_s_l(f20, f21), "011d1ab4 ffint.s.l f20, f21");
+ COMPARE(ffint_d_w(f0, f1), "011d2020 ffint.d.w f0, f1");
+ COMPARE(ffint_d_l(f2, f3), "011d2862 ffint.d.l f2, f3");
+
+ COMPARE(frint_s(f4, f5), "011e44a4 frint.s f4, f5");
+ COMPARE(frint_d(f6, f7), "011e48e6 frint.d f6, f7");
+
+ COMPARE(frecip_s(f8, f9), "01145528 frecip.s f8, f9");
+ COMPARE(frecip_d(f10, f11), "0114596a frecip.d f10, f11");
+
+ COMPARE(frsqrt_s(f12, f13), "011465ac frsqrt.s f12, f13");
+ COMPARE(frsqrt_d(f14, f15), "011469ee frsqrt.d f14, f15");
+
+ COMPARE(fclass_s(f16, f17), "01143630 fclass.s f16, f17");
+ COMPARE(fclass_d(f18, f19), "01143a72 fclass.d f18, f19");
+
+ COMPARE(flogb_s(f20, f21), "011426b4 flogb.s f20, f21");
+ COMPARE(flogb_d(f0, f1), "01142820 flogb.d f0, f1");
+
+ VERIFY_RUN();
+}
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-disasm-riscv64.cc b/deps/v8/test/cctest/test-disasm-riscv64.cc
index 01f2888fb3..ca16604ad0 100644
--- a/deps/v8/test/cctest/test-disasm-riscv64.cc
+++ b/deps/v8/test/cctest/test-disasm-riscv64.cc
@@ -527,5 +527,61 @@ TEST(Previleged) {
VERIFY_RUN();
}
*/
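+// Disassembly tests for the RISC-V Vector (RVV) extension; compiled only when
+// RVV instructions are available.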
+#ifdef CAN_USE_RVV_INSTRUCTIONS
+TEST(RVV) {
+ SET_UP();
+ COMPARE(vsetvlmax(kScratchReg, E64, m1),
+ "018079d7 vsetvli s3, zero_reg, E64, m1");
+ COMPARE(vl(v2, a0, 0, VSew::E8), "02050107 vle8.v v2, (a0)");
+ COMPARE(vl(v2, a0, 0, VSew::E16), "02055107 vle16.v v2, (a0)");
+ COMPARE(vl(v2, a0, 0, VSew::E32), "02056107 vle32.v v2, (a0)");
+
+ COMPARE(vadd_vv(v0, v0, v1), "02008057 vadd.vv v0, v0, v1");
+ COMPARE(vadd_vx(v0, v1, t0), "0212c057 vadd.vx v0, v1, t0");
+ COMPARE(vadd_vi(v0, v1, 3), "0211b057 vadd.vi v0, v1, 3");
+ COMPARE(vsub_vv(v2, v3, v4), "0a320157 vsub.vv v2, v3, v4");
+ COMPARE(vsub_vx(v2, v3, a4), "0a374157 vsub.vx v2, v3, a4");
+ COMPARE(vsadd_vv(v0, v0, v1), "86008057 vsadd.vv v0, v0, v1");
+ COMPARE(vsadd_vx(v4, v5, t1), "86534257 vsadd.vx v4, v5, t1");
+ COMPARE(vsadd_vi(v6, v7, 5), "8672b357 vsadd.vi v6, v7, 5");
+ COMPARE(vssub_vv(v2, v3, v4), "8e320157 vssub.vv v2, v3, v4");
+ COMPARE(vssub_vx(v2, v3, t4), "8e3ec157 vssub.vx v2, v3, t4");
+ COMPARE(vor_vv(v21, v31, v9), "2bf48ad7 vor.vv v21, v31, v9");
+ COMPARE(vor_vx(v19, v29, s7), "2bdbc9d7 vor.vx v19, v29, s7");
+ COMPARE(vor_vi(v17, v28, 7), "2bc3b8d7 vor.vi v17, v28, 7");
+ COMPARE(vxor_vv(v21, v31, v9), "2ff48ad7 vxor.vv v21, v31, v9");
+ COMPARE(vxor_vx(v19, v29, s7), "2fdbc9d7 vxor.vx v19, v29, s7");
+ COMPARE(vxor_vi(v17, v28, 7), "2fc3b8d7 vxor.vi v17, v28, 7");
+ COMPARE(vand_vv(v21, v31, v9), "27f48ad7 vand.vv v21, v31, v9");
+ COMPARE(vand_vx(v19, v29, s7), "27dbc9d7 vand.vx v19, v29, s7");
+ COMPARE(vand_vi(v17, v28, 7), "27c3b8d7 vand.vi v17, v28, 7");
+ COMPARE(vmseq_vv(v17, v28, v29),
+ "63ce88d7 vmseq.vv v17, v28, v29");
+ COMPARE(vmsne_vv(v17, v28, v29), "67ce88d7 vmsne.vv v17, v28, v29");
+ COMPARE(vmseq_vx(v17, v28, t2), "63c3c8d7 vmseq.vx v17, v28, t2");
+ COMPARE(vmsne_vx(v17, v28, t6), "67cfc8d7 vmsne.vx v17, v28, t6");
+ COMPARE(vmseq_vi(v17, v28, 7), "63c3b8d7 vmseq.vi v17, v28, 7");
+ COMPARE(vmsne_vi(v17, v28, 7), "67c3b8d7 vmsne.vi v17, v28, 7");
+ COMPARE(vmsltu_vv(v17, v28, v14), "6bc708d7 vmsltu.vv v17, v28, v14");
+ COMPARE(vmsltu_vx(v17, v28, a5), "6bc7c8d7 vmsltu.vx v17, v28, a5");
+ COMPARE(vmslt_vv(v17, v28, v14), "6fc708d7 vmslt.vv v17, v28, v14");
+ COMPARE(vmslt_vx(v17, v28, a5), "6fc7c8d7 vmslt.vx v17, v28, a5");
+ COMPARE(vmsleu_vv(v17, v28, v14), "73c708d7 vmsleu.vv v17, v28, v14");
+ COMPARE(vmsleu_vx(v17, v28, a5), "73c7c8d7 vmsleu.vx v17, v28, a5");
+ COMPARE(vmsleu_vi(v17, v28, 5), "73c2b8d7 vmsleu.vi v17, v28, 5");
+ COMPARE(vmsle_vv(v17, v28, v14), "77c708d7 vmsle.vv v17, v28, v14");
+ COMPARE(vmsle_vx(v17, v28, a5), "77c7c8d7 vmsle.vx v17, v28, a5");
+ COMPARE(vmsle_vi(v17, v28, 5), "77c2b8d7 vmsle.vi v17, v28, 5");
+ COMPARE(vmsgt_vx(v17, v28, a5), "7fc7c8d7 vmsgt.vx v17, v28, a5");
+ COMPARE(vmsgt_vi(v17, v28, 5), "7fc2b8d7 vmsgt.vi v17, v28, 5");
+ COMPARE(vmsgtu_vx(v17, v28, a5), "7bc7c8d7 vmsgtu.vx v17, v28, a5");
+ COMPARE(vmsgtu_vi(v17, v28, 5), "7bc2b8d7 vmsgtu.vi v17, v28, 5");
+ COMPARE(vadc_vv(v7, v9, v6), "406483d7 vadc.vvm v7, v6, v9");
+ COMPARE(vadc_vx(v7, t6, v9), "409fc3d7 vadc.vxm v7, v9, t6");
+ COMPARE(vadc_vi(v7, 5, v9), "4092b3d7 vadc.vim v7, v9, 5");
+ VERIFY_RUN();
+}
+#endif
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index 3e0d8b14f1..95f1ab91d4 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -667,10 +667,15 @@ TEST(DisasmX64) {
__ vmovdqa(xmm4, xmm5);
__ vmovdqa(xmm4, Operand(rbx, rcx, times_4, 10000));
+ __ vmovdqa(ymm4, ymm5);
+ __ vmovdqa(xmm4, Operand(rbx, rcx, times_4, 10000));
__ vmovdqu(xmm9, Operand(rbx, rcx, times_4, 10000));
__ vmovdqu(Operand(rbx, rcx, times_4, 10000), xmm0);
__ vmovdqu(xmm4, xmm5);
+ __ vmovdqu(ymm9, Operand(rbx, rcx, times_4, 10000));
+ __ vmovdqu(Operand(rbx, rcx, times_4, 10000), ymm0);
+ __ vmovdqu(ymm4, ymm5);
__ vmovhlps(xmm1, xmm3, xmm5);
__ vmovlps(xmm8, xmm9, Operand(rbx, rcx, times_4, 10000));
@@ -739,6 +744,8 @@ TEST(DisasmX64) {
__ vcmpnltps(xmm5, xmm4, Operand(rbx, rcx, times_4, 10000));
__ vcmpnleps(xmm5, xmm4, xmm1);
__ vcmpnleps(xmm5, xmm4, Operand(rbx, rcx, times_4, 10000));
+ __ vcmpgeps(xmm5, xmm4, xmm1);
+ __ vcmpgeps(xmm5, xmm4, Operand(rbx, rcx, times_4, 10000));
__ vcmppd(xmm5, xmm4, xmm1, 1);
__ vcmppd(xmm5, xmm4, Operand(rbx, rcx, times_4, 10000), 1);
__ vcmpeqpd(xmm5, xmm4, xmm1);
@@ -758,14 +765,18 @@ TEST(DisasmX64) {
#define EMIT_SSE_UNOP_AVXINSTR(instruction, notUsed1, notUsed2) \
__ v##instruction(xmm10, xmm1); \
- __ v##instruction(xmm10, Operand(rbx, rcx, times_4, 10000));
+ __ v##instruction(xmm10, Operand(rbx, rcx, times_4, 10000)); \
+ __ v##instruction(ymm10, ymm1); \
+ __ v##instruction(ymm10, Operand(rbx, rcx, times_4, 10000));
SSE_UNOP_INSTRUCTION_LIST(EMIT_SSE_UNOP_AVXINSTR)
#undef EMIT_SSE_UNOP_AVXINSTR
-#define EMIT_SSE_BINOP_AVXINSTR(instruction, notUsed1, notUsed2) \
- __ v##instruction(xmm10, xmm5, xmm1); \
- __ v##instruction(xmm10, xmm5, Operand(rbx, rcx, times_4, 10000));
+#define EMIT_SSE_BINOP_AVXINSTR(instruction, notUsed1, notUsed2) \
+ __ v##instruction(xmm10, xmm5, xmm1); \
+ __ v##instruction(xmm10, xmm5, Operand(rbx, rcx, times_4, 10000)); \
+ __ v##instruction(ymm10, ymm5, ymm1); \
+ __ v##instruction(ymm10, ymm5, Operand(rbx, rcx, times_4, 10000));
SSE_BINOP_INSTRUCTION_LIST(EMIT_SSE_BINOP_AVXINSTR)
#undef EMIT_SSE_BINOP_AVXINSTR
@@ -869,13 +880,6 @@ TEST(DisasmX64) {
if (CpuFeatures::IsSupported(AVX2)) {
CpuFeatureScope scope(&assm, AVX2);
__ vbroadcastss(xmm1, xmm2);
- }
- }
-
- // AVX2 instructions.
- {
- if (CpuFeatures::IsSupported(AVX2)) {
- CpuFeatureScope scope(&assm, AVX2);
#define EMIT_AVX2_BROADCAST(instruction, notUsed1, notUsed2, notUsed3, \
notUsed4) \
__ instruction(xmm0, xmm1); \
@@ -1053,6 +1057,33 @@ TEST(DisasmX64) {
#endif
}
+TEST(DisasmX64YMMRegister) {
+ if (!CpuFeatures::IsSupported(AVX)) return;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ v8::internal::byte buffer[8192];
+ Assembler assm(AssemblerOptions{},
+ ExternalAssemblerBuffer(buffer, sizeof buffer));
+ CpuFeatureScope fscope(&assm, AVX);
+
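+  // Assemble a single VEX.256 move so that the buffer contains exactly one
+  // instruction to decode.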
+ __ vmovdqa(ymm0, ymm1);
+
+ base::Vector<char> actual = base::Vector<char>::New(37);
+ disasm::NameConverter converter;
+ disasm::Disassembler disassembler(converter);
+ disassembler.InstructionDecode(actual, buffer);
+#ifdef OBJECT_PRINT
+ fprintf(stdout, "Disassembled buffer: %s\n", actual.begin());
+#endif
+
+ base::Vector<const char> expected =
+ base::StaticCharVector("c5fd6fc1 vmovdqa ymm0,ymm1\0");
+ CHECK(expected == actual);
+
+ actual.Dispose();
+}
+
#undef __
} // namespace internal
diff --git a/deps/v8/test/cctest/test-factory.cc b/deps/v8/test/cctest/test-factory.cc
index 7c8d200ea0..97a4b5b959 100644
--- a/deps/v8/test/cctest/test-factory.cc
+++ b/deps/v8/test/cctest/test-factory.cc
@@ -4,7 +4,7 @@
#include <memory>
-#include "include/v8.h"
+#include "include/v8-isolate.h"
#include "src/codegen/code-desc.h"
#include "src/execution/isolate.h"
#include "src/handles/handles-inl.h"
diff --git a/deps/v8/test/cctest/test-global-handles.cc b/deps/v8/test/cctest/test-global-handles.cc
index 0ff266621b..55b34e3838 100644
--- a/deps/v8/test/cctest/test-global-handles.cc
+++ b/deps/v8/test/cctest/test-global-handles.cc
@@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "include/v8-function.h"
+#include "include/v8-locker.h"
#include "src/api/api-inl.h"
#include "src/execution/isolate.h"
#include "src/handles/global-handles.h"
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index dc23a8f601..8643f16c79 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -31,6 +31,7 @@
#include <memory>
+#include "include/v8-function.h"
#include "include/v8-profiler.h"
#include "src/api/api-inl.h"
#include "src/base/hashmap.h"
diff --git a/deps/v8/test/cctest/test-icache.cc b/deps/v8/test/cctest/test-icache.cc
index f68789df2c..be7f846d86 100644
--- a/deps/v8/test/cctest/test-icache.cc
+++ b/deps/v8/test/cctest/test-icache.cc
@@ -56,6 +56,10 @@ static void FloodWithInc(Isolate* isolate, TestingAssemblerBuffer* buffer) {
for (int i = 0; i < kNumInstr; ++i) {
__ Addu(v0, v0, Operand(1));
}
+#elif V8_TARGET_ARCH_LOONG64
+ for (int i = 0; i < kNumInstr; ++i) {
+ __ Add_w(a0, a0, Operand(1));
+ }
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
for (int i = 0; i < kNumInstr; ++i) {
__ addi(r3, r3, Operand(1));
diff --git a/deps/v8/test/cctest/test-inspector.cc b/deps/v8/test/cctest/test-inspector.cc
index e36ce19eca..c1651eaceb 100644
--- a/deps/v8/test/cctest/test-inspector.cc
+++ b/deps/v8/test/cctest/test-inspector.cc
@@ -5,7 +5,8 @@
#include <memory>
#include "include/v8-inspector.h"
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-primitive.h"
#include "src/inspector/protocol/Runtime.h"
#include "src/inspector/string-util.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-js-to-wasm.cc b/deps/v8/test/cctest/test-js-to-wasm.cc
index ce4eac0cfe..ecb481ee23 100644
--- a/deps/v8/test/cctest/test-js-to-wasm.cc
+++ b/deps/v8/test/cctest/test-js-to-wasm.cc
@@ -4,7 +4,10 @@
#include <iomanip>
-#include "include/v8.h"
+#include "include/v8-exception.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-primitive.h"
+#include "include/v8-value.h"
#include "src/api/api.h"
#include "src/wasm/wasm-module-builder.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-js-weak-refs.cc b/deps/v8/test/cctest/test-js-weak-refs.cc
index cae033c0ab..8bff23bb8c 100644
--- a/deps/v8/test/cctest/test-js-weak-refs.cc
+++ b/deps/v8/test/cctest/test-js-weak-refs.cc
@@ -31,6 +31,14 @@ Handle<JSFinalizationRegistry> ConstructJSFinalizationRegistry(
JSObject::New(finalization_registry_fun, finalization_registry_fun,
Handle<AllocationSite>::null())
.ToHandleChecked());
+
+ // JSObject::New filled all of the internal fields with undefined. Some of
+ // them have more restrictive types, so set those now.
+ finalization_registry->set_native_context(*isolate->native_context());
+ finalization_registry->set_cleanup(
+ isolate->native_context()->empty_function());
+ finalization_registry->set_flags(0);
+
#ifdef VERIFY_HEAP
finalization_registry->JSFinalizationRegistryVerify(isolate);
#endif // VERIFY_HEAP
diff --git a/deps/v8/test/cctest/test-liveedit.cc b/deps/v8/test/cctest/test-liveedit.cc
index 1ef2c12966..a214be23fa 100644
--- a/deps/v8/test/cctest/test-liveedit.cc
+++ b/deps/v8/test/cctest/test-liveedit.cc
@@ -27,10 +27,10 @@
#include <stdlib.h>
-#include "src/init/v8.h"
-
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/debug/liveedit.h"
+#include "src/init/v8.h"
#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-lockers.cc b/deps/v8/test/cctest/test-lockers.cc
index a2362651a5..c757849295 100644
--- a/deps/v8/test/cctest/test-lockers.cc
+++ b/deps/v8/test/cctest/test-lockers.cc
@@ -29,12 +29,14 @@
#include <memory>
-#include "src/init/v8.h"
-
+#include "include/v8-extension.h"
+#include "include/v8-function.h"
+#include "include/v8-locker.h"
#include "src/base/platform/platform.h"
#include "src/codegen/compilation-cache.h"
#include "src/execution/execution.h"
#include "src/execution/isolate.h"
+#include "src/init/v8.h"
#include "src/objects/objects-inl.h"
#include "src/strings/unicode-inl.h"
#include "src/utils/utils.h"
diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc
index db7917fd68..6fefd0d5df 100644
--- a/deps/v8/test/cctest/test-log-stack-tracer.cc
+++ b/deps/v8/test/cctest/test-log-stack-tracer.cc
@@ -29,6 +29,7 @@
#include <stdlib.h>
+#include "include/v8-function.h"
#include "include/v8-profiler.h"
#include "src/api/api-inl.h"
#include "src/base/strings.h"
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 06bb5cfaba..75ebc4d432 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -30,6 +30,7 @@
#include <unordered_set>
#include <vector>
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/base/strings.h"
#include "src/builtins/builtins.h"
diff --git a/deps/v8/test/cctest/test-macro-assembler-loong64.cc b/deps/v8/test/cctest/test-macro-assembler-loong64.cc
new file mode 100644
index 0000000000..d3b597a5d7
--- /dev/null
+++ b/deps/v8/test/cctest/test-macro-assembler-loong64.cc
@@ -0,0 +1,2916 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include <iostream>
+
+#include "src/base/utils/random-number-generator.h"
+#include "src/codegen/assembler-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/deoptimizer/deoptimizer.h"
+#include "src/execution/simulator.h"
+#include "src/init/v8.h"
+#include "src/objects/objects-inl.h"
+#include "src/utils/ostreams.h"
+#include "test/cctest/cctest.h"
+#include "test/common/assembler-tester.h"
+
+namespace v8 {
+namespace internal {
+
+// TODO(LOONG64): Refine these signatures per test case.
+using FV = void*(int64_t x, int64_t y, int p2, int p3, int p4);
+using F1 = void*(int x, int p1, int p2, int p3, int p4);
+using F2 = void*(int x, int y, int p2, int p3, int p4);
+using F3 = void*(void* p, int p1, int p2, int p3, int p4);
+using F4 = void*(void* p0, void* p1, int p2, int p3, int p4);
+
+#define __ masm->
+
+TEST(BYTESWAP) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
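+  // One input value per field, stored widened to 64 bits; the generated code
+  // byte-swaps each field in place, so the fields also serve as outputs.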
+ struct T {
+ uint64_t s8;
+ uint64_t s4;
+ uint64_t s2;
+ uint64_t u4;
+ uint64_t u2;
+ };
+
+ T t;
+ // clang-format off
+ uint64_t test_values[] = {0x5612FFCD9D327ACC,
+ 0x781A15C3,
+ 0xFCDE,
+ 0x9F,
+ 0xC81A15C3,
+ 0x8000000000000000,
+ 0xFFFFFFFFFFFFFFFF,
+ 0x0000000080000000,
+ 0x0000000000008000};
+ // clang-format on
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
+
+ MacroAssembler* masm = &assembler;
+
+ __ Ld_d(a4, MemOperand(a0, offsetof(T, s8)));
+ __ ByteSwapSigned(a4, a4, 8);
+ __ St_d(a4, MemOperand(a0, offsetof(T, s8)));
+
+ __ Ld_d(a4, MemOperand(a0, offsetof(T, s4)));
+ __ ByteSwapSigned(a4, a4, 4);
+ __ St_d(a4, MemOperand(a0, offsetof(T, s4)));
+
+ __ Ld_d(a4, MemOperand(a0, offsetof(T, s2)));
+ __ ByteSwapSigned(a4, a4, 2);
+ __ St_d(a4, MemOperand(a0, offsetof(T, s2)));
+
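+  // The unsigned fields are also swapped with ByteSwapSigned; the CHECKs at
+  // the end compare only the low 32/16 bits, so the widening does not matter.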
+ __ Ld_d(a4, MemOperand(a0, offsetof(T, u4)));
+ __ ByteSwapSigned(a4, a4, 4);
+ __ St_d(a4, MemOperand(a0, offsetof(T, u4)));
+
+ __ Ld_d(a4, MemOperand(a0, offsetof(T, u2)));
+ __ ByteSwapSigned(a4, a4, 2);
+ __ St_d(a4, MemOperand(a0, offsetof(T, u2)));
+
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ masm->GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+
+ for (size_t i = 0; i < arraysize(test_values); i++) {
+ int32_t in_s4 = static_cast<int32_t>(test_values[i]);
+ int16_t in_s2 = static_cast<int16_t>(test_values[i]);
+ uint32_t in_u4 = static_cast<uint32_t>(test_values[i]);
+ uint16_t in_u2 = static_cast<uint16_t>(test_values[i]);
+
+ t.s8 = test_values[i];
+ t.s4 = static_cast<uint64_t>(in_s4);
+ t.s2 = static_cast<uint64_t>(in_s2);
+ t.u4 = static_cast<uint64_t>(in_u4);
+ t.u2 = static_cast<uint64_t>(in_u2);
+
+ f.Call(&t, 0, 0, 0, 0);
+
+ CHECK_EQ(ByteReverse<uint64_t>(test_values[i]), t.s8);
+ CHECK_EQ(ByteReverse<int32_t>(in_s4), static_cast<int32_t>(t.s4));
+ CHECK_EQ(ByteReverse<int16_t>(in_s2), static_cast<int16_t>(t.s2));
+ CHECK_EQ(ByteReverse<uint32_t>(in_u4), static_cast<uint32_t>(t.u4));
+ CHECK_EQ(ByteReverse<uint16_t>(in_u2), static_cast<uint16_t>(t.u2));
+ }
+}
+
+TEST(LoadConstants) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handles(isolate);
+
+ int64_t refConstants[64];
+ int64_t result[64];
+
+ int64_t mask = 1;
+ for (int i = 0; i < 64; i++) {
+ refConstants[i] = ~(mask << i);
+ }
+
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
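+  // a0 holds the output buffer; materialize each constant with li and store
+  // it into consecutive 64-bit slots.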
+ __ or_(a4, a0, zero_reg);
+ for (int i = 0; i < 64; i++) {
+ // Load constant.
+ __ li(a5, Operand(refConstants[i]));
+ __ St_d(a5, MemOperand(a4, zero_reg));
+ __ Add_d(a4, a4, Operand(kPointerSize));
+ }
+
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ masm->GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+
+ auto f = GeneratedCode<FV>::FromCode(*code);
+ (void)f.Call(reinterpret_cast<int64_t>(result), 0, 0, 0, 0);
+ // Check results.
+ for (int i = 0; i < 64; i++) {
+ CHECK(refConstants[i] == result[i]);
+ }
+}
+
+TEST(jump_tables4) {
+  // Similar to test-assembler-loong64 jump_tables1, with an extra test for the
+  // branch trampoline required before emission of the dd table (where
+  // trampolines are blocked), and for the proper transition to long-branch
+  // mode.
+ // Regression test for v8:4294.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ const int kNumCases = 512;
+ int values[kNumCases];
+ isolate->random_number_generator()->NextBytes(values, sizeof(values));
+ Label labels[kNumCases];
+ Label near_start, end, done;
+
+ __ Push(ra);
+ __ xor_(a2, a2, a2);
+
+ __ Branch(&end);
+ __ bind(&near_start);
+
+ for (int i = 0; i < 32768 - 256; ++i) {
+ __ Add_d(a2, a2, 1);
+ }
+
+ __ GenerateSwitchTable(a0, kNumCases,
+ [&labels](size_t i) { return labels + i; });
+
+ for (int i = 0; i < kNumCases; ++i) {
+ __ bind(&labels[i]);
+ __ li(a2, values[i]);
+ __ Branch(&done);
+ }
+
+ __ bind(&done);
+ __ Pop(ra);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ __ bind(&end);
+ __ Branch(&near_start);
+
+ CodeDesc desc;
+ masm->GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ auto f = GeneratedCode<F1>::FromCode(*code);
+ for (int i = 0; i < kNumCases; ++i) {
+ int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
+ ::printf("f(%d) = %" PRId64 "\n", i, res);
+ CHECK_EQ(values[i], res);
+ }
+}
+
+TEST(jump_tables6) {
+  // Similar to test-assembler-loong64 jump_tables1, with an extra test for the
+  // branch trampoline required after emission of the dd table (where
+  // trampolines are blocked). This test checks that the number of actually
+  // generated instructions is greater than the number of instructions counted
+  // from the code, as a trampoline is expected to be generated in this case.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ const int kSwitchTableCases = 80;
+
+ const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
+ const int kTrampolineSlotsSize = Assembler::kTrampolineSlotsSize;
+ const int kSwitchTablePrologueSize = MacroAssembler::kSwitchTablePrologueSize;
+
+ const int kMaxOffsetForTrampolineStart =
+ kMaxBranchOffset - 16 * kTrampolineSlotsSize;
+ const int kFillInstr = (kMaxOffsetForTrampolineStart / kInstrSize) -
+ (kSwitchTablePrologueSize + kSwitchTableCases) - 20;
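+  // kFillInstr pads the code so that the branches following the dd table land
+  // beyond the maximum trampoline start offset (verified by the CHECKs
+  // below), forcing a trampoline to be generated after the table.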
+
+ int values[kSwitchTableCases];
+ isolate->random_number_generator()->NextBytes(values, sizeof(values));
+ Label labels[kSwitchTableCases];
+ Label near_start, end, done;
+
+ __ Push(ra);
+ __ xor_(a2, a2, a2);
+
+ int offs1 = masm->pc_offset();
+ int gen_insn = 0;
+
+ __ Branch(&end);
+ gen_insn += 1;
+ __ bind(&near_start);
+
+ for (int i = 0; i < kFillInstr; ++i) {
+ __ Add_d(a2, a2, 1);
+ }
+ gen_insn += kFillInstr;
+
+ __ GenerateSwitchTable(a0, kSwitchTableCases,
+ [&labels](size_t i) { return labels + i; });
+ gen_insn += (kSwitchTablePrologueSize + kSwitchTableCases);
+
+ for (int i = 0; i < kSwitchTableCases; ++i) {
+ __ bind(&labels[i]);
+ __ li(a2, values[i]);
+ __ Branch(&done);
+ }
+ gen_insn += 3 * kSwitchTableCases;
+
+  // If the offset from here to the first branch instruction is greater than
+  // the maximum allowed offset for a trampoline ...
+ CHECK_LT(kMaxOffsetForTrampolineStart, masm->pc_offset() - offs1);
+  // ... then the number of generated instructions must be greater than
+  // "gen_insn", as we expect a trampoline to have been generated.
+ CHECK_LT(gen_insn, (masm->pc_offset() - offs1) / kInstrSize);
+
+ __ bind(&done);
+ __ Pop(ra);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ __ bind(&end);
+ __ Branch(&near_start);
+
+ CodeDesc desc;
+ masm->GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ auto f = GeneratedCode<F1>::FromCode(*code);
+ for (int i = 0; i < kSwitchTableCases; ++i) {
+ int64_t res = reinterpret_cast<int64_t>(f.Call(i, 0, 0, 0, 0));
+ ::printf("f(%d) = %" PRId64 "\n", i, res);
+ CHECK_EQ(values[i], res);
+ }
+}
+
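+// Assembles and runs a stub computing Alsl_w(rj, rk, sa); per the expected
+// values below, this is (rj << sa) + rk on 32-bit operands, with the result
+// sign-extended to 64 bits.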
+static uint64_t run_alsl_w(uint32_t rj, uint32_t rk, int8_t sa) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ __ Alsl_w(a2, a0, a1, sa);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assembler.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+
+ auto f = GeneratedCode<F1>::FromCode(*code);
+
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(rj, rk, 0, 0, 0));
+
+ return res;
+}
+
+TEST(ALSL_W) {
+ CcTest::InitializeVM();
+ struct TestCaseAlsl {
+ int32_t rj;
+ int32_t rk;
+ uint8_t sa;
+ uint64_t expected_res;
+ };
+ // clang-format off
+ struct TestCaseAlsl tc[] = {// rj, rk, sa, expected_res
+ {0x1, 0x4, 1, 0x6},
+ {0x1, 0x4, 2, 0x8},
+ {0x1, 0x4, 3, 0xC},
+ {0x1, 0x4, 4, 0x14},
+ {0x1, 0x4, 5, 0x24},
+ {0x1, 0x0, 1, 0x2},
+ {0x1, 0x0, 2, 0x4},
+ {0x1, 0x0, 3, 0x8},
+ {0x1, 0x0, 4, 0x10},
+ {0x1, 0x0, 5, 0x20},
+ {0x0, 0x4, 1, 0x4},
+ {0x0, 0x4, 2, 0x4},
+ {0x0, 0x4, 3, 0x4},
+ {0x0, 0x4, 4, 0x4},
+ {0x0, 0x4, 5, 0x4},
+
+ // Shift overflow.
+ {INT32_MAX, 0x4, 1, 0x2},
+ {INT32_MAX >> 1, 0x4, 2, 0x0},
+ {INT32_MAX >> 2, 0x4, 3, 0xFFFFFFFFFFFFFFFC},
+ {INT32_MAX >> 3, 0x4, 4, 0xFFFFFFFFFFFFFFF4},
+ {INT32_MAX >> 4, 0x4, 5, 0xFFFFFFFFFFFFFFE4},
+
+ // Signed addition overflow.
+ {0x1, INT32_MAX - 1, 1, 0xFFFFFFFF80000000},
+ {0x1, INT32_MAX - 3, 2, 0xFFFFFFFF80000000},
+ {0x1, INT32_MAX - 7, 3, 0xFFFFFFFF80000000},
+ {0x1, INT32_MAX - 15, 4, 0xFFFFFFFF80000000},
+ {0x1, INT32_MAX - 31, 5, 0xFFFFFFFF80000000},
+
+ // Addition overflow.
+ {0x1, -2, 1, 0x0},
+ {0x1, -4, 2, 0x0},
+ {0x1, -8, 3, 0x0},
+ {0x1, -16, 4, 0x0},
+ {0x1, -32, 5, 0x0}};
+ // clang-format on
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseAlsl);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint64_t res = run_alsl_w(tc[i].rj, tc[i].rk, tc[i].sa);
+ PrintF("0x%" PRIx64 " =? 0x%" PRIx64 " == Alsl_w(a0, %x, %x, %hhu)\n",
+ tc[i].expected_res, res, tc[i].rj, tc[i].rk, tc[i].sa);
+ CHECK_EQ(tc[i].expected_res, res);
+ }
+}
+
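+// Same as run_alsl_w, but exercising the 64-bit Alsl_d: (rj << sa) + rk on
+// 64-bit operands.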
+static uint64_t run_alsl_d(uint64_t rj, uint64_t rk, int8_t sa) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ __ Alsl_d(a2, a0, a1, sa);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assembler.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+
+ auto f = GeneratedCode<FV>::FromCode(*code);
+
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(rj, rk, 0, 0, 0));
+
+ return res;
+}
+
+TEST(ALSL_D) {
+ CcTest::InitializeVM();
+ struct TestCaseAlsl {
+ int64_t rj;
+ int64_t rk;
+ uint8_t sa;
+ uint64_t expected_res;
+ };
+ // clang-format off
+ struct TestCaseAlsl tc[] = {// rj, rk, sa, expected_res
+ {0x1, 0x4, 1, 0x6},
+ {0x1, 0x4, 2, 0x8},
+ {0x1, 0x4, 3, 0xC},
+ {0x1, 0x4, 4, 0x14},
+ {0x1, 0x4, 5, 0x24},
+ {0x1, 0x0, 1, 0x2},
+ {0x1, 0x0, 2, 0x4},
+ {0x1, 0x0, 3, 0x8},
+ {0x1, 0x0, 4, 0x10},
+ {0x1, 0x0, 5, 0x20},
+ {0x0, 0x4, 1, 0x4},
+ {0x0, 0x4, 2, 0x4},
+ {0x0, 0x4, 3, 0x4},
+ {0x0, 0x4, 4, 0x4},
+ {0x0, 0x4, 5, 0x4},
+
+ // Shift overflow.
+ {INT64_MAX, 0x4, 1, 0x2},
+ {INT64_MAX >> 1, 0x4, 2, 0x0},
+ {INT64_MAX >> 2, 0x4, 3, 0xFFFFFFFFFFFFFFFC},
+ {INT64_MAX >> 3, 0x4, 4, 0xFFFFFFFFFFFFFFF4},
+ {INT64_MAX >> 4, 0x4, 5, 0xFFFFFFFFFFFFFFE4},
+
+ // Signed addition overflow.
+ {0x1, INT64_MAX - 1, 1, 0x8000000000000000},
+ {0x1, INT64_MAX - 3, 2, 0x8000000000000000},
+ {0x1, INT64_MAX - 7, 3, 0x8000000000000000},
+ {0x1, INT64_MAX - 15, 4, 0x8000000000000000},
+ {0x1, INT64_MAX - 31, 5, 0x8000000000000000},
+
+ // Addition overflow.
+ {0x1, -2, 1, 0x0},
+ {0x1, -4, 2, 0x0},
+ {0x1, -8, 3, 0x0},
+ {0x1, -16, 4, 0x0},
+ {0x1, -32, 5, 0x0}};
+ // clang-format on
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseAlsl);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint64_t res = run_alsl_d(tc[i].rj, tc[i].rk, tc[i].sa);
+    PrintF("0x%" PRIx64 " =? 0x%" PRIx64 " == Alsl_d(a0, %" PRIx64 ", %" PRIx64
+ ", %hhu)\n",
+ tc[i].expected_res, res, tc[i].rj, tc[i].rk, tc[i].sa);
+ CHECK_EQ(tc[i].expected_res, res);
+ }
+}
+// clang-format off
+static const std::vector<uint32_t> ffint_ftintrz_uint32_test_values() {
+ static const uint32_t kValues[] = {0x00000000, 0x00000001, 0x00FFFF00,
+ 0x7FFFFFFF, 0x80000000, 0x80000001,
+ 0x80FFFF00, 0x8FFFFFFF, 0xFFFFFFFF};
+ return std::vector<uint32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+static const std::vector<int32_t> ffint_ftintrz_int32_test_values() {
+ static const int32_t kValues[] = {
+ static_cast<int32_t>(0x00000000), static_cast<int32_t>(0x00000001),
+ static_cast<int32_t>(0x00FFFF00), static_cast<int32_t>(0x7FFFFFFF),
+ static_cast<int32_t>(0x80000000), static_cast<int32_t>(0x80000001),
+ static_cast<int32_t>(0x80FFFF00), static_cast<int32_t>(0x8FFFFFFF),
+ static_cast<int32_t>(0xFFFFFFFF)};
+ return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+static const std::vector<uint64_t> ffint_ftintrz_uint64_test_values() {
+ static const uint64_t kValues[] = {
+ 0x0000000000000000, 0x0000000000000001, 0x0000FFFFFFFF0000,
+ 0x7FFFFFFFFFFFFFFF, 0x8000000000000000, 0x8000000000000001,
+ 0x8000FFFFFFFF0000, 0x8FFFFFFFFFFFFFFF, 0xFFFFFFFFFFFFFFFF};
+ return std::vector<uint64_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+static const std::vector<int64_t> ffint_ftintrz_int64_test_values() {
+ static const int64_t kValues[] = {static_cast<int64_t>(0x0000000000000000),
+ static_cast<int64_t>(0x0000000000000001),
+ static_cast<int64_t>(0x0000FFFFFFFF0000),
+ static_cast<int64_t>(0x7FFFFFFFFFFFFFFF),
+ static_cast<int64_t>(0x8000000000000000),
+ static_cast<int64_t>(0x8000000000000001),
+ static_cast<int64_t>(0x8000FFFFFFFF0000),
+ static_cast<int64_t>(0x8FFFFFFFFFFFFFFF),
+ static_cast<int64_t>(0xFFFFFFFFFFFFFFFF)};
+ return std::vector<int64_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+// clang-format on
+
+// Helper macros usable as FOR_INT32_INPUTS(i, test_vector) { ... *i ... }
+#define FOR_INPUTS(ctype, itype, var, test_vector) \
+ std::vector<ctype> var##_vec = test_vector(); \
+ for (std::vector<ctype>::iterator var = var##_vec.begin(); \
+ var != var##_vec.end(); ++var)
+
+#define FOR_INPUTS2(ctype, itype, var, var2, test_vector) \
+ std::vector<ctype> var##_vec = test_vector(); \
+ std::vector<ctype>::iterator var; \
+ std::vector<ctype>::reverse_iterator var2; \
+ for (var = var##_vec.begin(), var2 = var##_vec.rbegin(); \
+ var != var##_vec.end(); ++var, ++var2)
+
+#define FOR_ENUM_INPUTS(var, type, test_vector) \
+ FOR_INPUTS(enum type, type, var, test_vector)
+#define FOR_STRUCT_INPUTS(var, type, test_vector) \
+ FOR_INPUTS(struct type, type, var, test_vector)
+#define FOR_INT32_INPUTS(var, test_vector) \
+ FOR_INPUTS(int32_t, int32, var, test_vector)
+#define FOR_INT32_INPUTS2(var, var2, test_vector) \
+ FOR_INPUTS2(int32_t, int32, var, var2, test_vector)
+#define FOR_INT64_INPUTS(var, test_vector) \
+ FOR_INPUTS(int64_t, int64, var, test_vector)
+#define FOR_UINT32_INPUTS(var, test_vector) \
+ FOR_INPUTS(uint32_t, uint32, var, test_vector)
+#define FOR_UINT64_INPUTS(var, test_vector) \
+ FOR_INPUTS(uint64_t, uint64, var, test_vector)
+
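+// Assembles the conversion snippet emitted by GenerateConvertInstructionFunc
+// (input in a0 by the calling convention used in the tests below, result
+// expected in f9), runs it and returns the integer result.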
+template <typename RET_TYPE, typename IN_TYPE, typename Func>
+RET_TYPE run_CVT(IN_TYPE x, Func GenerateConvertInstructionFunc) {
+ using F_CVT = RET_TYPE(IN_TYPE x0, int x1, int x2, int x3, int x4);
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assm;
+
+ GenerateConvertInstructionFunc(masm);
+ __ movfr2gr_d(a2, f9);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code = Factory::CodeBuilder(isolate, desc,
+ CodeKind::FOR_TESTING).Build();
+
+ auto f = GeneratedCode<F_CVT>::FromCode(*code);
+
+ return reinterpret_cast<RET_TYPE>(f.Call(x, 0, 0, 0, 0));
+}
+
+TEST(Ffint_s_uw_Ftintrz_uw_s) {
+ CcTest::InitializeVM();
+ FOR_UINT32_INPUTS(i, ffint_ftintrz_uint32_test_values) {
+ uint32_t input = *i;
+ auto fn = [](MacroAssembler* masm) {
+ __ Ffint_s_uw(f8, a0);
+ __ movgr2frh_w(f9, zero_reg);
+ __ Ftintrz_uw_s(f9, f8, f10);
+ };
+ CHECK_EQ(static_cast<float>(input), run_CVT<uint32_t>(input, fn));
+ }
+}
+
+TEST(Ffint_s_ul_Ftintrz_ul_s) {
+ CcTest::InitializeVM();
+ FOR_UINT64_INPUTS(i, ffint_ftintrz_uint64_test_values) {
+ uint64_t input = *i;
+ auto fn = [](MacroAssembler* masm) {
+ __ Ffint_s_ul(f8, a0);
+ __ Ftintrz_ul_s(f9, f8, f10, a2);
+ };
+ CHECK_EQ(static_cast<float>(input), run_CVT<uint64_t>(input, fn));
+ }
+}
+
+TEST(Ffint_d_uw_Ftintrz_uw_d) {
+ CcTest::InitializeVM();
+ FOR_UINT64_INPUTS(i, ffint_ftintrz_uint64_test_values) {
+    uint32_t input = static_cast<uint32_t>(*i);
+ auto fn = [](MacroAssembler* masm) {
+ __ Ffint_d_uw(f8, a0);
+ __ movgr2frh_w(f9, zero_reg);
+ __ Ftintrz_uw_d(f9, f8, f10);
+ };
+ CHECK_EQ(static_cast<double>(input), run_CVT<uint32_t>(input, fn));
+ }
+}
+
+TEST(Ffint_d_ul_Ftintrz_ul_d) {
+ CcTest::InitializeVM();
+ FOR_UINT64_INPUTS(i, ffint_ftintrz_uint64_test_values) {
+ uint64_t input = *i;
+ auto fn = [](MacroAssembler* masm) {
+ __ Ffint_d_ul(f8, a0);
+ __ Ftintrz_ul_d(f9, f8, f10, a2);
+ };
+ CHECK_EQ(static_cast<double>(input), run_CVT<uint64_t>(input, fn));
+ }
+}
+
+TEST(Ffint_d_l_Ftintrz_l_ud) {
+ CcTest::InitializeVM();
+ FOR_INT64_INPUTS(i, ffint_ftintrz_int64_test_values) {
+ int64_t input = *i;
+    // Compute |input| in the unsigned domain to avoid signed-negation UB on
+    // INT64_MIN.
+    uint64_t abs_input = (input < 0) ? -static_cast<uint64_t>(input)
+                                     : static_cast<uint64_t>(input);
+ auto fn = [](MacroAssembler* masm) {
+ __ movgr2fr_d(f8, a0);
+ __ ffint_d_l(f10, f8);
+ __ Ftintrz_l_ud(f9, f10, f11);
+ };
+ CHECK_EQ(static_cast<double>(abs_input), run_CVT<uint64_t>(input, fn));
+ }
+}
+
+TEST(ffint_d_l_Ftint_l_d) {
+ CcTest::InitializeVM();
+ FOR_INT64_INPUTS(i, ffint_ftintrz_int64_test_values) {
+ int64_t input = *i;
+ auto fn = [](MacroAssembler* masm) {
+ __ movgr2fr_d(f8, a0);
+ __ ffint_d_l(f10, f8);
+ __ Ftintrz_l_d(f9, f10);
+ };
+ CHECK_EQ(static_cast<double>(input), run_CVT<int64_t>(input, fn));
+ }
+}
+
+TEST(ffint_d_w_Ftint_w_d) {
+ CcTest::InitializeVM();
+ FOR_INT32_INPUTS(i, ffint_ftintrz_int32_test_values) {
+ int32_t input = *i;
+ auto fn = [](MacroAssembler* masm) {
+ __ movgr2fr_w(f8, a0);
+ __ ffint_d_w(f10, f8);
+ __ Ftintrz_w_d(f9, f10);
+ __ movfr2gr_s(a4, f9);
+ __ movgr2fr_d(f9, a4);
+ };
+ CHECK_EQ(static_cast<double>(input), run_CVT<int64_t>(input, fn));
+ }
+}
+
+static const std::vector<int64_t> overflow_int64_test_values() {
+ // clang-format off
+ static const int64_t kValues[] = {static_cast<int64_t>(0xF000000000000000),
+ static_cast<int64_t>(0x0000000000000001),
+ static_cast<int64_t>(0xFF00000000000000),
+ static_cast<int64_t>(0x0000F00111111110),
+ static_cast<int64_t>(0x0F00001000000000),
+ static_cast<int64_t>(0x991234AB12A96731),
+ static_cast<int64_t>(0xB0FFFF0F0F0F0F01),
+ static_cast<int64_t>(0x00006FFFFFFFFFFF),
+ static_cast<int64_t>(0xFFFFFFFFFFFFFFFF)};
+ // clang-format on
+ return std::vector<int64_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
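+// Checks AddOverflow_d, SubOverflow_d and MulOverflow_w against the
+// base::bits reference implementations. Each macro is exercised twice: once
+// with a separate destination register and once with the destination
+// aliasing the left-hand source.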
+TEST(OverflowInstructions) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handles(isolate);
+
+ struct T {
+ int64_t lhs;
+ int64_t rhs;
+ int64_t output_add1;
+ int64_t output_add2;
+ int64_t output_sub1;
+ int64_t output_sub2;
+ int64_t output_mul1;
+ int64_t output_mul2;
+ int64_t overflow_add1;
+ int64_t overflow_add2;
+ int64_t overflow_sub1;
+ int64_t overflow_sub2;
+ int64_t overflow_mul1;
+ int64_t overflow_mul2;
+ };
+ T t;
+
+ FOR_INT64_INPUTS(i, overflow_int64_test_values) {
+ FOR_INT64_INPUTS(j, overflow_int64_test_values) {
+ int64_t ii = *i;
+ int64_t jj = *j;
+ int64_t expected_add, expected_sub;
+ int32_t ii32 = static_cast<int32_t>(ii);
+ int32_t jj32 = static_cast<int32_t>(jj);
+ int32_t expected_mul;
+ int64_t expected_add_ovf, expected_sub_ovf, expected_mul_ovf;
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ __ ld_d(t0, a0, offsetof(T, lhs));
+ __ ld_d(t1, a0, offsetof(T, rhs));
+
+ __ AddOverflow_d(t2, t0, Operand(t1), t3);
+ __ st_d(t2, a0, offsetof(T, output_add1));
+ __ st_d(t3, a0, offsetof(T, overflow_add1));
+ __ or_(t3, zero_reg, zero_reg);
+ __ AddOverflow_d(t0, t0, Operand(t1), t3);
+ __ st_d(t0, a0, offsetof(T, output_add2));
+ __ st_d(t3, a0, offsetof(T, overflow_add2));
+
+ __ ld_d(t0, a0, offsetof(T, lhs));
+ __ ld_d(t1, a0, offsetof(T, rhs));
+
+ __ SubOverflow_d(t2, t0, Operand(t1), t3);
+ __ st_d(t2, a0, offsetof(T, output_sub1));
+ __ st_d(t3, a0, offsetof(T, overflow_sub1));
+ __ or_(t3, zero_reg, zero_reg);
+ __ SubOverflow_d(t0, t0, Operand(t1), t3);
+ __ st_d(t0, a0, offsetof(T, output_sub2));
+ __ st_d(t3, a0, offsetof(T, overflow_sub2));
+
+ __ ld_d(t0, a0, offsetof(T, lhs));
+ __ ld_d(t1, a0, offsetof(T, rhs));
+ __ slli_w(t0, t0, 0);
+ __ slli_w(t1, t1, 0);
+
+ __ MulOverflow_w(t2, t0, Operand(t1), t3);
+ __ st_d(t2, a0, offsetof(T, output_mul1));
+ __ st_d(t3, a0, offsetof(T, overflow_mul1));
+ __ or_(t3, zero_reg, zero_reg);
+ __ MulOverflow_w(t0, t0, Operand(t1), t3);
+ __ st_d(t0, a0, offsetof(T, output_mul2));
+ __ st_d(t3, a0, offsetof(T, overflow_mul2));
+
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ masm->GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ t.lhs = ii;
+ t.rhs = jj;
+ f.Call(&t, 0, 0, 0, 0);
+
+ expected_add_ovf = base::bits::SignedAddOverflow64(ii, jj, &expected_add);
+ expected_sub_ovf = base::bits::SignedSubOverflow64(ii, jj, &expected_sub);
+ expected_mul_ovf =
+ base::bits::SignedMulOverflow32(ii32, jj32, &expected_mul);
+
+ CHECK_EQ(expected_add_ovf, t.overflow_add1 < 0);
+ CHECK_EQ(expected_sub_ovf, t.overflow_sub1 < 0);
+ CHECK_EQ(expected_mul_ovf, t.overflow_mul1 != 0);
+
+ CHECK_EQ(t.overflow_add1, t.overflow_add2);
+ CHECK_EQ(t.overflow_sub1, t.overflow_sub2);
+ CHECK_EQ(t.overflow_mul1, t.overflow_mul2);
+
+ CHECK_EQ(expected_add, t.output_add1);
+ CHECK_EQ(expected_add, t.output_add2);
+ CHECK_EQ(expected_sub, t.output_sub1);
+ CHECK_EQ(expected_sub, t.output_sub2);
+ if (!expected_mul_ovf) {
+ CHECK_EQ(expected_mul, t.output_mul1);
+ CHECK_EQ(expected_mul, t.output_mul2);
+ }
+ }
+ }
+}
+
+TEST(min_max_nan) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ struct TestFloat {
+ double a;
+ double b;
+ double c;
+ double d;
+ float e;
+ float f;
+ float g;
+ float h;
+ };
+
+ TestFloat test;
+ const double dnan = std::numeric_limits<double>::quiet_NaN();
+ const double dinf = std::numeric_limits<double>::infinity();
+ const double dminf = -std::numeric_limits<double>::infinity();
+ const float fnan = std::numeric_limits<float>::quiet_NaN();
+ const float finf = std::numeric_limits<float>::infinity();
+ const float fminf = -std::numeric_limits<float>::infinity();
+ const int kTableLength = 13;
+
+ // clang-format off
+ double inputsa[kTableLength] = {dnan, 3.0, -0.0, 0.0, 42.0, dinf, dminf,
+ dinf, dnan, 3.0, dinf, dnan, dnan};
+ double inputsb[kTableLength] = {dnan, 2.0, 0.0, -0.0, dinf, 42.0, dinf,
+ dminf, 3.0, dnan, dnan, dinf, dnan};
+ double outputsdmin[kTableLength] = {dnan, 2.0, -0.0, -0.0, 42.0,
+ 42.0, dminf, dminf, dnan, dnan,
+ dnan, dnan, dnan};
+ double outputsdmax[kTableLength] = {dnan, 3.0, 0.0, 0.0, dinf, dinf, dinf,
+ dinf, dnan, dnan, dnan, dnan, dnan};
+
+ float inputse[kTableLength] = {2.0, 3.0, -0.0, 0.0, 42.0, finf, fminf,
+ finf, fnan, 3.0, finf, fnan, fnan};
+ float inputsf[kTableLength] = {3.0, 2.0, 0.0, -0.0, finf, 42.0, finf,
+ fminf, 3.0, fnan, fnan, finf, fnan};
+ float outputsfmin[kTableLength] = {2.0, 2.0, -0.0, -0.0, 42.0, 42.0, fminf,
+ fminf, fnan, fnan, fnan, fnan, fnan};
+ float outputsfmax[kTableLength] = {3.0, 3.0, 0.0, 0.0, finf, finf, finf,
+ finf, fnan, fnan, fnan, fnan, fnan};
+
+ // clang-format on
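+  // Float64Min/Float64Max (and the Float32 variants) branch to the given
+  // label when an input is NaN; these helpers emit the out-of-line code
+  // that materializes a canonical NaN result and branches back.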
+ auto handle_dnan = [masm](FPURegister dst, Label* nan, Label* back) {
+ __ bind(nan);
+ __ LoadRoot(t8, RootIndex::kNanValue);
+ __ Fld_d(dst, FieldMemOperand(t8, HeapNumber::kValueOffset));
+ __ Branch(back);
+ };
+
+ auto handle_snan = [masm, fnan](FPURegister dst, Label* nan, Label* back) {
+ __ bind(nan);
+ __ Move(dst, fnan);
+ __ Branch(back);
+ };
+
+ Label handle_mind_nan, handle_maxd_nan, handle_mins_nan, handle_maxs_nan;
+ Label back_mind_nan, back_maxd_nan, back_mins_nan, back_maxs_nan;
+
+ __ Push(s6);
+ __ InitializeRootRegister();
+ __ Fld_d(f8, MemOperand(a0, offsetof(TestFloat, a)));
+ __ Fld_d(f9, MemOperand(a0, offsetof(TestFloat, b)));
+ __ Fld_s(f10, MemOperand(a0, offsetof(TestFloat, e)));
+ __ Fld_s(f11, MemOperand(a0, offsetof(TestFloat, f)));
+ __ Float64Min(f12, f8, f9, &handle_mind_nan);
+ __ bind(&back_mind_nan);
+ __ Float64Max(f13, f8, f9, &handle_maxd_nan);
+ __ bind(&back_maxd_nan);
+ __ Float32Min(f14, f10, f11, &handle_mins_nan);
+ __ bind(&back_mins_nan);
+ __ Float32Max(f15, f10, f11, &handle_maxs_nan);
+ __ bind(&back_maxs_nan);
+ __ Fst_d(f12, MemOperand(a0, offsetof(TestFloat, c)));
+ __ Fst_d(f13, MemOperand(a0, offsetof(TestFloat, d)));
+ __ Fst_s(f14, MemOperand(a0, offsetof(TestFloat, g)));
+ __ Fst_s(f15, MemOperand(a0, offsetof(TestFloat, h)));
+ __ Pop(s6);
+ __ jirl(zero_reg, ra, 0);
+
+ handle_dnan(f12, &handle_mind_nan, &back_mind_nan);
+ handle_dnan(f13, &handle_maxd_nan, &back_maxd_nan);
+ handle_snan(f14, &handle_mins_nan, &back_mins_nan);
+ handle_snan(f15, &handle_maxs_nan, &back_maxs_nan);
+
+ CodeDesc desc;
+ masm->GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ for (int i = 0; i < kTableLength; i++) {
+ test.a = inputsa[i];
+ test.b = inputsb[i];
+ test.e = inputse[i];
+ test.f = inputsf[i];
+
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(0, memcmp(&test.c, &outputsdmin[i], sizeof(test.c)));
+ CHECK_EQ(0, memcmp(&test.d, &outputsdmax[i], sizeof(test.d)));
+ CHECK_EQ(0, memcmp(&test.g, &outputsfmin[i], sizeof(test.g)));
+ CHECK_EQ(0, memcmp(&test.h, &outputsfmax[i], sizeof(test.h)));
+ }
+}
+
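+// Copies |value| into the buffer at |in_offset|, runs the generated
+// load/store snippet over the (possibly unaligned) addresses, reads the
+// value back from |out_offset| and reports whether it round-tripped intact.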
+template <typename IN_TYPE, typename Func>
+bool run_Unaligned(char* memory_buffer, int32_t in_offset, int32_t out_offset,
+ IN_TYPE value, Func GenerateUnalignedInstructionFunc) {
+ using F_CVT = int32_t(char* x0, int x1, int x2, int x3, int x4);
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assm;
+ IN_TYPE res;
+
+ GenerateUnalignedInstructionFunc(masm, in_offset, out_offset);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+
+ auto f = GeneratedCode<F_CVT>::FromCode(*code);
+
+ MemCopy(memory_buffer + in_offset, &value, sizeof(IN_TYPE));
+ f.Call(memory_buffer, 0, 0, 0, 0);
+ MemCopy(&res, memory_buffer + out_offset, sizeof(IN_TYPE));
+
+ return res == value;
+}
+
+static const std::vector<uint64_t> unsigned_test_values() {
+ // clang-format off
+ static const uint64_t kValues[] = {
+ 0x2180F18A06384414, 0x000A714532102277, 0xBC1ACCCF180649F0,
+ 0x8000000080008000, 0x0000000000000001, 0xFFFFFFFFFFFFFFFF,
+ };
+ // clang-format on
+ return std::vector<uint64_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+static const std::vector<int32_t> unsigned_test_offset() {
+  static const int32_t kValues[] = {-132 * KB, -21 * KB, 0, 19 * KB,
+                                    135 * KB};
+ return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+static const std::vector<int32_t> unsigned_test_offset_increment() {
+ static const int32_t kValues[] = {-5, -4, -3, -2, -1, 0, 1, 2, 3, 4, 5};
+ return std::vector<int32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
+TEST(Ld_b) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+        uint8_t value = static_cast<uint8_t>(*i & 0xFF);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ auto fn_1 = [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Ld_b(a2, MemOperand(a0, in_offset));
+ __ St_b(a2, MemOperand(a0, out_offset));
+ __ or_(a0, a2, zero_reg);
+ };
+ CHECK_EQ(true, run_Unaligned<uint8_t>(buffer_middle, in_offset,
+ out_offset, value, fn_1));
+
+ auto fn_2 = [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ mov(t0, a0);
+ __ Ld_b(a0, MemOperand(a0, in_offset));
+ __ St_b(a0, MemOperand(t0, out_offset));
+ __ or_(a0, a2, zero_reg);
+ };
+ CHECK_EQ(true, run_Unaligned<uint8_t>(buffer_middle, in_offset,
+ out_offset, value, fn_2));
+
+ auto fn_3 = [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ mov(t0, a0);
+ __ Ld_bu(a0, MemOperand(a0, in_offset));
+ __ St_b(a0, MemOperand(t0, out_offset));
+ __ or_(a0, a2, zero_reg);
+ };
+ CHECK_EQ(true, run_Unaligned<uint8_t>(buffer_middle, in_offset,
+ out_offset, value, fn_3));
+
+ auto fn_4 = [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Ld_bu(a2, MemOperand(a0, in_offset));
+ __ St_b(a2, MemOperand(a0, out_offset));
+ __ or_(a0, a2, zero_reg);
+ };
+ CHECK_EQ(true, run_Unaligned<uint8_t>(buffer_middle, in_offset,
+ out_offset, value, fn_4));
+ }
+ }
+ }
+}
+
+TEST(Ld_b_bitextension) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+        uint8_t value = static_cast<uint8_t>(*i & 0xFF);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ auto fn = [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ Label success, fail, end, different;
+ __ Ld_b(t0, MemOperand(a0, in_offset));
+ __ Ld_bu(t1, MemOperand(a0, in_offset));
+ __ Branch(&different, ne, t0, Operand(t1));
+
+          // If the signed and unsigned values are the same, check that the
+          // upper bits are zero.
+ __ srai_w(t0, t0, 7);
+ __ Branch(&success, eq, t0, Operand(zero_reg));
+ __ Branch(&fail);
+
+          // If the signed and unsigned values differ, check that the upper
+          // bits are complementary.
+ __ bind(&different);
+ __ srai_w(t1, t1, 7);
+ __ Branch(&fail, ne, t1, Operand(1));
+ __ srai_w(t0, t0, 7);
+ __ addi_d(t0, t0, 1);
+ __ Branch(&fail, ne, t0, Operand(zero_reg));
+ // Fall through to success
+
+ __ bind(&success);
+ __ Ld_b(t0, MemOperand(a0, in_offset));
+ __ St_b(t0, MemOperand(a0, out_offset));
+ __ Branch(&end);
+ __ bind(&fail);
+ __ St_b(zero_reg, MemOperand(a0, out_offset));
+ __ bind(&end);
+ __ or_(a0, a2, zero_reg);
+ };
+ CHECK_EQ(true, run_Unaligned<uint8_t>(buffer_middle, in_offset,
+ out_offset, value, fn));
+ }
+ }
+ }
+}
+
+TEST(Ld_h) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+        uint16_t value = static_cast<uint16_t>(*i & 0xFFFF);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ auto fn_1 = [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Ld_h(a2, MemOperand(a0, in_offset));
+ __ St_h(a2, MemOperand(a0, out_offset));
+ __ or_(a0, a2, zero_reg);
+ };
+ CHECK_EQ(true, run_Unaligned<uint16_t>(buffer_middle, in_offset,
+ out_offset, value, fn_1));
+
+ auto fn_2 = [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ mov(t0, a0);
+ __ Ld_h(a0, MemOperand(a0, in_offset));
+ __ St_h(a0, MemOperand(t0, out_offset));
+ __ or_(a0, a2, zero_reg);
+ };
+ CHECK_EQ(true, run_Unaligned<uint16_t>(buffer_middle, in_offset,
+ out_offset, value, fn_2));
+
+ auto fn_3 = [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ mov(t0, a0);
+ __ Ld_hu(a0, MemOperand(a0, in_offset));
+ __ St_h(a0, MemOperand(t0, out_offset));
+ __ or_(a0, a2, zero_reg);
+ };
+ CHECK_EQ(true, run_Unaligned<uint16_t>(buffer_middle, in_offset,
+ out_offset, value, fn_3));
+
+ auto fn_4 = [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Ld_hu(a2, MemOperand(a0, in_offset));
+ __ St_h(a2, MemOperand(a0, out_offset));
+ __ or_(a0, a2, zero_reg);
+ };
+ CHECK_EQ(true, run_Unaligned<uint16_t>(buffer_middle, in_offset,
+ out_offset, value, fn_4));
+ }
+ }
+ }
+}
+
+TEST(Ld_h_bitextension) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+        uint16_t value = static_cast<uint16_t>(*i & 0xFFFF);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ auto fn = [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ Label success, fail, end, different;
+ __ Ld_h(t0, MemOperand(a0, in_offset));
+ __ Ld_hu(t1, MemOperand(a0, in_offset));
+ __ Branch(&different, ne, t0, Operand(t1));
+
+          // If the signed and unsigned values are the same, check that the
+          // upper bits are zero.
+ __ srai_w(t0, t0, 15);
+ __ Branch(&success, eq, t0, Operand(zero_reg));
+ __ Branch(&fail);
+
+          // If the signed and unsigned values differ, check that the upper
+          // bits are complementary.
+ __ bind(&different);
+ __ srai_w(t1, t1, 15);
+ __ Branch(&fail, ne, t1, Operand(1));
+ __ srai_w(t0, t0, 15);
+ __ addi_d(t0, t0, 1);
+ __ Branch(&fail, ne, t0, Operand(zero_reg));
+ // Fall through to success
+
+ __ bind(&success);
+ __ Ld_h(t0, MemOperand(a0, in_offset));
+ __ St_h(t0, MemOperand(a0, out_offset));
+ __ Branch(&end);
+ __ bind(&fail);
+ __ St_h(zero_reg, MemOperand(a0, out_offset));
+ __ bind(&end);
+ __ or_(a0, a2, zero_reg);
+ };
+ CHECK_EQ(true, run_Unaligned<uint16_t>(buffer_middle, in_offset,
+ out_offset, value, fn));
+ }
+ }
+ }
+}
+
+TEST(Ld_w) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+ uint32_t value = static_cast<uint32_t>(*i & 0xFFFFFFFF);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ auto fn_1 = [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Ld_w(a2, MemOperand(a0, in_offset));
+ __ St_w(a2, MemOperand(a0, out_offset));
+ __ or_(a0, a2, zero_reg);
+ };
+ CHECK_EQ(true, run_Unaligned<uint32_t>(buffer_middle, in_offset,
+ out_offset, value, fn_1));
+
+ auto fn_2 = [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ mov(t0, a0);
+ __ Ld_w(a0, MemOperand(a0, in_offset));
+ __ St_w(a0, MemOperand(t0, out_offset));
+ __ or_(a0, a2, zero_reg);
+ };
+        CHECK_EQ(true, run_Unaligned<uint32_t>(buffer_middle, in_offset,
+                                               out_offset, value, fn_2));
+
+ auto fn_3 = [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Ld_wu(a2, MemOperand(a0, in_offset));
+ __ St_w(a2, MemOperand(a0, out_offset));
+ __ or_(a0, a2, zero_reg);
+ };
+ CHECK_EQ(true, run_Unaligned<uint32_t>(buffer_middle, in_offset,
+ out_offset, value, fn_3));
+
+ auto fn_4 = [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ mov(t0, a0);
+ __ Ld_wu(a0, MemOperand(a0, in_offset));
+ __ St_w(a0, MemOperand(t0, out_offset));
+ __ or_(a0, a2, zero_reg);
+ };
+        CHECK_EQ(true, run_Unaligned<uint32_t>(buffer_middle, in_offset,
+                                               out_offset, value, fn_4));
+ }
+ }
+ }
+}
+
+TEST(Ld_w_extension) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+ uint32_t value = static_cast<uint32_t>(*i & 0xFFFFFFFF);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ auto fn = [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ Label success, fail, end, different;
+ __ Ld_w(t0, MemOperand(a0, in_offset));
+ __ Ld_wu(t1, MemOperand(a0, in_offset));
+ __ Branch(&different, ne, t0, Operand(t1));
+
+          // If the signed and unsigned values are the same, check that the
+          // upper bits are zero.
+ __ srai_d(t0, t0, 31);
+ __ Branch(&success, eq, t0, Operand(zero_reg));
+ __ Branch(&fail);
+
+          // If the signed and unsigned values differ, check that the upper
+          // bits are complementary.
+ __ bind(&different);
+ __ srai_d(t1, t1, 31);
+ __ Branch(&fail, ne, t1, Operand(1));
+ __ srai_d(t0, t0, 31);
+ __ addi_d(t0, t0, 1);
+ __ Branch(&fail, ne, t0, Operand(zero_reg));
+ // Fall through to success
+
+ __ bind(&success);
+ __ Ld_w(t0, MemOperand(a0, in_offset));
+ __ St_w(t0, MemOperand(a0, out_offset));
+ __ Branch(&end);
+ __ bind(&fail);
+ __ St_w(zero_reg, MemOperand(a0, out_offset));
+ __ bind(&end);
+ __ or_(a0, a2, zero_reg);
+ };
+ CHECK_EQ(true, run_Unaligned<uint32_t>(buffer_middle, in_offset,
+ out_offset, value, fn));
+ }
+ }
+ }
+}
+
+TEST(Ld_d) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+ uint64_t value = *i;
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ auto fn_1 = [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Ld_d(a2, MemOperand(a0, in_offset));
+ __ St_d(a2, MemOperand(a0, out_offset));
+ __ or_(a0, a2, zero_reg);
+ };
+ CHECK_EQ(true, run_Unaligned<uint64_t>(buffer_middle, in_offset,
+ out_offset, value, fn_1));
+
+ auto fn_2 = [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ mov(t0, a0);
+ __ Ld_d(a0, MemOperand(a0, in_offset));
+ __ St_d(a0, MemOperand(t0, out_offset));
+ __ or_(a0, a2, zero_reg);
+ };
+        CHECK_EQ(true, run_Unaligned<uint64_t>(buffer_middle, in_offset,
+                                               out_offset, value, fn_2));
+ }
+ }
+ }
+}
+
+TEST(Fld_s) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+ float value = static_cast<float>(*i & 0xFFFFFFFF);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ auto fn = [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Fld_s(f0, MemOperand(a0, in_offset));
+ __ Fst_s(f0, MemOperand(a0, out_offset));
+ };
+ CHECK_EQ(true, run_Unaligned<float>(buffer_middle, in_offset,
+ out_offset, value, fn));
+ }
+ }
+ }
+}
+
+TEST(Fld_d) {
+ CcTest::InitializeVM();
+
+ static const int kBufferSize = 300 * KB;
+ char memory_buffer[kBufferSize];
+ char* buffer_middle = memory_buffer + (kBufferSize / 2);
+
+ FOR_UINT64_INPUTS(i, unsigned_test_values) {
+ FOR_INT32_INPUTS2(j1, j2, unsigned_test_offset) {
+ FOR_INT32_INPUTS2(k1, k2, unsigned_test_offset_increment) {
+ double value = static_cast<double>(*i);
+ int32_t in_offset = *j1 + *k1;
+ int32_t out_offset = *j2 + *k2;
+
+ auto fn = [](MacroAssembler* masm, int32_t in_offset,
+ int32_t out_offset) {
+ __ Fld_d(f0, MemOperand(a0, in_offset));
+ __ Fst_d(f0, MemOperand(a0, out_offset));
+ };
+ CHECK_EQ(true, run_Unaligned<double>(buffer_middle, in_offset,
+ out_offset, value, fn));
+ }
+ }
+ }
+}
+
+static const std::vector<uint64_t> sltu_test_values() {
+ // clang-format off
+ static const uint64_t kValues[] = {
+ 0,
+ 1,
+ 0x7FE,
+ 0x7FF,
+ 0x800,
+ 0x801,
+ 0xFFE,
+ 0xFFF,
+ 0xFFFFFFFFFFFFF7FE,
+ 0xFFFFFFFFFFFFF7FF,
+ 0xFFFFFFFFFFFFF800,
+ 0xFFFFFFFFFFFFF801,
+ 0xFFFFFFFFFFFFFFFE,
+ 0xFFFFFFFFFFFFFFFF,
+ };
+ // clang-format on
+ return std::vector<uint64_t>(&kValues[0], &kValues[arraysize(kValues)]);
+}
+
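+// Runs the Sltu sequence emitted by GenerateSltuInstructionFunc with rj in
+// a0 (and rk in a1 for the register variant); returns true when the
+// generated code computed rj < rk.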
+template <typename Func>
+bool run_Sltu(uint64_t rj, uint64_t rk, Func GenerateSltuInstructionFunc) {
+ using F_CVT = int64_t(uint64_t x0, uint64_t x1, int x2, int x3, int x4);
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assm(isolate, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assm;
+
+ GenerateSltuInstructionFunc(masm, rk);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ assm.GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+
+ auto f = GeneratedCode<F_CVT>::FromCode(*code);
+ int64_t res = reinterpret_cast<int64_t>(f.Call(rj, rk, 0, 0, 0));
+ return res == 1;
+}
+
+TEST(Sltu) {
+ CcTest::InitializeVM();
+
+ FOR_UINT64_INPUTS(i, sltu_test_values) {
+ FOR_UINT64_INPUTS(j, sltu_test_values) {
+ uint64_t rj = *i;
+ uint64_t rk = *j;
+
+ auto fn_1 = [](MacroAssembler* masm, uint64_t imm) {
+ __ Sltu(a2, a0, Operand(imm));
+ };
+ CHECK_EQ(rj < rk, run_Sltu(rj, rk, fn_1));
+
+ auto fn_2 = [](MacroAssembler* masm, uint64_t imm) {
+ __ Sltu(a2, a0, a1);
+ };
+ CHECK_EQ(rj < rk, run_Sltu(rj, rk, fn_2));
+ }
+ }
+}
+
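+// Emits Float32Min/Float32Max for every destination/source aliasing
+// combination (a = f(b, c), a = f(a, b), a = f(b, a)) together with the
+// out-of-line NaN paths, and returns the generated code.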
+template <typename T, typename Inputs, typename Results>
+static GeneratedCode<F4> GenerateMacroFloat32MinMax(MacroAssembler* masm) {
+ T a = T::from_code(8); // f8
+ T b = T::from_code(9); // f9
+ T c = T::from_code(10); // f10
+
+ Label ool_min_abc, ool_min_aab, ool_min_aba;
+ Label ool_max_abc, ool_max_aab, ool_max_aba;
+
+ Label done_min_abc, done_min_aab, done_min_aba;
+ Label done_max_abc, done_max_aab, done_max_aba;
+
+#define FLOAT_MIN_MAX(fminmax, res, x, y, done, ool, res_field) \
+ __ Fld_s(x, MemOperand(a0, offsetof(Inputs, src1_))); \
+ __ Fld_s(y, MemOperand(a0, offsetof(Inputs, src2_))); \
+ __ fminmax(res, x, y, &ool); \
+ __ bind(&done); \
+  __ Fst_s(res, MemOperand(a1, offsetof(Results, res_field)))
+
+ // a = min(b, c);
+ FLOAT_MIN_MAX(Float32Min, a, b, c, done_min_abc, ool_min_abc, min_abc_);
+ // a = min(a, b);
+ FLOAT_MIN_MAX(Float32Min, a, a, b, done_min_aab, ool_min_aab, min_aab_);
+ // a = min(b, a);
+ FLOAT_MIN_MAX(Float32Min, a, b, a, done_min_aba, ool_min_aba, min_aba_);
+
+ // a = max(b, c);
+ FLOAT_MIN_MAX(Float32Max, a, b, c, done_max_abc, ool_max_abc, max_abc_);
+ // a = max(a, b);
+ FLOAT_MIN_MAX(Float32Max, a, a, b, done_max_aab, ool_max_aab, max_aab_);
+ // a = max(b, a);
+ FLOAT_MIN_MAX(Float32Max, a, b, a, done_max_aba, ool_max_aba, max_aba_);
+
+#undef FLOAT_MIN_MAX
+
+ __ jirl(zero_reg, ra, 0);
+
+ // Generate out-of-line cases.
+ __ bind(&ool_min_abc);
+ __ Float32MinOutOfLine(a, b, c);
+ __ Branch(&done_min_abc);
+
+ __ bind(&ool_min_aab);
+ __ Float32MinOutOfLine(a, a, b);
+ __ Branch(&done_min_aab);
+
+ __ bind(&ool_min_aba);
+ __ Float32MinOutOfLine(a, b, a);
+ __ Branch(&done_min_aba);
+
+ __ bind(&ool_max_abc);
+ __ Float32MaxOutOfLine(a, b, c);
+ __ Branch(&done_max_abc);
+
+ __ bind(&ool_max_aab);
+ __ Float32MaxOutOfLine(a, a, b);
+ __ Branch(&done_max_aab);
+
+ __ bind(&ool_max_aba);
+ __ Float32MaxOutOfLine(a, b, a);
+ __ Branch(&done_max_aba);
+
+ CodeDesc desc;
+ masm->GetCode(masm->isolate(), &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(masm->isolate(), desc, CodeKind::FOR_TESTING)
+ .Build();
+#ifdef DEBUG
+ StdoutStream os;
+ code->Print(os);
+#endif
+ return GeneratedCode<F4>::FromCode(*code);
+}
+
+TEST(macro_float_minmax_f32) {
+ // Test the Float32Min and Float32Max macros.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ struct Inputs {
+ float src1_;
+ float src2_;
+ };
+
+ struct Results {
+ // Check all register aliasing possibilities in order to exercise all
+ // code-paths in the macro assembler.
+ float min_abc_;
+ float min_aab_;
+ float min_aba_;
+ float max_abc_;
+ float max_aab_;
+ float max_aba_;
+ };
+
+ GeneratedCode<F4> f =
+ GenerateMacroFloat32MinMax<FPURegister, Inputs, Results>(masm);
+
+#define CHECK_MINMAX(src1, src2, min, max) \
+ do { \
+ Inputs inputs = {src1, src2}; \
+ Results results; \
+ f.Call(&inputs, &results, 0, 0, 0); \
+ CHECK_EQ(bit_cast<uint32_t>(min), bit_cast<uint32_t>(results.min_abc_)); \
+ CHECK_EQ(bit_cast<uint32_t>(min), bit_cast<uint32_t>(results.min_aab_)); \
+ CHECK_EQ(bit_cast<uint32_t>(min), bit_cast<uint32_t>(results.min_aba_)); \
+ CHECK_EQ(bit_cast<uint32_t>(max), bit_cast<uint32_t>(results.max_abc_)); \
+ CHECK_EQ(bit_cast<uint32_t>(max), bit_cast<uint32_t>(results.max_aab_)); \
+ CHECK_EQ(bit_cast<uint32_t>(max), bit_cast<uint32_t>(results.max_aba_)); \
+ /* Use a bit_cast to correctly identify -0.0 and NaNs. */ \
+ } while (0)
+
+ float nan_a = std::numeric_limits<float>::quiet_NaN();
+ float nan_b = std::numeric_limits<float>::quiet_NaN();
+
+ CHECK_MINMAX(1.0f, -1.0f, -1.0f, 1.0f);
+ CHECK_MINMAX(-1.0f, 1.0f, -1.0f, 1.0f);
+ CHECK_MINMAX(0.0f, -1.0f, -1.0f, 0.0f);
+ CHECK_MINMAX(-1.0f, 0.0f, -1.0f, 0.0f);
+ CHECK_MINMAX(-0.0f, -1.0f, -1.0f, -0.0f);
+ CHECK_MINMAX(-1.0f, -0.0f, -1.0f, -0.0f);
+ CHECK_MINMAX(0.0f, 1.0f, 0.0f, 1.0f);
+ CHECK_MINMAX(1.0f, 0.0f, 0.0f, 1.0f);
+
+ CHECK_MINMAX(0.0f, 0.0f, 0.0f, 0.0f);
+ CHECK_MINMAX(-0.0f, -0.0f, -0.0f, -0.0f);
+ CHECK_MINMAX(-0.0f, 0.0f, -0.0f, 0.0f);
+ CHECK_MINMAX(0.0f, -0.0f, -0.0f, 0.0f);
+
+ CHECK_MINMAX(0.0f, nan_a, nan_a, nan_a);
+ CHECK_MINMAX(nan_a, 0.0f, nan_a, nan_a);
+ CHECK_MINMAX(nan_a, nan_b, nan_a, nan_a);
+ CHECK_MINMAX(nan_b, nan_a, nan_b, nan_b);
+
+#undef CHECK_MINMAX
+}
+
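+// Same as GenerateMacroFloat32MinMax above, but for the double-precision
+// Float64Min/Float64Max macros.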
+template <typename T, typename Inputs, typename Results>
+static GeneratedCode<F4> GenerateMacroFloat64MinMax(MacroAssembler* masm) {
+ T a = T::from_code(8); // f8
+ T b = T::from_code(9); // f9
+ T c = T::from_code(10); // f10
+
+ Label ool_min_abc, ool_min_aab, ool_min_aba;
+ Label ool_max_abc, ool_max_aab, ool_max_aba;
+
+ Label done_min_abc, done_min_aab, done_min_aba;
+ Label done_max_abc, done_max_aab, done_max_aba;
+
+#define FLOAT_MIN_MAX(fminmax, res, x, y, done, ool, res_field) \
+ __ Fld_d(x, MemOperand(a0, offsetof(Inputs, src1_))); \
+ __ Fld_d(y, MemOperand(a0, offsetof(Inputs, src2_))); \
+ __ fminmax(res, x, y, &ool); \
+ __ bind(&done); \
+  __ Fst_d(res, MemOperand(a1, offsetof(Results, res_field)))
+
+ // a = min(b, c);
+ FLOAT_MIN_MAX(Float64Min, a, b, c, done_min_abc, ool_min_abc, min_abc_);
+ // a = min(a, b);
+ FLOAT_MIN_MAX(Float64Min, a, a, b, done_min_aab, ool_min_aab, min_aab_);
+ // a = min(b, a);
+ FLOAT_MIN_MAX(Float64Min, a, b, a, done_min_aba, ool_min_aba, min_aba_);
+
+ // a = max(b, c);
+ FLOAT_MIN_MAX(Float64Max, a, b, c, done_max_abc, ool_max_abc, max_abc_);
+ // a = max(a, b);
+ FLOAT_MIN_MAX(Float64Max, a, a, b, done_max_aab, ool_max_aab, max_aab_);
+ // a = max(b, a);
+ FLOAT_MIN_MAX(Float64Max, a, b, a, done_max_aba, ool_max_aba, max_aba_);
+
+#undef FLOAT_MIN_MAX
+
+ __ jirl(zero_reg, ra, 0);
+
+ // Generate out-of-line cases.
+ __ bind(&ool_min_abc);
+ __ Float64MinOutOfLine(a, b, c);
+ __ Branch(&done_min_abc);
+
+ __ bind(&ool_min_aab);
+ __ Float64MinOutOfLine(a, a, b);
+ __ Branch(&done_min_aab);
+
+ __ bind(&ool_min_aba);
+ __ Float64MinOutOfLine(a, b, a);
+ __ Branch(&done_min_aba);
+
+ __ bind(&ool_max_abc);
+ __ Float64MaxOutOfLine(a, b, c);
+ __ Branch(&done_max_abc);
+
+ __ bind(&ool_max_aab);
+ __ Float64MaxOutOfLine(a, a, b);
+ __ Branch(&done_max_aab);
+
+ __ bind(&ool_max_aba);
+ __ Float64MaxOutOfLine(a, b, a);
+ __ Branch(&done_max_aba);
+
+ CodeDesc desc;
+ masm->GetCode(masm->isolate(), &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(masm->isolate(), desc, CodeKind::FOR_TESTING)
+ .Build();
+#ifdef DEBUG
+ StdoutStream os;
+ code->Print(os);
+#endif
+ return GeneratedCode<F4>::FromCode(*code);
+}
+
+TEST(macro_float_minmax_f64) {
+ // Test the Float64Min and Float64Max macros.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ struct Inputs {
+ double src1_;
+ double src2_;
+ };
+
+ struct Results {
+ // Check all register aliasing possibilities in order to exercise all
+ // code-paths in the macro assembler.
+ double min_abc_;
+ double min_aab_;
+ double min_aba_;
+ double max_abc_;
+ double max_aab_;
+ double max_aba_;
+ };
+
+ GeneratedCode<F4> f =
+ GenerateMacroFloat64MinMax<DoubleRegister, Inputs, Results>(masm);
+
+#define CHECK_MINMAX(src1, src2, min, max) \
+ do { \
+ Inputs inputs = {src1, src2}; \
+ Results results; \
+ f.Call(&inputs, &results, 0, 0, 0); \
+ CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_abc_)); \
+ CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_aab_)); \
+ CHECK_EQ(bit_cast<uint64_t>(min), bit_cast<uint64_t>(results.min_aba_)); \
+ CHECK_EQ(bit_cast<uint64_t>(max), bit_cast<uint64_t>(results.max_abc_)); \
+ CHECK_EQ(bit_cast<uint64_t>(max), bit_cast<uint64_t>(results.max_aab_)); \
+ CHECK_EQ(bit_cast<uint64_t>(max), bit_cast<uint64_t>(results.max_aba_)); \
+ /* Use a bit_cast to correctly identify -0.0 and NaNs. */ \
+ } while (0)
+
+ double nan_a = std::numeric_limits<double>::quiet_NaN();
+ double nan_b = std::numeric_limits<double>::quiet_NaN();
+
+ CHECK_MINMAX(1.0, -1.0, -1.0, 1.0);
+ CHECK_MINMAX(-1.0, 1.0, -1.0, 1.0);
+ CHECK_MINMAX(0.0, -1.0, -1.0, 0.0);
+ CHECK_MINMAX(-1.0, 0.0, -1.0, 0.0);
+ CHECK_MINMAX(-0.0, -1.0, -1.0, -0.0);
+ CHECK_MINMAX(-1.0, -0.0, -1.0, -0.0);
+ CHECK_MINMAX(0.0, 1.0, 0.0, 1.0);
+ CHECK_MINMAX(1.0, 0.0, 0.0, 1.0);
+
+ CHECK_MINMAX(0.0, 0.0, 0.0, 0.0);
+ CHECK_MINMAX(-0.0, -0.0, -0.0, -0.0);
+ CHECK_MINMAX(-0.0, 0.0, -0.0, 0.0);
+ CHECK_MINMAX(0.0, -0.0, -0.0, 0.0);
+
+ CHECK_MINMAX(0.0, nan_a, nan_a, nan_a);
+ CHECK_MINMAX(nan_a, 0.0, nan_a, nan_a);
+ CHECK_MINMAX(nan_a, nan_b, nan_a, nan_a);
+ CHECK_MINMAX(nan_b, nan_a, nan_b, nan_b);
+
+#undef CHECK_MINMAX
+}
+
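+// Emits Sub_w(a2, zero_reg, imm), checks that the macro expanded to exactly
+// |num_instr| instructions for this immediate, then runs the code and
+// returns 0 - imm as computed by it.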
+uint64_t run_Sub_w(uint64_t imm, int32_t num_instr) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ Label code_start;
+ __ bind(&code_start);
+ __ Sub_w(a2, zero_reg, Operand(imm));
+ CHECK_EQ(masm->InstructionsGeneratedSince(&code_start), num_instr);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ masm->GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ auto f = GeneratedCode<F2>::FromCode(*code);
+
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
+
+ return res;
+}
+
+TEST(SUB_W) {
+ CcTest::InitializeVM();
+
+  // Test the Sub_w macro-instruction for min_int12 and max_int12 border
+  // cases. For subtracting int12 immediate values we use addi_w.
+
+ struct TestCaseSub {
+ uint64_t imm;
+ uint64_t expected_res;
+ int32_t num_instr;
+ };
+
+  // We call Sub_w(a2, zero_reg, imm) to test the cases listed below.
+ // 0 - imm = expected_res
+ // clang-format off
+ struct TestCaseSub tc[] = {
+ // imm, expected_res, num_instr
+ {0xFFFFFFFFFFFFF800, 0x800, 2}, // min_int12
+      // The test case above generates an ori + add_w instruction sequence.
+      // We can't use just addi_w because -min_int12 > max_int12, so a
+      // register is needed. We could load min_int12 into a scratch register
+      // with addi_w and then subtract it with sub_w, but ori + add_w is
+      // used instead because -min_int12 can be loaded with a single ori.
+ {0x800, 0xFFFFFFFFFFFFF800, 1}, // max_int12 + 1
+ // Generates addi_w
+ // max_int12 + 1 is not int12 but -(max_int12 + 1) is, just use addi_w.
+ {0xFFFFFFFFFFFFF7FF, 0x801, 2}, // min_int12 - 1
+ // Generates ori + add_w
+      // To load this value into a register we would need two instructions
+      // plus one more to subtract (lu12i_w + ori + sub_w), but -value fits
+      // in a single ori, so we load -value and add it with add_w instead.
+ {0x801, 0xFFFFFFFFFFFFF7FF, 2}, // max_int12 + 2
+ // Generates ori + sub_w
+      // Not int12 but is uint12: load the value into a scratch register
+      // with ori and subtract with sub_w.
+ {0x00010000, 0xFFFFFFFFFFFF0000, 2},
+      // Generates lu12i_w + sub_w
+      // Load the value into a scratch register with lu12i_w and subtract
+      // with sub_w.
+      {0x00010001, 0xFFFFFFFFFFFEFFFF, 3},
+      // Generates lu12i_w + ori + sub_w
+      // We have to generate three instructions in this case.
+ {0x7FFFFFFF, 0xFFFFFFFF80000001, 3}, // max_int32
+ // Generates lu12i_w + ori + sub_w
+ {0xFFFFFFFF80000000, 0xFFFFFFFF80000000, 2}, // min_int32
+      // The test case above generates a lu12i_w + sub_w instruction
+      // sequence. The result of 0 - min_int32 equals max_int32 + 1, which
+      // wraps around to min_int32 again.
+ };
+ // clang-format on
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseSub);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ CHECK_EQ(tc[i].expected_res, run_Sub_w(tc[i].imm, tc[i].num_instr));
+ }
+}
+
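+// 64-bit variant of run_Sub_w above: emits Sub_d(a2, zero_reg, imm) and
+// checks the emitted instruction count before running the code.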
+uint64_t run_Sub_d(uint64_t imm, int32_t num_instr) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ Label code_start;
+ __ bind(&code_start);
+ __ Sub_d(a2, zero_reg, Operand(imm));
+ CHECK_EQ(masm->InstructionsGeneratedSince(&code_start), num_instr);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ masm->GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+#ifdef OBJECT_PRINT
+ code->Print(std::cout);
+#endif
+ auto f = GeneratedCode<F2>::FromCode(*code);
+
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(0, 0, 0, 0, 0));
+
+ return res;
+}
+
+TEST(SUB_D) {
+ CcTest::InitializeVM();
+
+ // Test Sub_d macro-instruction for min_int12 and max_int12 border cases.
+ // For subtracting int12 immediate values we use addi_d.
+
+ struct TestCaseSub {
+ uint64_t imm;
+ uint64_t expected_res;
+ int32_t num_instr;
+ };
+  // We call Sub_d(a2, zero_reg, imm) to test the cases listed below.
+ // 0 - imm = expected_res
+ // clang-format off
+ struct TestCaseSub tc[] = {
+ // imm, expected_res, num_instr
+      {0xFFFFFFFFFFFFF800, 0x800, 2},  // min_int12
+      // The test case above generates a two-instruction sequence: we can't
+      // use a single addi_d because -min_int12 > max_int12, so the
+      // immediate goes through a scratch register first.
+      { 0x800, 0xFFFFFFFFFFFFF800, 1},  // max_int12 + 1
+      // Generates addi_d
+      // max_int12 + 1 is not int12 but -(max_int12 + 1) is, just use addi_d.
+ {0xFFFFFFFFFFFFF7FF, 0x801, 2}, // min_int12 - 1
+ // Generates ori + add_d
+ { 0x801, 0xFFFFFFFFFFFFF7FF, 2}, // max_int12 + 2
+      // Generates ori + sub_d
+ { 0x00001000, 0xFFFFFFFFFFFFF000, 2}, // max_uint12 + 1
+ // Generates lu12i_w + sub_d
+ { 0x00001001, 0xFFFFFFFFFFFFEFFF, 3}, // max_uint12 + 2
+ // Generates lu12i_w + ori + sub_d
+ {0x00000000FFFFFFFF, 0xFFFFFFFF00000001, 3}, // max_uint32
+      // Generates addi_w + lu32i_d + sub_d
+ {0x00000000FFFFFFFE, 0xFFFFFFFF00000002, 3}, // max_uint32 - 1
+      // Generates addi_w + lu32i_d + sub_d
+ {0xFFFFFFFF80000000, 0x80000000, 2}, // min_int32
+ // Generates lu12i_w + sub_d
+ {0x0000000080000000, 0xFFFFFFFF80000000, 2}, // max_int32 + 1
+ // Generates lu12i_w + add_d
+ {0xFFFF0000FFFF8765, 0x0000FFFF0000789B, 4},
+      // Generates lu12i_w + ori + lu32i_d + sub_d
+      {0x1234ABCD87654321, 0xEDCB5432789ABCDF, 5},
+      // Generates lu12i_w + ori + lu32i_d + lu52i_d + sub_d
+      {0xFFFF789100000000, 0x876F00000000, 3},
+      // Generates xor + lu32i_d + sub_d
+      {0xF12F789100000000, 0xED0876F00000000, 4},
+      // Generates xor + lu32i_d + lu52i_d + sub_d
+      {0xF120000000000800, 0xEDFFFFFFFFFF800, 3},
+      // Generates ori + lu52i_d + sub_d
+      {0xFFF0000000000000, 0x10000000000000, 2}
+      // Generates lu52i_d + sub_d
+ };
+ // clang-format on
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCaseSub);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ CHECK_EQ(tc[i].expected_res, run_Sub_d(tc[i].imm, tc[i].num_instr));
+ }
+}
+
+TEST(Move) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ struct T {
+ float a;
+ float b;
+ float result_a;
+ float result_b;
+ double c;
+ double d;
+ double e;
+ double result_c;
+ double result_d;
+ double result_e;
+ };
+ T t;
+ __ li(a4, static_cast<int32_t>(0x80000000));
+ __ St_w(a4, MemOperand(a0, offsetof(T, a)));
+ __ li(a5, static_cast<int32_t>(0x12345678));
+ __ St_w(a5, MemOperand(a0, offsetof(T, b)));
+ __ li(a6, static_cast<int64_t>(0x8877665544332211));
+ __ St_d(a6, MemOperand(a0, offsetof(T, c)));
+ __ li(a7, static_cast<int64_t>(0x1122334455667788));
+ __ St_d(a7, MemOperand(a0, offsetof(T, d)));
+ __ li(t0, static_cast<int64_t>(0));
+ __ St_d(t0, MemOperand(a0, offsetof(T, e)));
+
+ __ Move(f8, static_cast<uint32_t>(0x80000000));
+ __ Move(f9, static_cast<uint32_t>(0x12345678));
+ __ Move(f10, static_cast<uint64_t>(0x8877665544332211));
+ __ Move(f11, static_cast<uint64_t>(0x1122334455667788));
+ __ Move(f12, static_cast<uint64_t>(0));
+ __ Fst_s(f8, MemOperand(a0, offsetof(T, result_a)));
+ __ Fst_s(f9, MemOperand(a0, offsetof(T, result_b)));
+ __ Fst_d(f10, MemOperand(a0, offsetof(T, result_c)));
+ __ Fst_d(f11, MemOperand(a0, offsetof(T, result_d)));
+ __ Fst_d(f12, MemOperand(a0, offsetof(T, result_e)));
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ masm->GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ f.Call(&t, 0, 0, 0, 0);
+ CHECK_EQ(t.a, t.result_a);
+ CHECK_EQ(t.b, t.result_b);
+ CHECK_EQ(t.c, t.result_c);
+ CHECK_EQ(t.d, t.result_d);
+ CHECK_EQ(t.e, t.result_e);
+}
+
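+// Movz(rd, rj, rk) copies rj into rd only when rk == 0; Movn copies only
+// when rk != 0. The *old fields below verify that the destination is left
+// unchanged in the other case.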
+TEST(Movz_Movn) {
+ const int kTableLength = 4;
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ struct Test {
+ int64_t rt;
+ int64_t a;
+ int64_t b;
+ int64_t bold;
+ int64_t b1;
+ int64_t bold1;
+ int32_t c;
+ int32_t d;
+ int32_t dold;
+ int32_t d1;
+ int32_t dold1;
+ };
+
+ Test test;
+ // clang-format off
+ int64_t inputs_D[kTableLength] = {
+ 7, 8, -9, -10
+ };
+ int32_t inputs_W[kTableLength] = {
+ 3, 4, -5, -6
+ };
+
+ int32_t outputs_W[kTableLength] = {
+ 3, 4, -5, -6
+ };
+ int64_t outputs_D[kTableLength] = {
+ 7, 8, -9, -10
+ };
+ // clang-format on
+
+ __ Ld_d(a4, MemOperand(a0, offsetof(Test, a)));
+ __ Ld_w(a5, MemOperand(a0, offsetof(Test, c)));
+ __ Ld_d(a6, MemOperand(a0, offsetof(Test, rt)));
+ __ li(t0, 1);
+ __ li(t1, 1);
+ __ li(t2, 1);
+ __ li(t3, 1);
+ __ St_d(t0, MemOperand(a0, offsetof(Test, bold)));
+ __ St_d(t1, MemOperand(a0, offsetof(Test, bold1)));
+ __ St_w(t2, MemOperand(a0, offsetof(Test, dold)));
+ __ St_w(t3, MemOperand(a0, offsetof(Test, dold1)));
+ __ Movz(t0, a4, a6);
+ __ Movn(t1, a4, a6);
+ __ Movz(t2, a5, a6);
+ __ Movn(t3, a5, a6);
+ __ St_d(t0, MemOperand(a0, offsetof(Test, b)));
+ __ St_d(t1, MemOperand(a0, offsetof(Test, b1)));
+ __ St_w(t2, MemOperand(a0, offsetof(Test, d)));
+ __ St_w(t3, MemOperand(a0, offsetof(Test, d1)));
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ masm->GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ for (int i = 0; i < kTableLength; i++) {
+ test.a = inputs_D[i];
+ test.c = inputs_W[i];
+
+ test.rt = 1;
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.b, test.bold);
+ CHECK_EQ(test.d, test.dold);
+ CHECK_EQ(test.b1, outputs_D[i]);
+ CHECK_EQ(test.d1, outputs_W[i]);
+
+ test.rt = 0;
+ f.Call(&test, 0, 0, 0, 0);
+ CHECK_EQ(test.b, outputs_D[i]);
+ CHECK_EQ(test.d, outputs_W[i]);
+ CHECK_EQ(test.b1, test.bold1);
+ CHECK_EQ(test.d1, test.dold1);
+ }
+}
+
+TEST(macro_instructions1) {
+  // Test the 32-bit arithmetic macro instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ Label exit, error;
+
+ __ li(a4, 0x00000004);
+ __ li(a5, 0x00001234);
+ __ li(a6, 0x12345678);
+ __ li(a7, 0x7FFFFFFF);
+ __ li(t0, static_cast<int32_t>(0xFFFFFFFC));
+ __ li(t1, static_cast<int32_t>(0xFFFFEDCC));
+ __ li(t2, static_cast<int32_t>(0xEDCBA988));
+ __ li(t3, static_cast<int32_t>(0x80000000));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ add_w(a2, a7, t1);
+ __ Add_w(a3, t1, a7);
+ __ Branch(&error, ne, a2, Operand(a3));
+ __ Add_w(t4, t1, static_cast<int32_t>(0x7FFFFFFF));
+ __ Branch(&error, ne, a2, Operand(t4));
+ __ addi_w(a2, a6, 0x800);
+ __ Add_w(a3, a6, 0xFFFFF800);
+ __ Branch(&error, ne, a2, Operand(a3));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ mul_w(a2, t1, a7);
+ __ Mul_w(a3, t1, a7);
+ __ Branch(&error, ne, a2, Operand(a3));
+ __ Mul_w(t4, t1, static_cast<int32_t>(0x7FFFFFFF));
+ __ Branch(&error, ne, a2, Operand(t4));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ mulh_w(a2, t1, a7);
+ __ Mulh_w(a3, t1, a7);
+ __ Branch(&error, ne, a2, Operand(a3));
+ __ Mulh_w(t4, t1, static_cast<int32_t>(0x7FFFFFFF));
+ __ Branch(&error, ne, a2, Operand(t4));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ Mulh_wu(a2, a4, static_cast<int32_t>(0xFFFFEDCC));
+ __ Branch(&error, ne, a2, Operand(0x3));
+ __ Mulh_wu(a3, a4, t1);
+ __ Branch(&error, ne, a3, Operand(0x3));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ div_w(a2, a7, t2);
+ __ Div_w(a3, a7, t2);
+ __ Branch(&error, ne, a2, Operand(a3));
+ __ Div_w(t4, a7, static_cast<int32_t>(0xEDCBA988));
+ __ Branch(&error, ne, a2, Operand(t4));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ Div_wu(a2, a7, a5);
+ __ Branch(&error, ne, a2, Operand(0x70821));
+ __ Div_wu(a3, t0, static_cast<int32_t>(0x00001234));
+ __ Branch(&error, ne, a3, Operand(0xE1042));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ Mod_w(a2, a6, a5);
+ __ Branch(&error, ne, a2, Operand(0xDA8));
+ __ Mod_w(a3, t2, static_cast<int32_t>(0x00001234));
+ __ Branch(&error, ne, a3, Operand(0xFFFFFFFFFFFFF258));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ Mod_wu(a2, a6, a5);
+ __ Branch(&error, ne, a2, Operand(0xDA8));
+ __ Mod_wu(a3, t2, static_cast<int32_t>(0x00001234));
+ __ Branch(&error, ne, a3, Operand(0xF0));
+
+ __ li(a2, 0x31415926);
+ __ b(&exit);
+
+ __ bind(&error);
+ __ li(a2, 0x666);
+
+ __ bind(&exit);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ masm->GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ int64_t res = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
+
+ CHECK_EQ(0x31415926L, res);
+}
+
+TEST(macro_instructions2) {
+  // Test the 64-bit arithmetic macro instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ Label exit, error;
+
+ __ li(a4, 0x17312);
+ __ li(a5, 0x1012131415161718);
+ __ li(a6, 0x51F4B764A26E7412);
+ __ li(a7, 0x7FFFFFFFFFFFFFFF);
+ __ li(t0, static_cast<int64_t>(0xFFFFFFFFFFFFF547));
+ __ li(t1, static_cast<int64_t>(0xDF6B8F35A10E205C));
+ __ li(t2, static_cast<int64_t>(0x81F25A87C4236841));
+ __ li(t3, static_cast<int64_t>(0x8000000000000000));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ add_d(a2, a7, t1);
+ __ Add_d(a3, t1, a7);
+ __ Branch(&error, ne, a2, Operand(a3));
+ __ Add_d(t4, t1, Operand(0x7FFFFFFFFFFFFFFF));
+ __ Branch(&error, ne, a2, Operand(t4));
+ __ addi_d(a2, a6, 0x800);
+ __ Add_d(a3, a6, Operand(0xFFFFFFFFFFFFF800));
+ __ Branch(&error, ne, a2, Operand(a3));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ Mul_d(a2, a5, a6);
+ __ Branch(&error, ne, a2, Operand(0xdbe6a8729a547fb0));
+ __ Mul_d(a3, t0, Operand(0xDF6B8F35A10E205C));
+ __ Branch(&error, ne, a3, Operand(0x57ad69f40f870584));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ Mulh_d(a2, a5, a6);
+ __ Branch(&error, ne, a2, Operand(0x52514c6c6b54467));
+ __ Mulh_d(a3, t0, Operand(0xDF6B8F35A10E205C));
+ __ Branch(&error, ne, a3, Operand(0x15d));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ Div_d(a2, t0, t1);
+ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
+ __ Div_d(a3, t1, Operand(0x17312));
+ __ Branch(&error, ne, a3, Operand(0xffffe985f631e6d9));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ Div_du(a2, t0, t1);
+ __ Branch(&error, ne, a2, Operand(0x1));
+ __ Div_du(a3, t1, 0x17312);
+ __ Branch(&error, ne, a3, Operand(0x9a22ffd3973d));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ Mod_d(a2, a6, a4);
+ __ Branch(&error, ne, a2, Operand(0x13558));
+ __ Mod_d(a3, t2, Operand(0xFFFFFFFFFFFFF547));
+ __ Branch(&error, ne, a3, Operand(0xfffffffffffffb0a));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ Mod_du(a2, a6, a4);
+ __ Branch(&error, ne, a2, Operand(0x13558));
+ __ Mod_du(a3, t2, Operand(0xFFFFFFFFFFFFF547));
+ __ Branch(&error, ne, a3, Operand(0x81f25a87c4236841));
+
+ __ li(a2, 0x31415926);
+ __ b(&exit);
+
+ __ bind(&error);
+ __ li(a2, 0x666);
+
+ __ bind(&exit);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ masm->GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ int64_t res = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
+
+ CHECK_EQ(0x31415926L, res);
+}
+
+TEST(macro_instructions3) {
+  // Test the 64-bit logical and comparison macro instructions.
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ Label exit, error;
+
+ __ li(a4, 0x17312);
+ __ li(a5, 0x1012131415161718);
+ __ li(a6, 0x51F4B764A26E7412);
+ __ li(a7, 0x7FFFFFFFFFFFFFFF);
+ __ li(t0, static_cast<int64_t>(0xFFFFFFFFFFFFF547));
+ __ li(t1, static_cast<int64_t>(0xDF6B8F35A10E205C));
+ __ li(t2, static_cast<int64_t>(0x81F25A87C4236841));
+ __ li(t3, static_cast<int64_t>(0x8000000000000000));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ And(a2, a4, a5);
+ __ Branch(&error, ne, a2, Operand(0x1310));
+ __ And(a3, a6, Operand(0x7FFFFFFFFFFFFFFF));
+ __ Branch(&error, ne, a3, Operand(0x51F4B764A26E7412));
+ __ andi(a2, a6, 0xDCB);
+ __ And(a3, a6, Operand(0xDCB));
+ __ Branch(&error, ne, a3, Operand(a2));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ Or(a2, t0, t1);
+ __ Branch(&error, ne, a2, Operand(0xfffffffffffff55f));
+ __ Or(a3, t2, Operand(0x8000000000000000));
+ __ Branch(&error, ne, a3, Operand(0x81f25a87c4236841));
+ __ ori(a2, a5, 0xDCB);
+ __ Or(a3, a5, Operand(0xDCB));
+ __ Branch(&error, ne, a2, Operand(a3));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ Orn(a2, t0, t1);
+ __ Branch(&error, ne, a2, Operand(0xffffffffffffffe7));
+ __ Orn(a3, t2, Operand(0x81F25A87C4236841));
+ __ Branch(&error, ne, a3, Operand(0xffffffffffffffff));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ Xor(a2, t0, t1);
+ __ Branch(&error, ne, a2, Operand(0x209470ca5ef1d51b));
+ __ Xor(a3, t2, Operand(0x8000000000000000));
+ __ Branch(&error, ne, a3, Operand(0x1f25a87c4236841));
+ __ Xor(a2, t2, Operand(0xDCB));
+ __ Branch(&error, ne, a2, Operand(0x81f25a87c423658a));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ Nor(a2, a4, a5);
+ __ Branch(&error, ne, a2, Operand(0xefedecebeae888e5));
+ __ Nor(a3, a6, Operand(0x7FFFFFFFFFFFFFFF));
+ __ Branch(&error, ne, a3, Operand(0x8000000000000000));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ Andn(a2, a4, a5);
+ __ Branch(&error, ne, a2, Operand(0x16002));
+ __ Andn(a3, a6, Operand(0x7FFFFFFFFFFFFFFF));
+ __ Branch(&error, ne, a3, Operand(static_cast<int64_t>(0)));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ Orn(a2, t0, t1);
+ __ Branch(&error, ne, a2, Operand(0xffffffffffffffe7));
+ __ Orn(a3, t2, Operand(0x8000000000000000));
+ __ Branch(&error, ne, a3, Operand(0xffffffffffffffff));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ Neg(a2, a7);
+ __ Branch(&error, ne, a2, Operand(0x8000000000000001));
+ __ Neg(a3, t0);
+ __ Branch(&error, ne, a3, Operand(0xAB9));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ Slt(a2, a5, a6);
+ __ Branch(&error, ne, a2, Operand(0x1));
+ __ Slt(a3, a7, Operand(0xFFFFFFFFFFFFF547));
+ __ Branch(&error, ne, a3, Operand(static_cast<int64_t>(0)));
+ __ Slt(a3, a4, 0x800);
+ __ Branch(&error, ne, a3, Operand(static_cast<int64_t>(0)));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ Sle(a2, a5, a6);
+ __ Branch(&error, ne, a2, Operand(0x1));
+ __ Sle(a3, t0, Operand(0xFFFFFFFFFFFFF547));
+ __ Branch(&error, ne, a3, Operand(static_cast<int64_t>(0x1)));
+ __ Sle(a2, a7, t0);
+ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ Sleu(a2, a5, a6);
+ __ Branch(&error, ne, a2, Operand(0x1));
+ __ Sleu(a3, t0, Operand(0xFFFFFFFFFFFFF547));
+ __ Branch(&error, ne, a3, Operand(static_cast<int64_t>(0x1)));
+ __ Sleu(a2, a7, t0);
+ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0x1)));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ Sge(a2, a5, a6);
+ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
+ __ Sge(a3, t0, Operand(0xFFFFFFFFFFFFF547));
+ __ Branch(&error, ne, a3, Operand(static_cast<int64_t>(0x1)));
+ __ Sge(a2, a7, t0);
+ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0x1)));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ Sgeu(a2, a5, a6);
+ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
+ __ Sgeu(a3, t0, Operand(0xFFFFFFFFFFFFF547));
+ __ Branch(&error, ne, a3, Operand(static_cast<int64_t>(0x1)));
+ __ Sgeu(a2, a7, t0);
+ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ Sgt(a2, a5, a6);
+ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
+ __ Sgt(a3, t0, Operand(0xFFFFFFFFFFFFF547));
+ __ Branch(&error, ne, a3, Operand(static_cast<int64_t>(0)));
+ __ Sgt(a2, a7, t0);
+ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0x1)));
+
+ __ or_(a2, zero_reg, zero_reg);
+ __ or_(a3, zero_reg, zero_reg);
+ __ Sgtu(a2, a5, a6);
+ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
+ __ Sgtu(a3, t0, Operand(0xFFFFFFFFFFFFF547));
+ __ Branch(&error, ne, a3, Operand(static_cast<int64_t>(0)));
+ __ Sgtu(a2, a7, t0);
+ __ Branch(&error, ne, a2, Operand(static_cast<int64_t>(0)));
+
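+ // Success returns 0x31415926; any failed check above returns 0x666.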
+ __ li(a2, 0x31415926);
+ __ b(&exit);
+
+ __ bind(&error);
+ __ li(a2, 0x666);
+
+ __ bind(&exit);
+ __ or_(a0, a2, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ masm->GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F2>::FromCode(*code);
+ int64_t res = reinterpret_cast<int64_t>(f.Call(0, 0, 0, 0, 0));
+
+ CHECK_EQ(0x31415926L, res);
+}
+
+TEST(Rotr_w) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ struct T {
+ int32_t input;
+ int32_t result_rotr_0;
+ int32_t result_rotr_4;
+ int32_t result_rotr_8;
+ int32_t result_rotr_12;
+ int32_t result_rotr_16;
+ int32_t result_rotr_20;
+ int32_t result_rotr_24;
+ int32_t result_rotr_28;
+ int32_t result_rotr_32;
+ int32_t result_rotri_0;
+ int32_t result_rotri_4;
+ int32_t result_rotri_8;
+ int32_t result_rotri_12;
+ int32_t result_rotri_16;
+ int32_t result_rotri_20;
+ int32_t result_rotri_24;
+ int32_t result_rotri_28;
+ int32_t result_rotri_32;
+ };
+ T t;
+
+ __ Ld_w(a4, MemOperand(a0, offsetof(T, input)));
+
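+ // Rotate right by immediate amounts; a negative amount wraps modulo 32,
+ // e.g. -0x0C behaves like 0x14 (see result_rotr_20 below).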
+ __ Rotr_w(a5, a4, 0);
+ __ Rotr_w(a6, a4, 0x04);
+ __ Rotr_w(a7, a4, 0x08);
+ __ Rotr_w(t0, a4, 0x0C);
+ __ Rotr_w(t1, a4, 0x10);
+ __ Rotr_w(t2, a4, -0x0C);
+ __ Rotr_w(t3, a4, -0x08);
+ __ Rotr_w(t4, a4, -0x04);
+ __ Rotr_w(t5, a4, 0x20);
+ __ St_w(a5, MemOperand(a0, offsetof(T, result_rotr_0)));
+ __ St_w(a6, MemOperand(a0, offsetof(T, result_rotr_4)));
+ __ St_w(a7, MemOperand(a0, offsetof(T, result_rotr_8)));
+ __ St_w(t0, MemOperand(a0, offsetof(T, result_rotr_12)));
+ __ St_w(t1, MemOperand(a0, offsetof(T, result_rotr_16)));
+ __ St_w(t2, MemOperand(a0, offsetof(T, result_rotr_20)));
+ __ St_w(t3, MemOperand(a0, offsetof(T, result_rotr_24)));
+ __ St_w(t4, MemOperand(a0, offsetof(T, result_rotr_28)));
+ __ St_w(t5, MemOperand(a0, offsetof(T, result_rotr_32)));
+
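+ // Repeat the same rotations with the amount supplied in a register (t5).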
+ __ li(t5, 0);
+ __ Rotr_w(a5, a4, t5);
+ __ li(t5, 0x04);
+ __ Rotr_w(a6, a4, t5);
+ __ li(t5, 0x08);
+ __ Rotr_w(a7, a4, t5);
+ __ li(t5, 0x0C);
+ __ Rotr_w(t0, a4, t5);
+ __ li(t5, 0x10);
+ __ Rotr_w(t1, a4, t5);
+ __ li(t5, -0x0C);
+ __ Rotr_w(t2, a4, t5);
+ __ li(t5, -0x08);
+ __ Rotr_w(t3, a4, t5);
+ __ li(t5, -0x04);
+ __ Rotr_w(t4, a4, t5);
+ __ li(t5, 0x20);
+ __ Rotr_w(t5, a4, t5);
+
+ __ St_w(a5, MemOperand(a0, offsetof(T, result_rotri_0)));
+ __ St_w(a6, MemOperand(a0, offsetof(T, result_rotri_4)));
+ __ St_w(a7, MemOperand(a0, offsetof(T, result_rotri_8)));
+ __ St_w(t0, MemOperand(a0, offsetof(T, result_rotri_12)));
+ __ St_w(t1, MemOperand(a0, offsetof(T, result_rotri_16)));
+ __ St_w(t2, MemOperand(a0, offsetof(T, result_rotri_20)));
+ __ St_w(t3, MemOperand(a0, offsetof(T, result_rotri_24)));
+ __ St_w(t4, MemOperand(a0, offsetof(T, result_rotri_28)));
+ __ St_w(t5, MemOperand(a0, offsetof(T, result_rotri_32)));
+
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ masm->GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ t.input = 0x12345678;
+ f.Call(&t, 0, 0, 0, 0);
+
+ CHECK_EQ(static_cast<int32_t>(0x12345678), t.result_rotr_0);
+ CHECK_EQ(static_cast<int32_t>(0x81234567), t.result_rotr_4);
+ CHECK_EQ(static_cast<int32_t>(0x78123456), t.result_rotr_8);
+ CHECK_EQ(static_cast<int32_t>(0x67812345), t.result_rotr_12);
+ CHECK_EQ(static_cast<int32_t>(0x56781234), t.result_rotr_16);
+ CHECK_EQ(static_cast<int32_t>(0x45678123), t.result_rotr_20);
+ CHECK_EQ(static_cast<int32_t>(0x34567812), t.result_rotr_24);
+ CHECK_EQ(static_cast<int32_t>(0x23456781), t.result_rotr_28);
+ CHECK_EQ(static_cast<int32_t>(0x12345678), t.result_rotr_32);
+
+ CHECK_EQ(static_cast<int32_t>(0x12345678), t.result_rotri_0);
+ CHECK_EQ(static_cast<int32_t>(0x81234567), t.result_rotri_4);
+ CHECK_EQ(static_cast<int32_t>(0x78123456), t.result_rotri_8);
+ CHECK_EQ(static_cast<int32_t>(0x67812345), t.result_rotri_12);
+ CHECK_EQ(static_cast<int32_t>(0x56781234), t.result_rotri_16);
+ CHECK_EQ(static_cast<int32_t>(0x45678123), t.result_rotri_20);
+ CHECK_EQ(static_cast<int32_t>(0x34567812), t.result_rotri_24);
+ CHECK_EQ(static_cast<int32_t>(0x23456781), t.result_rotri_28);
+ CHECK_EQ(static_cast<int32_t>(0x12345678), t.result_rotri_32);
+}
+
+TEST(Rotr_d) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ struct T {
+ int64_t input;
+ int64_t result_rotr_0;
+ int64_t result_rotr_8;
+ int64_t result_rotr_16;
+ int64_t result_rotr_24;
+ int64_t result_rotr_32;
+ int64_t result_rotr_40;
+ int64_t result_rotr_48;
+ int64_t result_rotr_56;
+ int64_t result_rotr_64;
+ int64_t result_rotri_0;
+ int64_t result_rotri_8;
+ int64_t result_rotri_16;
+ int64_t result_rotri_24;
+ int64_t result_rotri_32;
+ int64_t result_rotri_40;
+ int64_t result_rotri_48;
+ int64_t result_rotri_56;
+ int64_t result_rotri_64;
+ };
+ T t;
+
+ __ Ld_d(a4, MemOperand(a0, offsetof(T, input)));
+
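+ // Rotate right by immediate amounts; a negative amount wraps modulo 64,
+ // e.g. -0x18 behaves like 0x28 (see result_rotr_40 below).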
+ __ Rotr_d(a5, a4, 0);
+ __ Rotr_d(a6, a4, 0x08);
+ __ Rotr_d(a7, a4, 0x10);
+ __ Rotr_d(t0, a4, 0x18);
+ __ Rotr_d(t1, a4, 0x20);
+ __ Rotr_d(t2, a4, -0x18);
+ __ Rotr_d(t3, a4, -0x10);
+ __ Rotr_d(t4, a4, -0x08);
+ __ Rotr_d(t5, a4, 0x40);
+ __ St_d(a5, MemOperand(a0, offsetof(T, result_rotr_0)));
+ __ St_d(a6, MemOperand(a0, offsetof(T, result_rotr_8)));
+ __ St_d(a7, MemOperand(a0, offsetof(T, result_rotr_16)));
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_rotr_24)));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_rotr_32)));
+ __ St_d(t2, MemOperand(a0, offsetof(T, result_rotr_40)));
+ __ St_d(t3, MemOperand(a0, offsetof(T, result_rotr_48)));
+ __ St_d(t4, MemOperand(a0, offsetof(T, result_rotr_56)));
+ __ St_d(t5, MemOperand(a0, offsetof(T, result_rotr_64)));
+
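+ // Repeat the same rotations with the amount supplied in a register (t5).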
+ __ li(t5, 0);
+ __ Rotr_d(a5, a4, t5);
+ __ li(t5, 0x08);
+ __ Rotr_d(a6, a4, t5);
+ __ li(t5, 0x10);
+ __ Rotr_d(a7, a4, t5);
+ __ li(t5, 0x18);
+ __ Rotr_d(t0, a4, t5);
+ __ li(t5, 0x20);
+ __ Rotr_d(t1, a4, t5);
+ __ li(t5, -0x18);
+ __ Rotr_d(t2, a4, t5);
+ __ li(t5, -0x10);
+ __ Rotr_d(t3, a4, t5);
+ __ li(t5, -0x08);
+ __ Rotr_d(t4, a4, t5);
+ __ li(t5, 0x40);
+ __ Rotr_d(t5, a4, t5);
+
+ __ St_d(a5, MemOperand(a0, offsetof(T, result_rotri_0)));
+ __ St_d(a6, MemOperand(a0, offsetof(T, result_rotri_8)));
+ __ St_d(a7, MemOperand(a0, offsetof(T, result_rotri_16)));
+ __ St_d(t0, MemOperand(a0, offsetof(T, result_rotri_24)));
+ __ St_d(t1, MemOperand(a0, offsetof(T, result_rotri_32)));
+ __ St_d(t2, MemOperand(a0, offsetof(T, result_rotri_40)));
+ __ St_d(t3, MemOperand(a0, offsetof(T, result_rotri_48)));
+ __ St_d(t4, MemOperand(a0, offsetof(T, result_rotri_56)));
+ __ St_d(t5, MemOperand(a0, offsetof(T, result_rotri_64)));
+
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ masm->GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ t.input = 0x0123456789ABCDEF;
+ f.Call(&t, 0, 0, 0, 0);
+
+ CHECK_EQ(static_cast<int64_t>(0x0123456789ABCDEF), t.result_rotr_0);
+ CHECK_EQ(static_cast<int64_t>(0xEF0123456789ABCD), t.result_rotr_8);
+ CHECK_EQ(static_cast<int64_t>(0xCDEF0123456789AB), t.result_rotr_16);
+ CHECK_EQ(static_cast<int64_t>(0xABCDEF0123456789), t.result_rotr_24);
+ CHECK_EQ(static_cast<int64_t>(0x89ABCDEF01234567), t.result_rotr_32);
+ CHECK_EQ(static_cast<int64_t>(0x6789ABCDEF012345), t.result_rotr_40);
+ CHECK_EQ(static_cast<int64_t>(0x456789ABCDEF0123), t.result_rotr_48);
+ CHECK_EQ(static_cast<int64_t>(0x23456789ABCDEF01), t.result_rotr_56);
+ CHECK_EQ(static_cast<int64_t>(0x0123456789ABCDEF), t.result_rotr_64);
+
+ CHECK_EQ(static_cast<int64_t>(0x0123456789ABCDEF), t.result_rotri_0);
+ CHECK_EQ(static_cast<int64_t>(0xEF0123456789ABCD), t.result_rotri_8);
+ CHECK_EQ(static_cast<int64_t>(0xCDEF0123456789AB), t.result_rotri_16);
+ CHECK_EQ(static_cast<int64_t>(0xABCDEF0123456789), t.result_rotri_24);
+ CHECK_EQ(static_cast<int64_t>(0x89ABCDEF01234567), t.result_rotri_32);
+ CHECK_EQ(static_cast<int64_t>(0x6789ABCDEF012345), t.result_rotri_40);
+ CHECK_EQ(static_cast<int64_t>(0x456789ABCDEF0123), t.result_rotri_48);
+ CHECK_EQ(static_cast<int64_t>(0x23456789ABCDEF01), t.result_rotri_56);
+ CHECK_EQ(static_cast<int64_t>(0x0123456789ABCDEF), t.result_rotri_64);
+}
+
+TEST(macro_instructions4) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ struct T {
+ double a;
+ float b;
+ double result_floor_a;
+ float result_floor_b;
+ double result_ceil_a;
+ float result_ceil_b;
+ double result_trunc_a;
+ float result_trunc_b;
+ double result_round_a;
+ float result_round_b;
+ };
+ T t;
+
+ const int kTableLength = 16;
+
+ // clang-format off
+ double inputs_d[kTableLength] = {
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 1.7976931348623157E+308, 6.27463370218383111104242366943E-307,
+ std::numeric_limits<double>::max() - 0.1,
+ std::numeric_limits<double>::infinity()
+ };
+ float inputs_s[kTableLength] = {
+ 2.1, 2.6, 2.5, 3.1, 3.6, 3.5,
+ -2.1, -2.6, -2.5, -3.1, -3.6, -3.5,
+ 1.7976931348623157E+38, 6.27463370218383111104242366943E-37,
+ std::numeric_limits<float>::lowest() + 0.6,
+ std::numeric_limits<float>::infinity()
+ };
+ float outputs_round_s[kTableLength] = {
+ 2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
+ -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
+ 1.7976931348623157E+38, 0,
+ std::numeric_limits<float>::lowest() + 1,
+ std::numeric_limits<float>::infinity()
+ };
+ double outputs_round_d[kTableLength] = {
+ 2.0, 3.0, 2.0, 3.0, 4.0, 4.0,
+ -2.0, -3.0, -2.0, -3.0, -4.0, -4.0,
+ 1.7976931348623157E+308, 0,
+ std::numeric_limits<double>::max(),
+ std::numeric_limits<double>::infinity()
+ };
+ float outputs_trunc_s[kTableLength] = {
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 1.7976931348623157E+38, 0,
+ std::numeric_limits<float>::lowest() + 1,
+ std::numeric_limits<float>::infinity()
+ };
+ double outputs_trunc_d[kTableLength] = {
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 1.7976931348623157E+308, 0,
+ std::numeric_limits<double>::max() - 1,
+ std::numeric_limits<double>::infinity()
+ };
+ float outputs_ceil_s[kTableLength] = {
+ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 1.7976931348623157E38, 1,
+ std::numeric_limits<float>::lowest() + 1,
+ std::numeric_limits<float>::infinity()
+ };
+ double outputs_ceil_d[kTableLength] = {
+ 3.0, 3.0, 3.0, 4.0, 4.0, 4.0,
+ -2.0, -2.0, -2.0, -3.0, -3.0, -3.0,
+ 1.7976931348623157E308, 1,
+ std::numeric_limits<double>::max(),
+ std::numeric_limits<double>::infinity()
+ };
+ float outputs_floor_s[kTableLength] = {
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
+ 1.7976931348623157E38, 0,
+ std::numeric_limits<float>::lowest() + 1,
+ std::numeric_limits<float>::infinity()
+ };
+ double outputs_floor_d[kTableLength] = {
+ 2.0, 2.0, 2.0, 3.0, 3.0, 3.0,
+ -3.0, -3.0, -3.0, -4.0, -4.0, -4.0,
+ 1.7976931348623157E308, 0,
+ std::numeric_limits<double>::max(),
+ std::numeric_limits<double>::infinity()
+ };
+ // clang-format on
+
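+ // Apply each rounding macro (Floor/Ceil/Trunc/Round) to the double in f8
+ // and the float in f9, storing every result back into the struct.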
+ __ Fld_d(f8, MemOperand(a0, offsetof(T, a)));
+ __ Fld_s(f9, MemOperand(a0, offsetof(T, b)));
+ __ Floor_d(f10, f8);
+ __ Floor_s(f11, f9);
+ __ Fst_d(f10, MemOperand(a0, offsetof(T, result_floor_a)));
+ __ Fst_s(f11, MemOperand(a0, offsetof(T, result_floor_b)));
+ __ Ceil_d(f10, f8);
+ __ Ceil_s(f11, f9);
+ __ Fst_d(f10, MemOperand(a0, offsetof(T, result_ceil_a)));
+ __ Fst_s(f11, MemOperand(a0, offsetof(T, result_ceil_b)));
+ __ Trunc_d(f10, f8);
+ __ Trunc_s(f11, f9);
+ __ Fst_d(f10, MemOperand(a0, offsetof(T, result_trunc_a)));
+ __ Fst_s(f11, MemOperand(a0, offsetof(T, result_trunc_b)));
+ __ Round_d(f10, f8);
+ __ Round_s(f11, f9);
+ __ Fst_d(f10, MemOperand(a0, offsetof(T, result_round_a)));
+ __ Fst_s(f11, MemOperand(a0, offsetof(T, result_round_b)));
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ masm->GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+ for (int i = 0; i < kTableLength; i++) {
+ t.a = inputs_d[i];
+ t.b = inputs_s[i];
+ f.Call(&t, 0, 0, 0, 0);
+ CHECK_EQ(t.result_floor_a, outputs_floor_d[i]);
+ CHECK_EQ(t.result_floor_b, outputs_floor_s[i]);
+ CHECK_EQ(t.result_ceil_a, outputs_ceil_d[i]);
+ CHECK_EQ(t.result_ceil_b, outputs_ceil_s[i]);
+ CHECK_EQ(t.result_trunc_a, outputs_trunc_d[i]);
+ CHECK_EQ(t.result_trunc_b, outputs_trunc_s[i]);
+ CHECK_EQ(t.result_round_a, outputs_round_d[i]);
+ CHECK_EQ(t.result_round_b, outputs_round_s[i]);
+ }
+}
+
+uint64_t run_ExtractBits(uint64_t source, int pos, int size, bool sign_extend) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ if (sign_extend) {
+ __ ExtractBits(t0, a0, a1, size, true);
+ } else {
+ __ ExtractBits(t0, a0, a1, size);
+ }
+ __ or_(a0, t0, zero_reg);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ masm->GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<FV>::FromCode(*code);
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(source, pos, 0, 0, 0));
+ return res;
+}
+
+TEST(ExtractBits) {
+ CcTest::InitializeVM();
+
+ struct TestCase {
+ uint64_t source;
+ int pos;
+ int size;
+ bool sign_extend;
+ uint64_t res;
+ };
+
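+ // ExtractBits(dest, source, pos, size) extracts 'size' bits of 'source'
+ // starting at bit 'pos'; with sign_extend the top extracted bit is
+ // propagated through the upper bits of the result.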
+ // clang-format off
+ struct TestCase tc[] = {
+ // source, pos, size, sign_extend, res
+ {0x800, 4, 8, false, 0x80},
+ {0x800, 4, 8, true, 0xFFFFFFFFFFFFFF80},
+ {0x800, 5, 8, true, 0x40},
+ {0x40000, 3, 16, false, 0x8000},
+ {0x40000, 3, 16, true, 0xFFFFFFFFFFFF8000},
+ {0x40000, 4, 16, true, 0x4000},
+ {0x200000000, 2, 32, false, 0x80000000},
+ {0x200000000, 2, 32, true, 0xFFFFFFFF80000000},
+ {0x200000000, 3, 32, true, 0x40000000},
+ };
+ // clang-format on
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCase);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint64_t result =
+ run_ExtractBits(tc[i].source, tc[i].pos, tc[i].size, tc[i].sign_extend);
+ CHECK_EQ(tc[i].res, result);
+ }
+}
+
+uint64_t run_InsertBits(uint64_t dest, uint64_t source, int pos, int size) {
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ __ InsertBits(a0, a1, a2, size);
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ masm->GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<FV>::FromCode(*code);
+ uint64_t res = reinterpret_cast<uint64_t>(f.Call(dest, source, pos, 0, 0));
+ return res;
+}
+
+TEST(InsertBits) {
+ CcTest::InitializeVM();
+
+ struct TestCase {
+ uint64_t dest;
+ uint64_t source;
+ int pos;
+ int size;
+ uint64_t res;
+ };
+
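+ // InsertBits(dest, source, pos, size) replaces 'size' bits of 'dest',
+ // starting at bit 'pos', with the low 'size' bits of 'source'.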
+ // clang-format off
+ struct TestCase tc[] = {
+ // dest, source, pos, size, res
+ {0x11111111, 0x1234, 32, 16, 0x123411111111},
+ {0x111111111111, 0xFFFFF, 24, 10, 0x1113FF111111},
+ {0x1111111111111111, 0xFEDCBA, 16, 4, 0x11111111111A1111},
+ };
+ // clang-format on
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCase);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ uint64_t result =
+ run_InsertBits(tc[i].dest, tc[i].source, tc[i].pos, tc[i].size);
+ CHECK_EQ(tc[i].res, result);
+ }
+}
+
+TEST(Popcnt) {
+ CcTest::InitializeVM();
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope scope(isolate);
+ MacroAssembler assembler(isolate, v8::internal::CodeObjectRequired::kYes);
+ MacroAssembler* masm = &assembler;
+
+ struct TestCase {
+ uint32_t a;
+ uint64_t b;
+ int expected_a;
+ int expected_b;
+ int result_a;
+ int result_b;
+ };
+ // clang-format off
+ struct TestCase tc[] = {
+ { 0x12345678, 0x1122334455667788, 13, 26, 0, 0},
+ { 0x1234, 0x123456, 5, 9, 0, 0},
+ { 0xFFF00000, 0xFFFF000000000000, 12, 16, 0, 0},
+ { 0xFF000012, 0xFFFF000000001234, 10, 21, 0, 0}
+ };
+ // clang-format on
+
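+ // expected_a/expected_b hold the precomputed population counts of a and b.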
+ __ Ld_w(t0, MemOperand(a0, offsetof(TestCase, a)));
+ __ Ld_d(t1, MemOperand(a0, offsetof(TestCase, b)));
+ __ Popcnt_w(t2, t0);
+ __ Popcnt_d(t3, t1);
+ __ St_w(t2, MemOperand(a0, offsetof(TestCase, result_a)));
+ __ St_w(t3, MemOperand(a0, offsetof(TestCase, result_b)));
+ __ jirl(zero_reg, ra, 0);
+
+ CodeDesc desc;
+ masm->GetCode(isolate, &desc);
+ Handle<Code> code =
+ Factory::CodeBuilder(isolate, desc, CodeKind::FOR_TESTING).Build();
+ auto f = GeneratedCode<F3>::FromCode(*code);
+
+ size_t nr_test_cases = sizeof(tc) / sizeof(TestCase);
+ for (size_t i = 0; i < nr_test_cases; ++i) {
+ f.Call(&tc[i], 0, 0, 0, 0);
+ CHECK_EQ(tc[i].expected_a, tc[i].result_a);
+ CHECK_EQ(tc[i].expected_b, tc[i].result_b);
+ }
+}
+
+TEST(DeoptExitSizeIsFixed) {
+ CHECK(Deoptimizer::kSupportsFixedDeoptExitSizes);
+
+ Isolate* isolate = CcTest::i_isolate();
+ HandleScope handles(isolate);
+ auto buffer = AllocateAssemblerBuffer();
+ MacroAssembler masm(isolate, v8::internal::CodeObjectRequired::kYes,
+ buffer->CreateView());
+ STATIC_ASSERT(static_cast<int>(kFirstDeoptimizeKind) == 0);
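+ // For every deoptimization kind, the generated exit sequence must have the
+ // fixed size the deoptimizer relies on.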
+ for (int i = 0; i < kDeoptimizeKindCount; i++) {
+ DeoptimizeKind kind = static_cast<DeoptimizeKind>(i);
+ Label before_exit;
+ masm.bind(&before_exit);
+ if (kind == DeoptimizeKind::kEagerWithResume) {
+ Builtin target = Deoptimizer::GetDeoptWithResumeBuiltin(
+ DeoptimizeReason::kDynamicCheckMaps);
+ masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
+ nullptr);
+ CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
+ Deoptimizer::kEagerWithResumeBeforeArgsSize);
+ } else {
+ Builtin target = Deoptimizer::GetDeoptimizationEntry(kind);
+ masm.CallForDeoptimization(target, 42, &before_exit, kind, &before_exit,
+ nullptr);
+ CHECK_EQ(masm.SizeOfCodeGeneratedSince(&before_exit),
+ kind == DeoptimizeKind::kLazy
+ ? Deoptimizer::kLazyDeoptExitSize
+ : Deoptimizer::kNonLazyDeoptExitSize);
+ }
+ }
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/cctest/test-macro-assembler-riscv64.cc b/deps/v8/test/cctest/test-macro-assembler-riscv64.cc
index 6c3f36cc7c..5be85480e2 100644
--- a/deps/v8/test/cctest/test-macro-assembler-riscv64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-riscv64.cc
@@ -937,8 +937,8 @@ TEST(Uld) {
}
auto fn = [](MacroAssembler& masm, int32_t in_offset, int32_t out_offset) {
- __ ULoadFloat(fa0, MemOperand(a0, in_offset));
- __ UStoreFloat(fa0, MemOperand(a0, out_offset));
+ __ ULoadFloat(fa0, MemOperand(a0, in_offset), t0);
+ __ UStoreFloat(fa0, MemOperand(a0, out_offset), t0);
};
TEST(ULoadFloat) {
@@ -971,8 +971,8 @@ TEST(ULoadDouble) {
char* buffer_middle = memory_buffer + (kBufferSize / 2);
auto fn = [](MacroAssembler& masm, int32_t in_offset, int32_t out_offset) {
- __ ULoadDouble(fa0, MemOperand(a0, in_offset));
- __ UStoreDouble(fa0, MemOperand(a0, out_offset));
+ __ ULoadDouble(fa0, MemOperand(a0, in_offset), t0);
+ __ UStoreDouble(fa0, MemOperand(a0, out_offset), t0);
};
FOR_FLOAT64_INPUTS(i) {
@@ -1376,9 +1376,9 @@ TEST(Ctz64) {
TEST(ByteSwap) {
CcTest::InitializeVM();
- auto fn0 = [](MacroAssembler& masm) { __ ByteSwap(a0, a0, 4); };
+ auto fn0 = [](MacroAssembler& masm) { __ ByteSwap(a0, a0, 4, t0); };
CHECK_EQ((int32_t)0x89ab'cdef, GenAndRunTest<int32_t>(0xefcd'ab89, fn0));
- auto fn1 = [](MacroAssembler& masm) { __ ByteSwap(a0, a0, 8); };
+ auto fn1 = [](MacroAssembler& masm) { __ ByteSwap(a0, a0, 8, t0); };
CHECK_EQ((int64_t)0x0123'4567'89ab'cdef,
GenAndRunTest<int64_t>(0xefcd'ab89'6745'2301, fn1));
}
@@ -1411,17 +1411,17 @@ TEST(Dpopcnt) {
for (int i = 0; i < 7; i++) {
// Load constant.
__ li(a3, Operand(in[i]));
- __ Popcnt64(a5, a3);
+ __ Popcnt64(a5, a3, t0);
__ Sd(a5, MemOperand(a4));
__ Add64(a4, a4, Operand(kSystemPointerSize));
}
__ li(a3, Operand(in[7]));
- __ Popcnt64(a5, a3);
+ __ Popcnt64(a5, a3, t0);
__ Sd(a5, MemOperand(a4));
__ Add64(a4, a4, Operand(kSystemPointerSize));
__ li(a3, Operand(in[8]));
- __ Popcnt64(a5, a3);
+ __ Popcnt64(a5, a3, t0);
__ Sd(a5, MemOperand(a4));
__ Add64(a4, a4, Operand(kSystemPointerSize));
};
@@ -1462,18 +1462,18 @@ TEST(Popcnt) {
for (int i = 0; i < 6; i++) {
// Load constant.
__ li(a3, Operand(in[i]));
- __ Popcnt32(a5, a3);
+ __ Popcnt32(a5, a3, t0);
__ Sd(a5, MemOperand(a4));
__ Add64(a4, a4, Operand(kSystemPointerSize));
}
__ li(a3, Operand(in[6]));
- __ Popcnt64(a5, a3);
+ __ Popcnt64(a5, a3, t0);
__ Sd(a5, MemOperand(a4));
__ Add64(a4, a4, Operand(kSystemPointerSize));
__ li(a3, Operand(in[7]));
- __ Popcnt64(a5, a3);
+ __ Popcnt64(a5, a3, t0);
__ Sd(a5, MemOperand(a4));
__ Add64(a4, a4, Operand(kSystemPointerSize));
};
diff --git a/deps/v8/test/cctest/test-macro-assembler-x64.cc b/deps/v8/test/cctest/test-macro-assembler-x64.cc
index b0487e46d0..d9eb8851b0 100644
--- a/deps/v8/test/cctest/test-macro-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-macro-assembler-x64.cc
@@ -893,7 +893,7 @@ void TestFloat32x4Abs(MacroAssembler* masm, Label* exit, float x, float y,
__ Movss(Operand(rsp, 3 * kFloatSize), xmm4);
__ Movups(xmm0, Operand(rsp, 0));
- __ Absps(xmm0);
+ __ Absps(xmm0, xmm0);
__ Movups(Operand(rsp, 0), xmm0);
__ incq(rax);
@@ -930,7 +930,7 @@ void TestFloat32x4Neg(MacroAssembler* masm, Label* exit, float x, float y,
__ Movss(Operand(rsp, 3 * kFloatSize), xmm4);
__ Movups(xmm0, Operand(rsp, 0));
- __ Negps(xmm0);
+ __ Negps(xmm0, xmm0);
__ Movups(Operand(rsp, 0), xmm0);
__ incq(rax);
@@ -962,7 +962,7 @@ void TestFloat64x2Abs(MacroAssembler* masm, Label* exit, double x, double y) {
__ Movsd(Operand(rsp, 1 * kDoubleSize), xmm2);
__ movupd(xmm0, Operand(rsp, 0));
- __ Abspd(xmm0);
+ __ Abspd(xmm0, xmm0);
__ movupd(Operand(rsp, 0), xmm0);
__ incq(rax);
@@ -986,7 +986,7 @@ void TestFloat64x2Neg(MacroAssembler* masm, Label* exit, double x, double y) {
__ Movsd(Operand(rsp, 1 * kDoubleSize), xmm2);
__ movupd(xmm0, Operand(rsp, 0));
- __ Negpd(xmm0);
+ __ Negpd(xmm0, xmm0);
__ movupd(Operand(rsp, 0), xmm0);
__ incq(rax);
diff --git a/deps/v8/test/cctest/test-modules.cc b/deps/v8/test/cctest/test-modules.cc
index ab3afab758..523c83845a 100644
--- a/deps/v8/test/cctest/test-modules.cc
+++ b/deps/v8/test/cctest/test-modules.cc
@@ -2,8 +2,8 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "include/v8-function.h"
#include "src/flags/flags.h"
-
#include "test/cctest/cctest.h"
namespace {
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index 574e0a5f0c..8181edfc8e 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -31,6 +31,8 @@
#include <memory>
+#include "include/v8-initialization.h"
+#include "include/v8-locker.h"
#include "src/api/api-inl.h"
#include "src/ast/ast-value-factory.h"
#include "src/ast/ast.h"
diff --git a/deps/v8/test/cctest/test-platform.cc b/deps/v8/test/cctest/test-platform.cc
index b5c65a2c63..5c06b1fc83 100644
--- a/deps/v8/test/cctest/test-platform.cc
+++ b/deps/v8/test/cctest/test-platform.cc
@@ -4,6 +4,7 @@
#include <stdint.h>
+#include "include/v8-function.h"
#include "src/base/build_config.h"
#include "src/base/platform/platform.h"
#include "test/cctest/cctest-utils.h"
diff --git a/deps/v8/test/cctest/test-poison-disasm-arm.cc b/deps/v8/test/cctest/test-poison-disasm-arm.cc
deleted file mode 100644
index cd718a19b4..0000000000
--- a/deps/v8/test/cctest/test-poison-disasm-arm.cc
+++ /dev/null
@@ -1,157 +0,0 @@
-// Copyright 2018 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// The C++ style guide recommends using <re2> instead of <regex>. However, the
-// former isn't available in V8.
-#include <regex> // NOLINT(build/c++11)
-#include <vector>
-
-#include "src/codegen/arm/register-arm.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/disasm-regex-helper.h"
-
-namespace v8 {
-namespace internal {
-
-namespace {
-// Poison register.
-const int kPRegCode = kSpeculationPoisonRegister.code();
-const std::string kPReg = // NOLINT(runtime/string)
- "r" + std::to_string(kPRegCode);
-} // namespace
-
-TEST(DisasmPoisonMonomorphicLoad) {
-#ifdef ENABLE_DISASSEMBLER
- if (i::FLAG_always_opt || !i::FLAG_opt) return;
- // TODO(9684): Re-enable for TurboProp if necessary.
- if (i::FLAG_turboprop) return;
-
- i::FLAG_allow_natives_syntax = true;
- i::FLAG_untrusted_code_mitigations = true;
-
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
-
- CompileRun(
- "function mono(o) { return o.x; };"
- "%PrepareFunctionForOptimization(mono);"
- "mono({ x : 1 });"
- "mono({ x : 1 });"
- "%OptimizeFunctionOnNextCall(mono);"
- "mono({ x : 1 });");
-
- // Matches that the property access sequence is instrumented with
- // poisoning.
- std::vector<std::string> patterns_array = {
- "ldr <<Map:r[0-9]+>>, \\[<<Obj:r[0-9]+>>, #-1\\]", // load map
- "ldr <<ExpMap:r[0-9]+>>, \\[pc, #", // load expected map
- "cmp <<Map>>, <<ExpMap>>", // compare maps
- "bne", // deopt if different
- "eorne " + kPReg + ", " + kPReg + ", " + kPReg, // update the poison
- "csdb", // spec. barrier
- "ldr <<Field:r[0-9]+>>, \\[<<Obj>>, #\\+[0-9]+\\]", // load the field
- "and <<Field>>, <<Field>>, " + kPReg, // apply the poison
- };
- CHECK(CheckDisassemblyRegexPatterns("mono", patterns_array));
-#endif // ENABLE_DISASSEMBLER
-}
-
-TEST(DisasmPoisonPolymorphicLoad) {
-#ifdef ENABLE_DISASSEMBLER
- if (i::FLAG_always_opt || !i::FLAG_opt) return;
- // TODO(9684): Re-enable for TurboProp if necessary.
- if (i::FLAG_turboprop) return;
-
- i::FLAG_allow_natives_syntax = true;
- i::FLAG_untrusted_code_mitigations = true;
-
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
-
- CompileRun(
- "function poly(o) { return o.x + 1; };"
- "let o1 = { x : 1 };"
- "let o2 = { y : 1 };"
- "o2.x = 2;"
- "%PrepareFunctionForOptimization(poly);"
- "poly(o2);"
- "poly(o1);"
- "poly(o2);"
- "%OptimizeFunctionOnNextCall(poly);"
- "poly(o1);");
-
- // Matches that the property access sequence is instrumented with
- // poisoning.
- std::vector<std::string> patterns_array = {
- "ldr <<Map0:r[0-9]+>>, \\[<<Obj:r[0-9]+>>, #-1\\]", // load map
- "ldr <<ExpMap0:r[0-9]+>>, \\[pc", // load map const #1
- "cmp <<Map0>>, <<ExpMap0>>", // compare maps
- "beq", // ? go to the load
- "eoreq " + kPReg + ", " + kPReg + ", " + kPReg, // update the poison
- "csdb", // spec. barrier
- "ldr <<Map1:r[0-9]+>>, \\[<<Obj>>, #-1\\]", // load map
- "ldr <<ExpMap1:r[0-9]+>>, \\[pc", // load map const #2
- "cmp <<Map1>>, <<ExpMap1>>", // compare maps
- "bne", // deopt if different
- "eorne " + kPReg + ", " + kPReg + ", " + kPReg, // update the poison
- "csdb", // spec. barrier
- "ldr <<Field:r[0-9]+>>, \\[<<Obj>>, #\\+[0-9]+\\]", // load the field
- "and <<Field>>, <<Field>>, " + kPReg, // apply the poison
- "mov r[0-9]+, <<Field>>, asr #1", // untag
- "b", // goto merge point
- // Lcase1:
- "eorne " + kPReg + ", " + kPReg + ", " + kPReg, // update the poison
- "csdb", // spec. barrier
- "ldr <<BSt:r[0-9]+>>, \\[<<Obj>>, #\\+[0-9]+\\]", // load backing store
- "and <<BSt>>, <<BSt>>, " + kPReg, // apply the poison
- "ldr <<Prop:r[0-9]+>>, \\[<<BSt>>, #\\+[0-9]+\\]", // load the property
- "and <<Prop>>, <<Prop>>, " + kPReg, // apply the poison
- // Ldone:
- };
- CHECK(CheckDisassemblyRegexPatterns("poly", patterns_array));
-#endif // ENABLE_DISASSEMBLER
-}
-
-TEST(DisasmPoisonMonomorphicLoadFloat64) {
-#ifdef ENABLE_DISASSEMBLER
- if (i::FLAG_always_opt || !i::FLAG_opt) return;
- // TODO(9684): Re-enable for TurboProp if necessary.
- if (i::FLAG_turboprop) return;
-
- i::FLAG_allow_natives_syntax = true;
- i::FLAG_untrusted_code_mitigations = true;
-
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
-
- CompileRun(
- "function mono(o) { return o.x; }"
- "%PrepareFunctionForOptimization(mono);"
- "mono({ x : 1.1 });"
- "mono({ x : 1.1 });"
- "%OptimizeFunctionOnNextCall(mono);"
- "mono({ x : 1.1 });");
-
- // Matches that the property access sequence is instrumented with
- // poisoning.
- std::vector<std::string> patterns_array = {
- "ldr <<Map:r[0-9]+>>, \\[<<Obj:r[0-9]+>>, #-1\\]", // load map
- "ldr <<ExpMap:r[0-9]+>>, \\[pc, #", // load expected map
- "cmp <<Map>>, <<ExpMap>>", // compare maps
- "bne", // deopt if different
- "eorne " + kPReg + ", " + kPReg + ", " + kPReg, // update the poison
- "csdb", // spec. barrier
- "ldr <<Field:r[0-9]+>>, \\[<<Obj>>, #\\+[0-9]+\\]", // load the field
- "and <<Field>>, <<Field>>, " + kPReg, // apply the poison
- "mov <<Mov:r[0-9]+>>, #[0-9]+", // addr. calculation
- "add ip, <<Field>>, <<Mov>>", // addr. calculation
- "and ip, ip, " + kPReg, // apply the poison
- "vldr d[0-9]+, \\[ip", // load Float64
- };
- CHECK(CheckDisassemblyRegexPatterns("mono", patterns_array));
-#endif // ENABLE_DISASSEMBLER
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/cctest/test-poison-disasm-arm64.cc b/deps/v8/test/cctest/test-poison-disasm-arm64.cc
deleted file mode 100644
index cec69b6e9d..0000000000
--- a/deps/v8/test/cctest/test-poison-disasm-arm64.cc
+++ /dev/null
@@ -1,231 +0,0 @@
-// Copyright 2019 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// The C++ style guide recommends using <re2> instead of <regex>. However, the
-// former isn't available in V8.
-#include <regex> // NOLINT(build/c++11)
-#include <vector>
-
-#include "src/codegen/arm64/register-arm64.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/disasm-regex-helper.h"
-
-namespace v8 {
-namespace internal {
-
-namespace {
-// Poison register.
-const int kPRegCode = kSpeculationPoisonRegister.code();
-const std::string kPReg = // NOLINT(runtime/string)
- "x" + std::to_string(kPRegCode);
-} // namespace
-
-TEST(DisasmPoisonMonomorphicLoad) {
-#ifdef ENABLE_DISASSEMBLER
- if (i::FLAG_always_opt || !i::FLAG_opt) return;
- // TODO(9684): Re-enable for TurboProp if necessary.
- if (i::FLAG_turboprop) return;
-
- i::FLAG_allow_natives_syntax = true;
- i::FLAG_untrusted_code_mitigations = true;
-#ifdef V8_ENABLE_DEBUG_CODE
- i::FLAG_debug_code = false;
-#else
- STATIC_ASSERT(i::FLAG_debug_code == false);
-#endif
-
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
-
- CompileRun(
- "function mono(o) { return o.x; };"
- "%PrepareFunctionForOptimization(mono);"
- "mono({ x : 1 });"
- "mono({ x : 1 });"
- "%OptimizeFunctionOnNextCall(mono);"
- "mono({ x : 1 });");
-
- // Matches that the property access sequence is instrumented with
- // poisoning.
-#if defined(V8_COMPRESS_POINTERS)
- std::vector<std::string> patterns_array = {
- "ldur <<Map:w[0-9]+>>, \\[<<Obj:x[0-9]+>>, #-1\\]", // load map
- "ldr <<ExpMap:w[0-9]+>>, pc", // load expected map
- "cmp <<Map>>, <<ExpMap>>", // compare maps
- "b.ne", // deopt if different
- "csel " + kPReg + ", xzr, " + kPReg + ", ne", // update the poison
- "csdb", // spec. barrier
- "ldur w<<Field:[0-9]+>>, \\[<<Obj>>, #[0-9]+\\]", // load the field
- "and x<<Field>>, x<<Field>>, " + kPReg, // apply the poison
- };
-#else
- std::vector<std::string> patterns_array = {
- "ldur <<Map:x[0-9]+>>, \\[<<Obj:x[0-9]+>>, #-1\\]", // load map
- "ldr <<ExpMap:x[0-9]+>>, pc", // load expected map
- "cmp <<Map>>, <<ExpMap>>", // compare maps
- "b.ne", // deopt if different
- "csel " + kPReg + ", xzr, " + kPReg + ", ne", // update the poison
- "csdb", // spec. barrier
- "ldur <<Field:x[0-9]+>>, \\[<<Obj>>, #[0-9]+\\]", // load the field
- "and <<Field>>, <<Field>>, " + kPReg, // apply the poison
- };
-#endif
- CHECK(CheckDisassemblyRegexPatterns("mono", patterns_array));
-#endif // ENABLE_DISASSEMBLER
-}
-
-TEST(DisasmPoisonPolymorphicLoad) {
-#ifdef ENABLE_DISASSEMBLER
- if (i::FLAG_always_opt || !i::FLAG_opt) return;
- // TODO(9684): Re-enable for TurboProp if necessary.
- if (i::FLAG_turboprop) return;
-
- i::FLAG_allow_natives_syntax = true;
- i::FLAG_untrusted_code_mitigations = true;
-#ifdef V8_ENABLE_DEBUG_CODE
- i::FLAG_debug_code = false;
-#else
- STATIC_ASSERT(i::FLAG_debug_code == false);
-#endif
-
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
-
- CompileRun(
- "function poly(o) { return o.x + 1; };"
- "let o1 = { x : 1 };"
- "let o2 = { y : 1 };"
- "o2.x = 2;"
- "%PrepareFunctionForOptimization(poly);"
- "poly(o2);"
- "poly(o1);"
- "poly(o2);"
- "%OptimizeFunctionOnNextCall(poly);"
- "poly(o1);");
-
- // Matches that the property access sequence is instrumented with
- // poisoning.
-#if defined(V8_COMPRESS_POINTERS)
- std::vector<std::string> patterns_array = {
- "ldur <<Map0:w[0-9]+>>, \\[<<Obj:x[0-9]+>>, #-1\\]", // load map
- "ldr <<ExpMap:w[0-9]+>>, pc", // load map const #1
- "cmp <<Map0>>, <<ExpMap>>", // compare maps
- "b.eq", // ? go to the load
- "csel " + kPReg + ", xzr, " + kPReg + ", eq", // update the poison
- "csdb", // spec. barrier
- "ldur <<Map1:w[0-9]+>>, \\[<<Obj>>, #-1\\]", // load map
- "ldr <<ExpMap1:w[0-9]+>>, pc", // load map const #2
- "cmp <<Map1>>, <<ExpMap1>>", // compare maps
- "b.ne", // deopt if different
- "csel " + kPReg + ", xzr, " + kPReg + ", ne", // update the poison
- "csdb", // spec. barrier
- "ldur w<<Field:[0-9]+>>, \\[<<Obj>>, #[0-9]+\\]", // load the field
- "and x<<Field>>, x<<Field>>, " + kPReg, // apply the poison
- "asr w[0-9]+, w<<Field>>, #1", // untag
- "b", // goto merge point
- // Lcase1:
- "csel " + kPReg + ", xzr, " + kPReg + ", ne", // update the poison
- "csdb", // spec. barrier
- "ldur w<<BSt:[0-9]+>>, \\[<<Obj>>, #[0-9]+\\]", // load backing store
- // branchful decompress
- "add x<<BSt>>, x2[68], x<<BSt>>", // Add root to ref
- "and x<<BSt>>, x<<BSt>>, " + kPReg, // apply the poison
- "ldur w<<Prop:[0-9]+>>, \\[x<<BSt>>, #[0-9]+\\]", // load the property
- "and x<<Prop>>, x<<Prop>>, " + kPReg, // apply the poison
- // Ldone:
- };
-#else
- std::vector<std::string> patterns_array = {
- "ldur <<Map0:x[0-9]+>>, \\[<<Obj:x[0-9]+>>, #-1\\]", // load map
- "ldr <<ExpMap0:x[0-9]+>>, pc", // load map const #1
- "cmp <<Map0>>, <<ExpMap0>>", // compare maps
- "b.eq", // ? go to the load
- "csel " + kPReg + ", xzr, " + kPReg + ", eq", // update the poison
- "csdb", // spec. barrier
- "ldur <<Map1:x[0-9]+>>, \\[<<Obj>>, #-1\\]", // load map
- "ldr <<ExpMap1:x[0-9]+>>, pc", // load map const #2
- "cmp <<Map1>>, <<ExpMap1>>", // compare maps
- "b.ne", // deopt if different
- "csel " + kPReg + ", xzr, " + kPReg + ", ne", // update the poison
- "csdb", // spec. barrier
- "ldur x<<Field:[0-9]+>>, \\[<<Obj>>, #[0-9]+\\]", // load the field
- "and x<<Field>>, x<<Field>>, " + kPReg, // apply the poison
-#ifdef V8_31BIT_SMIS_ON_64BIT_ARCH
- "asr w<<Field>>, w<<Field>>, #1", // untag
-#else
- "asr x[0-9]+, x<<Field>>, #32", // untag
-#endif
- "b", // goto merge point
- // Lcase1:
- "csel " + kPReg + ", xzr, " + kPReg + ", ne", // update the poison
- "csdb", // spec. barrier
- "ldur <<BSt:x[0-9]+>>, \\[<<Obj>>, #[0-9]+\\]", // load backing store
- "and <<BSt>>, <<BSt>>, " + kPReg, // apply the poison
- "ldur <<Prop:x[0-9]+>>, \\[<<BSt>>, #[0-9]+\\]", // load the property
- "and <<Prop>>, <<Prop>>, " + kPReg, // apply the poison
- // Ldone:
- };
-#endif
- CHECK(CheckDisassemblyRegexPatterns("poly", patterns_array));
-#endif // ENABLE_DISASSEMBLER
-}
-
-TEST(DisasmPoisonMonomorphicLoadFloat64) {
-#ifdef ENABLE_DISASSEMBLER
- if (i::FLAG_always_opt || !i::FLAG_opt) return;
- // TODO(9684): Re-enable for TurboProp if necessary.
- if (i::FLAG_turboprop) return;
-
- i::FLAG_allow_natives_syntax = true;
- i::FLAG_untrusted_code_mitigations = true;
-
- CcTest::InitializeVM();
- v8::HandleScope scope(CcTest::isolate());
-
- CompileRun(
- "function mono(o) { return o.x; }"
- "%PrepareFunctionForOptimization(mono);"
- "mono({ x : 1.1 });"
- "mono({ x : 1.1 });"
- "%OptimizeFunctionOnNextCall(mono);"
- "mono({ x : 1.1 });");
-
- // Matches that the property access sequence is instrumented with
- // poisoning.
-#if defined(V8_COMPRESS_POINTERS)
- std::vector<std::string> patterns_array = {
- "ldur <<Map:w[0-9]+>>, \\[<<Obj:x[0-9]+>>, #-1\\]", // load map
- "ldr <<ExpMap:w[0-9]+>>, pc", // load expected map
- "cmp <<Map>>, <<ExpMap>>", // compare maps
- "b.ne", // deopt if differ
- "csel " + kPReg + ", xzr, " + kPReg + ", ne", // update the poison
- "csdb", // spec. barrier
- "ldur w<<F1:[0-9]+>>, \\[<<Obj>>, #11\\]", // load heap number
- "add x<<F1>>, x2[68], x<<F1>>", // Decompress ref
- "and x<<F1>>, x<<F1>>, " + kPReg, // apply the poison
- "add <<Addr:x[0-9]+>>, x<<F1>>, #0x[0-9a-f]+", // addr. calculation
- "and <<Addr>>, <<Addr>>, " + kPReg, // apply the poison
- "ldr d[0-9]+, \\[<<Addr>>\\]", // load Float64
- };
-#else
- std::vector<std::string> patterns_array = {
- "ldur <<Map:x[0-9]+>>, \\[<<Obj:x[0-9]+>>, #-1\\]", // load map
- "ldr <<ExpMap:x[0-9]+>>, pc", // load expected map
- "cmp <<Map>>, <<ExpMap>>", // compare maps
- "b.ne", // deopt if differ
- "csel " + kPReg + ", xzr, " + kPReg + ", ne", // update the poison
- "csdb", // spec. barrier
- "ldur <<F1:x[0-9]+>>, \\[<<Obj>>, #23\\]", // load heap number
- "and <<F1>>, <<F1>>, " + kPReg, // apply the poison
- "add <<Addr:x[0-9]+>>, <<F1>>, #0x7", // addr. calculation
- "and <<Addr>>, <<Addr>>, " + kPReg, // apply the poison
- "ldr d[0-9]+, \\[<<Addr>>\\]", // load Float64
- };
-#endif
- CHECK(CheckDisassemblyRegexPatterns("mono", patterns_array));
-#endif // ENABLE_DISASSEMBLER
-}
-
-} // namespace internal
-} // namespace v8
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index 5fbf1198ef..4f3dd4fd69 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -27,6 +27,7 @@
//
// Tests of profiles generator and utilities.
+#include "include/v8-function.h"
#include "include/v8-profiler.h"
#include "src/api/api-inl.h"
#include "src/base/strings.h"
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index aa24fe3dd2..2692748e62 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -29,7 +29,10 @@
#include <memory>
#include <sstream>
-#include "include/v8.h"
+#include "include/v8-context.h"
+#include "include/v8-initialization.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
#include "src/api/api-inl.h"
#include "src/ast/ast.h"
#include "src/base/strings.h"
@@ -63,10 +66,9 @@ static bool CheckParse(const char* input) {
v8::HandleScope scope(CcTest::isolate());
Zone zone(isolate->allocator(), ZONE_NAME);
Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(input);
- FlatStringReader reader(isolate, str);
RegExpCompileData result;
- return v8::internal::RegExpParser::ParseRegExp(isolate, &zone, &reader,
- JSRegExp::kNone, &result);
+ return RegExpParser::ParseRegExpFromHeapString(isolate, &zone, str, {},
+ &result);
}
static void CheckParseEq(const char* input, const char* expected,
@@ -76,11 +78,10 @@ static void CheckParseEq(const char* input, const char* expected,
v8::HandleScope scope(CcTest::isolate());
Zone zone(isolate->allocator(), ZONE_NAME);
Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(input);
- FlatStringReader reader(isolate, str);
RegExpCompileData result;
- JSRegExp::Flags flags = JSRegExp::kNone;
- if (unicode) flags |= JSRegExp::kUnicode;
- CHECK(v8::internal::RegExpParser::ParseRegExp(isolate, &zone, &reader, flags,
+ RegExpFlags flags;
+ if (unicode) flags |= RegExpFlag::kUnicode;
+ CHECK(RegExpParser::ParseRegExpFromHeapString(isolate, &zone, str, flags,
&result));
CHECK_NOT_NULL(result.tree);
CHECK(result.error == RegExpError::kNone);
@@ -98,10 +99,9 @@ static bool CheckSimple(const char* input) {
v8::HandleScope scope(CcTest::isolate());
Zone zone(isolate->allocator(), ZONE_NAME);
Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(input);
- FlatStringReader reader(isolate, str);
RegExpCompileData result;
- CHECK(v8::internal::RegExpParser::ParseRegExp(isolate, &zone, &reader,
- JSRegExp::kNone, &result));
+ CHECK(RegExpParser::ParseRegExpFromHeapString(isolate, &zone, str, {},
+ &result));
CHECK_NOT_NULL(result.tree);
CHECK(result.error == RegExpError::kNone);
return result.simple;
@@ -118,10 +118,9 @@ static MinMaxPair CheckMinMaxMatch(const char* input) {
v8::HandleScope scope(CcTest::isolate());
Zone zone(isolate->allocator(), ZONE_NAME);
Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(input);
- FlatStringReader reader(isolate, str);
RegExpCompileData result;
- CHECK(v8::internal::RegExpParser::ParseRegExp(isolate, &zone, &reader,
- JSRegExp::kNone, &result));
+ CHECK(RegExpParser::ParseRegExpFromHeapString(isolate, &zone, str, {},
+ &result));
CHECK_NOT_NULL(result.tree);
CHECK(result.error == RegExpError::kNone);
int min_match = result.tree->min_match();
@@ -433,11 +432,10 @@ static void ExpectError(const char* input, const char* expected,
v8::HandleScope scope(CcTest::isolate());
Zone zone(isolate->allocator(), ZONE_NAME);
Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(input);
- FlatStringReader reader(isolate, str);
RegExpCompileData result;
- JSRegExp::Flags flags = JSRegExp::kNone;
- if (unicode) flags |= JSRegExp::kUnicode;
- CHECK(!v8::internal::RegExpParser::ParseRegExp(isolate, &zone, &reader, flags,
+ RegExpFlags flags;
+ if (unicode) flags |= RegExpFlag::kUnicode;
+ CHECK(!RegExpParser::ParseRegExpFromHeapString(isolate, &zone, str, flags,
&result));
CHECK_NULL(result.tree);
CHECK(result.error != RegExpError::kNone);
@@ -536,15 +534,15 @@ static RegExpNode* Compile(const char* input, bool multiline, bool unicode,
bool is_one_byte, Zone* zone) {
Isolate* isolate = CcTest::i_isolate();
Handle<String> str = isolate->factory()->NewStringFromAsciiChecked(input);
- FlatStringReader reader(isolate, str);
RegExpCompileData compile_data;
compile_data.compilation_target = RegExpCompilationTarget::kNative;
- JSRegExp::Flags flags = JSRegExp::kNone;
- if (multiline) flags = JSRegExp::kMultiline;
- if (unicode) flags = JSRegExp::kUnicode;
- if (!v8::internal::RegExpParser::ParseRegExp(isolate, zone, &reader, flags,
- &compile_data))
+ RegExpFlags flags;
+ if (multiline) flags |= RegExpFlag::kMultiline;
+ if (unicode) flags |= RegExpFlag::kUnicode;
+ if (!RegExpParser::ParseRegExpFromHeapString(isolate, zone, str, flags,
+ &compile_data)) {
return nullptr;
+ }
Handle<String> pattern = isolate->factory()
->NewStringFromUtf8(base::CStrVector(input))
.ToHandleChecked();
@@ -614,8 +612,8 @@ using ArchRegExpMacroAssembler = RegExpMacroAssemblerPPC;
using ArchRegExpMacroAssembler = RegExpMacroAssemblerMIPS;
#elif V8_TARGET_ARCH_MIPS64
using ArchRegExpMacroAssembler = RegExpMacroAssemblerMIPS;
-#elif V8_TARGET_ARCH_X87
-using ArchRegExpMacroAssembler = RegExpMacroAssemblerX87;
+#elif V8_TARGET_ARCH_LOONG64
+using ArchRegExpMacroAssembler = RegExpMacroAssemblerLOONG64;
#elif V8_TARGET_ARCH_RISCV64
using ArchRegExpMacroAssembler = RegExpMacroAssemblerRISCV;
#endif
@@ -643,7 +641,7 @@ static Handle<JSRegExp> CreateJSRegExp(Handle<String> source, Handle<Code> code,
Handle<JSRegExp> regexp =
Handle<JSRegExp>::cast(factory->NewJSObject(constructor));
- factory->SetRegExpIrregexpData(regexp, source, JSRegExp::kNone, 0,
+ factory->SetRegExpIrregexpData(regexp, source, {}, 0,
JSRegExp::kNoBacktrackLimit);
regexp->SetDataAt(is_unicode ? JSRegExp::kIrregexpUC16CodeIndex
: JSRegExp::kIrregexpLatin1CodeIndex,
@@ -2348,6 +2346,50 @@ TEST(UnicodePropertyEscapeCodeSize) {
}
}
+namespace {
+
+struct RegExpExecData {
+ i::Isolate* isolate;
+ i::Handle<i::JSRegExp> regexp;
+ i::Handle<i::String> subject;
+};
+
+i::Handle<i::Object> RegExpExec(const RegExpExecData* d) {
+ return i::RegExp::Exec(d->isolate, d->regexp, d->subject, 0,
+ d->isolate->regexp_last_match_info())
+ .ToHandleChecked();
+}
+
+void ReenterRegExp(v8::Isolate* isolate, void* data) {
+ RegExpExecData* d = static_cast<RegExpExecData*>(data);
+ i::Handle<i::Object> result = RegExpExec(d);
+ CHECK(result->IsNull());
+}
+
+} // namespace
+
+// Tests reentrant irregexp calls.
+TEST(RegExpInterruptReentrantExecution) {
+ CHECK(!i::FLAG_jitless);
+ i::FLAG_regexp_tier_up = false; // Enter irregexp, not the interpreter.
+
+ LocalContext context;
+ v8::Isolate* isolate = context->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ RegExpExecData d;
+ d.isolate = reinterpret_cast<i::Isolate*>(isolate);
+ d.regexp = v8::Utils::OpenHandle(
+ *v8::RegExp::New(context.local(), v8_str("(a*)*x"), v8::RegExp::kNone)
+ .ToLocalChecked());
+ d.subject = v8::Utils::OpenHandle(*v8_str("aaaa"));
+
+ isolate->RequestInterrupt(&ReenterRegExp, &d);
+
+ i::Handle<i::Object> result = RegExpExec(&d);
+ CHECK(result->IsNull());
+}
+
#undef CHECK_PARSE_ERROR
#undef CHECK_SIMPLE
#undef CHECK_MIN_MAX
diff --git a/deps/v8/test/cctest/test-sampler-api.cc b/deps/v8/test/cctest/test-sampler-api.cc
index 2e9a069d3f..33e003e925 100644
--- a/deps/v8/test/cctest/test-sampler-api.cc
+++ b/deps/v8/test/cctest/test-sampler-api.cc
@@ -6,7 +6,11 @@
#include <map>
#include <string>
-#include "include/v8.h"
+
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-template.h"
+#include "include/v8-unwinder.h"
#include "src/flags/flags.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index b78052c9cd..08f9447b4e 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -28,6 +28,9 @@
#include <signal.h>
#include <sys/stat.h>
+#include "include/v8-extension.h"
+#include "include/v8-function.h"
+#include "include/v8-locker.h"
#include "src/api/api-inl.h"
#include "src/codegen/assembler-inl.h"
#include "src/codegen/compilation-cache.h"
@@ -1574,8 +1577,8 @@ static Handle<SharedFunctionInfo> CompileScript(
Isolate* isolate, Handle<String> source,
const ScriptDetails& script_details, AlignedCachedData* cached_data,
v8::ScriptCompiler::CompileOptions options) {
- return Compiler::GetSharedFunctionInfoForScript(
- isolate, source, script_details, nullptr, cached_data, options,
+ return Compiler::GetSharedFunctionInfoForScriptWithCachedData(
+ isolate, source, script_details, cached_data, options,
ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE)
.ToHandleChecked();
}
@@ -1586,7 +1589,7 @@ static Handle<SharedFunctionInfo> CompileScriptAndProduceCache(
v8::ScriptCompiler::CompileOptions options) {
Handle<SharedFunctionInfo> sfi =
Compiler::GetSharedFunctionInfoForScript(
- isolate, source, script_details, nullptr, nullptr, options,
+ isolate, source, script_details, options,
ScriptCompiler::kNoCacheNoReason, NOT_NATIVES_CODE)
.ToHandleChecked();
std::unique_ptr<ScriptCompiler::CachedData> cached_data(
diff --git a/deps/v8/test/cctest/test-stack-unwinding-win64.cc b/deps/v8/test/cctest/test-stack-unwinding-win64.cc
index 138f4822fa..4b30658ad3 100644
--- a/deps/v8/test/cctest/test-stack-unwinding-win64.cc
+++ b/deps/v8/test/cctest/test-stack-unwinding-win64.cc
@@ -2,6 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "include/v8-external.h"
+#include "include/v8-function.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-template.h"
#include "src/base/win32-headers.h"
#include "src/init/v8.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index 2655897ee0..0f0bc56fcd 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -32,6 +32,8 @@
#include <stdlib.h>
+#include "include/v8-initialization.h"
+#include "include/v8-json.h"
#include "src/api/api-inl.h"
#include "src/base/platform/elapsed-timer.h"
#include "src/base/strings.h"
diff --git a/deps/v8/test/cctest/test-thread-termination.cc b/deps/v8/test/cctest/test-thread-termination.cc
index 93f899d559..30b39a0d73 100644
--- a/deps/v8/test/cctest/test-thread-termination.cc
+++ b/deps/v8/test/cctest/test-thread-termination.cc
@@ -25,14 +25,15 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#include "include/v8-function.h"
+#include "include/v8-locker.h"
#include "src/api/api-inl.h"
+#include "src/base/platform/platform.h"
#include "src/execution/isolate.h"
#include "src/init/v8.h"
#include "src/objects/objects-inl.h"
#include "test/cctest/cctest.h"
-#include "src/base/platform/platform.h"
-
v8::base::Semaphore* semaphore = nullptr;
void Signal(const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -241,7 +242,7 @@ TEST(TerminateBigIntToString) {
TEST(TerminateBigIntFromString) {
TestTerminatingSlowOperation(
- "var a = '12344567890'.repeat(10000);\n"
+ "var a = '12344567890'.repeat(100000);\n"
"terminate();\n"
"BigInt(a);\n"
"fail();\n");
diff --git a/deps/v8/test/cctest/test-trace-event.cc b/deps/v8/test/cctest/test-trace-event.cc
index 43a477fae0..2e2dd3d2cb 100644
--- a/deps/v8/test/cctest/test-trace-event.cc
+++ b/deps/v8/test/cctest/test-trace-event.cc
@@ -1,14 +1,14 @@
// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+
#include <stdlib.h>
#include <string.h>
+#include "include/v8-function.h"
#include "src/init/v8.h"
-
-#include "test/cctest/cctest.h"
-
#include "src/tracing/trace-event.h"
+#include "test/cctest/cctest.h"
namespace {
diff --git a/deps/v8/test/cctest/test-unscopables-hidden-prototype.cc b/deps/v8/test/cctest/test-unscopables-hidden-prototype.cc
index 2d19f5c835..e4e2a08e1f 100644
--- a/deps/v8/test/cctest/test-unscopables-hidden-prototype.cc
+++ b/deps/v8/test/cctest/test-unscopables-hidden-prototype.cc
@@ -4,6 +4,7 @@
#include <stdlib.h>
+#include "include/v8-function.h"
#include "src/init/v8.h"
#include "test/cctest/cctest.h"
diff --git a/deps/v8/test/cctest/test-unwinder-code-pages.cc b/deps/v8/test/cctest/test-unwinder-code-pages.cc
index 053756c615..551fa7a912 100644
--- a/deps/v8/test/cctest/test-unwinder-code-pages.cc
+++ b/deps/v8/test/cctest/test-unwinder-code-pages.cc
@@ -2,8 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "include/v8-function.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
#include "include/v8-unwinder-state.h"
-#include "include/v8.h"
#include "src/api/api-inl.h"
#include "src/builtins/builtins.h"
#include "src/execution/isolate.h"
diff --git a/deps/v8/test/cctest/test-utils.cc b/deps/v8/test/cctest/test-utils.cc
index e065316891..ab0a86ab8c 100644
--- a/deps/v8/test/cctest/test-utils.cc
+++ b/deps/v8/test/cctest/test-utils.cc
@@ -29,11 +29,11 @@
#include <vector>
-#include "src/init/v8.h"
-
+#include "include/v8-initialization.h"
#include "src/api/api-inl.h"
#include "src/base/bit-field.h"
#include "src/base/platform/platform.h"
+#include "src/init/v8.h"
#include "src/numbers/conversions.h"
#include "test/cctest/cctest.h"
#include "test/cctest/collector.h"
diff --git a/deps/v8/test/cctest/test-virtual-memory-cage.cc b/deps/v8/test/cctest/test-virtual-memory-cage.cc
new file mode 100644
index 0000000000..d5afed0590
--- /dev/null
+++ b/deps/v8/test/cctest/test-virtual-memory-cage.cc
@@ -0,0 +1,36 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/init/vm-cage.h"
+#include "test/cctest/cctest.h"
+
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+
+namespace v8 {
+namespace internal {
+
+UNINITIALIZED_TEST(VirtualMemoryCageCreation) {
+ base::PageAllocator page_allocator;
+
+ V8VirtualMemoryCage cage;
+
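+ // A freshly constructed cage is uninitialized, not disabled, and has size 0.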
+ CHECK(!cage.is_initialized());
+ CHECK(!cage.is_disabled());
+ CHECK_EQ(cage.size(), 0);
+
+ CHECK(cage.Initialize(&page_allocator));
+
+ CHECK(cage.is_initialized());
+ CHECK_GT(cage.base(), 0);
+ CHECK_GT(cage.size(), 0);
+
+ cage.TearDown();
+
+ CHECK(!cage.is_initialized());
+}
+
+} // namespace internal
+} // namespace v8
+
+#endif // V8_VIRTUAL_MEMORY_CAGE
diff --git a/deps/v8/test/cctest/test-web-snapshots.cc b/deps/v8/test/cctest/test-web-snapshots.cc
index 56c79d075a..e45f0d90b6 100644
--- a/deps/v8/test/cctest/test-web-snapshots.cc
+++ b/deps/v8/test/cctest/test-web-snapshots.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/web-snapshot/web-snapshot.h"
#include "test/cctest/cctest-utils.h"
diff --git a/deps/v8/test/cctest/trace-extension.cc b/deps/v8/test/cctest/trace-extension.cc
index 9aa2f380f6..a48ecf768d 100644
--- a/deps/v8/test/cctest/trace-extension.cc
+++ b/deps/v8/test/cctest/trace-extension.cc
@@ -28,6 +28,7 @@
#include "test/cctest/trace-extension.h"
#include "include/v8-profiler.h"
+#include "include/v8-template.h"
#include "src/execution/vm-state-inl.h"
#include "src/objects/smi.h"
#include "src/profiler/tick-sample.h"
diff --git a/deps/v8/test/cctest/trace-extension.h b/deps/v8/test/cctest/trace-extension.h
index 78927f0fb6..0ddc0f0e8c 100644
--- a/deps/v8/test/cctest/trace-extension.h
+++ b/deps/v8/test/cctest/trace-extension.h
@@ -28,10 +28,14 @@
#ifndef V8_TEST_CCTEST_TRACE_EXTENSION_H_
#define V8_TEST_CCTEST_TRACE_EXTENSION_H_
-#include "include/v8.h"
+#include "include/v8-extension.h"
#include "src/common/globals.h"
namespace v8 {
+
+template <typename T>
+class FunctionCallbackInfo;
+
namespace internal {
struct TickSample;
diff --git a/deps/v8/test/cctest/wasm/test-gc.cc b/deps/v8/test/cctest/wasm/test-gc.cc
index c8f557b7ed..d24a926401 100644
--- a/deps/v8/test/cctest/wasm/test-gc.cc
+++ b/deps/v8/test/cctest/wasm/test-gc.cc
@@ -786,7 +786,8 @@ WASM_COMPILED_EXEC_TEST(WasmBasicArray) {
WASM_RTT_CANON(type_index)),
kExprEnd});
- const uint32_t kTooLong = kV8MaxWasmArrayLength + 1;
+ ArrayType array_type(kWasmI32, true);
+ const uint32_t kTooLong = WasmArray::MaxLength(&array_type) + 1;
const byte kAllocateTooLarge = tester.DefineFunction(
&sig_q_v, {},
{WASM_ARRAY_NEW_DEFAULT(type_index, WASM_I32V(kTooLong),
@@ -904,7 +905,6 @@ WASM_COMPILED_EXEC_TEST(WasmPackedArrayS) {
}
WASM_COMPILED_EXEC_TEST(WasmArrayCopy) {
- FLAG_SCOPE(experimental_wasm_gc_experiments);
WasmGCTester tester(execution_tier);
const byte array32_index = tester.DefineArray(kWasmI32, true);
const byte array16_index = tester.DefineArray(kWasmI16, true);
@@ -1186,8 +1186,6 @@ WASM_COMPILED_EXEC_TEST(BasicRtt) {
WASM_COMPILED_EXEC_TEST(RttFreshSub) {
WasmGCTester tester(execution_tier);
- FlagScope<bool> flag_gc_experiments(&FLAG_experimental_wasm_gc_experiments,
- true);
const byte kType = tester.DefineStruct({F(wasm::kWasmI32, true)});
HeapType::Representation type_repr =
static_cast<HeapType::Representation>(kType);
diff --git a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
index e671d247ce..f1c78fe970 100644
--- a/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
+++ b/deps/v8/test/cctest/wasm/test-jump-table-assembler.cc
@@ -140,6 +140,11 @@ void CompileJumpTableThunk(Address thunk, Address jump_target) {
__ Lw(scratch, MemOperand(scratch, 0));
__ Branch(&exit, ne, scratch, Operand(zero_reg));
__ Jump(jump_target, RelocInfo::NONE);
+#elif V8_TARGET_ARCH_LOONG64
+ __ li(scratch, Operand(stop_bit_address, RelocInfo::NONE));
+ __ Ld_w(scratch, MemOperand(scratch, 0));
+ __ Branch(&exit, ne, scratch, Operand(zero_reg));
+ __ Jump(jump_target, RelocInfo::NONE);
#elif V8_TARGET_ARCH_MIPS
__ li(scratch, Operand(stop_bit_address, RelocInfo::NONE));
__ lw(scratch, MemOperand(scratch, 0));
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc b/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
index 4bf836060f..af6510d0cd 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-exceptions.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "test/cctest/wasm/wasm-atomics-utils.h"
#include "test/common/wasm/test-signatures.h"
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
index 0d039843e6..4b6b28d3f2 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-interpreter.cc
@@ -504,6 +504,13 @@ TEST(Regress1092130) {
r.Call();
}
+TEST(Regress1247119) {
+ WasmRunner<uint32_t> r(TestExecutionTier::kInterpreter);
+ BUILD(r, kExprLoop, 0, kExprTry, 0, kExprUnreachable, kExprDelegate, 0,
+ kExprEnd);
+ r.Call();
+}
+
} // namespace test_run_wasm_interpreter
} // namespace wasm
} // namespace internal
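Regress1247119 is the smallest module that makes a delegate's label land on a non-try frame: label 0, counted from outside the try, is the loop. Roughly, the body decodes as (annotation only, not part of the test):

    loop
      try
        unreachable
      delegate 0   ;; label 0 targets the loop, which is not a try
    end

The side-table fix in wasm-interpreter.cc further below resolves such labels by searching outward for the nearest enclosing try.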
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
index 6a82070b9f..99b539bbc5 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-js.cc
@@ -7,6 +7,7 @@
#include <stdlib.h>
#include <string.h>
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/codegen/assembler-inl.h"
#include "src/objects/heap-number-inl.h"
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
index 2b417f0955..3ba26da89c 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-simd.cc
@@ -430,6 +430,46 @@ WASM_SIMD_TEST(F32x4Le) {
RunF32x4CompareOpTest(execution_tier, kExprF32x4Le, LessEqual);
}
+template <typename ScalarType>
+void RunShiftAddTestSequence(TestExecutionTier execution_tier,
+ WasmOpcode shiftr_opcode, WasmOpcode add_opcode,
+ WasmOpcode splat_opcode, int32_t imm,
+ ScalarType (*shift_fn)(ScalarType, int32_t)) {
+ WasmRunner<int32_t, ScalarType> r(execution_tier);
+  // Globals to store results for the two operand orders of the add.
+ ScalarType* g1 = r.builder().template AddGlobal<ScalarType>(kWasmS128);
+ ScalarType* g2 = r.builder().template AddGlobal<ScalarType>(kWasmS128);
+ byte param = 0;
+ byte temp1 = r.AllocateLocal(kWasmS128);
+ byte temp2 = r.AllocateLocal(kWasmS128);
+ auto expected_fn = [shift_fn](ScalarType x, ScalarType y, uint32_t imm) {
+ return base::AddWithWraparound(x, shift_fn(y, imm));
+ };
+ BUILD(
+ r,
+ WASM_LOCAL_SET(temp1, WASM_SIMD_OPN(splat_opcode, WASM_LOCAL_GET(param))),
+ WASM_LOCAL_SET(temp2, WASM_SIMD_OPN(splat_opcode, WASM_LOCAL_GET(param))),
+ WASM_GLOBAL_SET(0, WASM_SIMD_BINOP(add_opcode,
+ WASM_SIMD_BINOP(shiftr_opcode,
+ WASM_LOCAL_GET(temp2),
+ WASM_I32V(imm)),
+ WASM_LOCAL_GET(temp1))),
+ WASM_GLOBAL_SET(1, WASM_SIMD_BINOP(add_opcode, WASM_LOCAL_GET(temp1),
+ WASM_SIMD_BINOP(shiftr_opcode,
+ WASM_LOCAL_GET(temp2),
+ WASM_I32V(imm)))),
+
+ WASM_ONE);
+ for (ScalarType x : compiler::ValueHelper::GetVector<ScalarType>()) {
+ r.Call(x);
+ ScalarType expected = expected_fn(x, x, imm);
+ for (size_t i = 0; i < kSimd128Size / sizeof(ScalarType); i++) {
+ CHECK_EQ(expected, LANE(g1, i));
+ CHECK_EQ(expected, LANE(g2, i));
+ }
+ }
+}
+
WASM_SIMD_TEST(I64x2Splat) {
WasmRunner<int32_t, int64_t> r(execution_tier);
// Set up a global to hold output vector.
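RunShiftAddTestSequence pins down the value of every output lane. As a scalar reference, a standalone model (ExpectedShiftAdd and its unsigned-wraparound add are assumptions of this sketch; shift_fn stands in for the LogicalShiftRight/ArithmeticShiftRight helpers the existing shift tests already use):

    #include <cstdint>
    #include <type_traits>

    // Per-lane model: expected = wraparound_add(x, shift_fn(x, imm)).
    // Wasm SIMD shifts use the shift amount modulo the lane width, which the
    // reference shift_fn is expected to handle.
    template <typename T>
    T ExpectedShiftAdd(T x, int32_t imm, T (*shift_fn)(T, int32_t)) {
      using U = typename std::make_unsigned<T>::type;
      return static_cast<T>(static_cast<U>(x) + static_cast<U>(shift_fn(x, imm)));
    }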
@@ -500,6 +540,17 @@ WASM_SIMD_TEST(I64x2ShrU) {
RunI64x2ShiftOpTest(execution_tier, kExprI64x2ShrU, LogicalShiftRight);
}
+WASM_SIMD_TEST(I64x2ShiftAdd) {
+ for (int imm = 0; imm <= 64; imm++) {
+ RunShiftAddTestSequence<int64_t>(execution_tier, kExprI64x2ShrU,
+ kExprI64x2Add, kExprI64x2Splat, imm,
+ LogicalShiftRight);
+ RunShiftAddTestSequence<int64_t>(execution_tier, kExprI64x2ShrS,
+ kExprI64x2Add, kExprI64x2Splat, imm,
+ ArithmeticShiftRight);
+ }
+}
+
WASM_SIMD_TEST(I64x2Add) {
RunI64x2BinOpTest(execution_tier, kExprI64x2Add, base::AddWithWraparound);
}
@@ -1350,6 +1401,17 @@ WASM_SIMD_TEST(I32x4ShrU) {
RunI32x4ShiftOpTest(execution_tier, kExprI32x4ShrU, LogicalShiftRight);
}
+WASM_SIMD_TEST(I32x4ShiftAdd) {
+ for (int imm = 0; imm <= 32; imm++) {
+ RunShiftAddTestSequence<int32_t>(execution_tier, kExprI32x4ShrU,
+ kExprI32x4Add, kExprI32x4Splat, imm,
+ LogicalShiftRight);
+ RunShiftAddTestSequence<int32_t>(execution_tier, kExprI32x4ShrS,
+ kExprI32x4Add, kExprI32x4Splat, imm,
+ ArithmeticShiftRight);
+ }
+}
+
// Tests both signed and unsigned conversion from I8x16 (unpacking).
WASM_SIMD_TEST(I16x8ConvertI8x16) {
WasmRunner<int32_t, int32_t> r(execution_tier);
@@ -1623,6 +1685,77 @@ WASM_SIMD_TEST(I64x2ExtMulHighI32x4U) {
MulHalf::kHigh);
}
+namespace {
+// Test add(mul(x, y), z) optimizations.
+template <typename S, typename T>
+void RunExtMulAddOptimizationTest(TestExecutionTier execution_tier,
+ WasmOpcode ext_mul, WasmOpcode narrow_splat,
+ WasmOpcode wide_splat, WasmOpcode wide_add,
+ std::function<T(T, T)> addop) {
+ WasmRunner<int32_t, S, T> r(execution_tier);
+ T* g = r.builder().template AddGlobal<T>(kWasmS128);
+
+ // global[0] =
+ // add(
+ // splat(local[1]),
+ // extmul(splat(local[0]), splat(local[0])))
+ BUILD(r,
+ WASM_GLOBAL_SET(
+ 0, WASM_SIMD_BINOP(
+ wide_add, WASM_SIMD_UNOP(wide_splat, WASM_LOCAL_GET(1)),
+ WASM_SIMD_BINOP(
+ ext_mul, WASM_SIMD_UNOP(narrow_splat, WASM_LOCAL_GET(0)),
+ WASM_SIMD_UNOP(narrow_splat, WASM_LOCAL_GET(0))))),
+ WASM_ONE);
+
+ constexpr int lanes = kSimd128Size / sizeof(T);
+ for (S x : compiler::ValueHelper::GetVector<S>()) {
+ for (T y : compiler::ValueHelper::GetVector<T>()) {
+ r.Call(x, y);
+
+ T expected = addop(MultiplyLong<T, S>(x, x), y);
+ for (int i = 0; i < lanes; i++) {
+ CHECK_EQ(expected, LANE(g, i));
+ }
+ }
+ }
+}
+} // namespace
+
+// Helper which defines high/low, signed/unsigned test cases for extmul + add
+// optimization.
+#define EXTMUL_ADD_OPTIMIZATION_TEST(NarrowType, NarrowShape, WideType, \
+ WideShape) \
+ WASM_SIMD_TEST(WideShape##ExtMulLow##NarrowShape##SAddOptimization) { \
+ RunExtMulAddOptimizationTest<NarrowType, WideType>( \
+ execution_tier, kExpr##WideShape##ExtMulLow##NarrowShape##S, \
+ kExpr##NarrowShape##Splat, kExpr##WideShape##Splat, \
+ kExpr##WideShape##Add, base::AddWithWraparound<WideType>); \
+ } \
+ WASM_SIMD_TEST(WideShape##ExtMulHigh##NarrowShape##SAddOptimization) { \
+ RunExtMulAddOptimizationTest<NarrowType, WideType>( \
+ execution_tier, kExpr##WideShape##ExtMulHigh##NarrowShape##S, \
+ kExpr##NarrowShape##Splat, kExpr##WideShape##Splat, \
+ kExpr##WideShape##Add, base::AddWithWraparound<WideType>); \
+ } \
+ WASM_SIMD_TEST(WideShape##ExtMulLow##NarrowShape##UAddOptimization) { \
+ RunExtMulAddOptimizationTest<u##NarrowType, u##WideType>( \
+ execution_tier, kExpr##WideShape##ExtMulLow##NarrowShape##U, \
+ kExpr##NarrowShape##Splat, kExpr##WideShape##Splat, \
+ kExpr##WideShape##Add, std::plus<u##WideType>()); \
+ } \
+ WASM_SIMD_TEST(WideShape##ExtMulHigh##NarrowShape##UAddOptimization) { \
+ RunExtMulAddOptimizationTest<u##NarrowType, u##WideType>( \
+ execution_tier, kExpr##WideShape##ExtMulHigh##NarrowShape##U, \
+ kExpr##NarrowShape##Splat, kExpr##WideShape##Splat, \
+ kExpr##WideShape##Add, std::plus<u##WideType>()); \
+ }
+
+EXTMUL_ADD_OPTIMIZATION_TEST(int8_t, I8x16, int16_t, I16x8)
+EXTMUL_ADD_OPTIMIZATION_TEST(int16_t, I16x8, int32_t, I32x4)
+
+#undef EXTMUL_ADD_OPTIMIZATION_TEST
+
WASM_SIMD_TEST(I32x4DotI16x8S) {
WasmRunner<int32_t, int16_t, int16_t> r(execution_tier);
int32_t* g = r.builder().template AddGlobal<int32_t>(kWasmS128);
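The extmul-add tests above fix each wide output lane to addop(MultiplyLong(x, x), y). A standalone scalar model (illustrative only; ExpectedExtMulAdd is not a V8 helper):

    #include <cstdint>
    #include <type_traits>

    // S is the narrow lane type, T the twice-as-wide one
    // (int8_t -> int16_t, int16_t -> int32_t, and their unsigned variants).
    template <typename S, typename T>
    T ExpectedExtMulAdd(S x, T y) {
      using U = typename std::make_unsigned<T>::type;
      T product = static_cast<T>(static_cast<T>(x) * static_cast<T>(x));   // widen, then square
      return static_cast<T>(static_cast<U>(product) + static_cast<U>(y));  // wraparound add
    }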
@@ -1660,6 +1793,17 @@ WASM_SIMD_TEST(I16x8ShrU) {
RunI16x8ShiftOpTest(execution_tier, kExprI16x8ShrU, LogicalShiftRight);
}
+WASM_SIMD_TEST(I16x8ShiftAdd) {
+ for (int imm = 0; imm <= 16; imm++) {
+ RunShiftAddTestSequence<int16_t>(execution_tier, kExprI16x8ShrU,
+ kExprI16x8Add, kExprI16x8Splat, imm,
+ LogicalShiftRight);
+ RunShiftAddTestSequence<int16_t>(execution_tier, kExprI16x8ShrS,
+ kExprI16x8Add, kExprI16x8Splat, imm,
+ ArithmeticShiftRight);
+ }
+}
+
WASM_SIMD_TEST(I8x16Neg) {
RunI8x16UnOpTest(execution_tier, kExprI8x16Neg, base::NegateWithWraparound);
}
@@ -1817,6 +1961,17 @@ WASM_SIMD_TEST(I8x16ShrU) {
RunI8x16ShiftOpTest(execution_tier, kExprI8x16ShrU, LogicalShiftRight);
}
+WASM_SIMD_TEST(I8x16ShiftAdd) {
+ for (int imm = 0; imm <= 8; imm++) {
+ RunShiftAddTestSequence<int8_t>(execution_tier, kExprI8x16ShrU,
+ kExprI8x16Add, kExprI8x16Splat, imm,
+ LogicalShiftRight);
+ RunShiftAddTestSequence<int8_t>(execution_tier, kExprI8x16ShrS,
+ kExprI8x16Add, kExprI8x16Splat, imm,
+ ArithmeticShiftRight);
+ }
+}
+
// Test Select by making a mask where the 0th and 3rd lanes are true and the
// rest false, and comparing for non-equality with zero to convert to a boolean
// vector.
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm-wrappers.cc b/deps/v8/test/cctest/wasm/test-run-wasm-wrappers.cc
index d082f02c51..fd6316c377 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm-wrappers.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm-wrappers.cc
@@ -28,6 +28,7 @@ Handle<WasmInstanceObject> CompileModule(Zone* zone, Isolate* isolate,
MaybeHandle<WasmInstanceObject> maybe_instance =
CompileAndInstantiateForTesting(
isolate, &thrower, ModuleWireBytes(buffer.begin(), buffer.end()));
+ CHECK_WITH_MSG(!thrower.error(), thrower.error_msg());
return maybe_instance.ToHandleChecked();
}
@@ -302,10 +303,15 @@ TEST(WrapperReplacement_IndirectExport) {
uint32_t function_index = f->func_index();
// Export a table of indirect functions.
- uint32_t table_index = builder->AllocateIndirectFunctions(2);
+ const uint32_t table_size = 2;
+ const uint32_t table_index =
+ builder->AddTable(kWasmFuncRef, table_size, table_size);
builder->AddExport(base::CStrVector("exported_table"), kExternalTable, 0);
+
// Point from the exported table to the Wasm function.
- builder->SetIndirectFunction(0, function_index);
+ builder->SetIndirectFunction(
+ table_index, 0, function_index,
+ WasmModuleBuilder::WasmElemSegment::kRelativeToImports);
// Compile the module.
Handle<WasmInstanceObject> instance =
diff --git a/deps/v8/test/cctest/wasm/test-run-wasm.cc b/deps/v8/test/cctest/wasm/test-run-wasm.cc
index 366a614cc0..e0ef82d8d7 100644
--- a/deps/v8/test/cctest/wasm/test-run-wasm.cc
+++ b/deps/v8/test/cctest/wasm/test-run-wasm.cc
@@ -11,6 +11,7 @@
#include "src/base/platform/elapsed-timer.h"
#include "src/codegen/assembler-inl.h"
#include "src/utils/utils.h"
+#include "src/wasm/code-space-access.h"
#include "src/wasm/wasm-opcodes-inl.h"
#include "test/cctest/cctest.h"
#include "test/cctest/compiler/value-helper.h"
@@ -3880,28 +3881,31 @@ TEST(Liftoff_tier_up) {
r.builder().instance_object()->module_object().native_module();
// This test only works if we managed to compile with Liftoff.
- if (native_module->GetCode(add.function_index())->is_liftoff()) {
- // First run should execute {add}.
- CHECK_EQ(18, r.Call(11, 7));
-
- // Now make a copy of the {sub} function, and add it to the native module at
- // the index of {add}.
- CodeDesc desc;
- memset(&desc, 0, sizeof(CodeDesc));
- WasmCode* sub_code = native_module->GetCode(sub.function_index());
- size_t sub_size = sub_code->instructions().size();
- std::unique_ptr<byte[]> buffer(new byte[sub_code->instructions().size()]);
- memcpy(buffer.get(), sub_code->instructions().begin(), sub_size);
- desc.buffer = buffer.get();
- desc.instr_size = static_cast<int>(sub_size);
+ if (!native_module->GetCode(add.function_index())->is_liftoff()) return;
+
+ // First run should execute {add}.
+ CHECK_EQ(18, r.Call(11, 7));
+
+ // Now make a copy of the {sub} function, and add it to the native module at
+ // the index of {add}.
+ CodeDesc desc;
+ memset(&desc, 0, sizeof(CodeDesc));
+ WasmCode* sub_code = native_module->GetCode(sub.function_index());
+ size_t sub_size = sub_code->instructions().size();
+ std::unique_ptr<byte[]> buffer(new byte[sub_code->instructions().size()]);
+ memcpy(buffer.get(), sub_code->instructions().begin(), sub_size);
+ desc.buffer = buffer.get();
+ desc.instr_size = static_cast<int>(sub_size);
+ {
+ CodeSpaceWriteScope write_scope(native_module);
std::unique_ptr<WasmCode> new_code = native_module->AddCode(
add.function_index(), desc, 0, 0, {}, {}, WasmCode::kFunction,
ExecutionTier::kTurbofan, kNoDebugging);
native_module->PublishCode(std::move(new_code));
-
- // Second run should now execute {sub}.
- CHECK_EQ(4, r.Call(11, 7));
}
+
+ // Second run should now execute {sub}.
+ CHECK_EQ(4, r.Call(11, 7));
}
TEST(Regression_1085507) {
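The rewritten test body also shows the pattern the new CodeSpaceWriteScope imposes: code-space writes are only legal while the scope is alive. A sketch of the discipline (InstallReplacementCode is a hypothetical helper; the AddCode argument list follows the test above):

    // Writes to the module's code region happen inside the RAII scope.
    void InstallReplacementCode(NativeModule* native_module, int func_index,
                                const CodeDesc& desc) {
      CodeSpaceWriteScope write_scope(native_module);  // region becomes writable
      std::unique_ptr<WasmCode> new_code = native_module->AddCode(
          func_index, desc, 0, 0, {}, {}, WasmCode::kFunction,
          ExecutionTier::kTurbofan, kNoDebugging);
      native_module->PublishCode(std::move(new_code));
    }  // scope destructor restores the previous protection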
diff --git a/deps/v8/test/cctest/wasm/test-wasm-serialization.cc b/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
index a3359c6b3e..cfdbba0541 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-serialization.cc
@@ -5,6 +5,7 @@
#include <stdlib.h>
#include <string.h>
+#include "include/v8-wasm.h"
#include "src/api/api-inl.h"
#include "src/objects/objects-inl.h"
#include "src/snapshot/code-serializer.h"
@@ -16,7 +17,6 @@
#include "src/wasm/wasm-objects-inl.h"
#include "src/wasm/wasm-opcodes.h"
#include "src/wasm/wasm-serialization.h"
-
#include "test/cctest/cctest.h"
#include "test/common/wasm/flag-utils.h"
#include "test/common/wasm/test-signatures.h"
diff --git a/deps/v8/test/cctest/wasm/test-wasm-stack.cc b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
index 5e59f13c3f..a5179c04ca 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-stack.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-stack.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/codegen/assembler-inl.h"
#include "src/objects/stack-frame-info-inl.h"
diff --git a/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
index 2ffd72aaaf..9d5378c9ab 100644
--- a/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
+++ b/deps/v8/test/cctest/wasm/test-wasm-trap-position.cc
@@ -2,6 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/codegen/assembler-inl.h"
#include "src/trap-handler/trap-handler.h"
diff --git a/deps/v8/test/cctest/wasm/wasm-run-utils.cc b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
index 3c3ba34d5a..0c44578db4 100644
--- a/deps/v8/test/cctest/wasm/wasm-run-utils.cc
+++ b/deps/v8/test/cctest/wasm/wasm-run-utils.cc
@@ -9,6 +9,7 @@
#include "src/diagnostics/code-tracer.h"
#include "src/heap/heap-inl.h"
#include "src/wasm/baseline/liftoff-compiler.h"
+#include "src/wasm/code-space-access.h"
#include "src/wasm/graph-builder-interface.h"
#include "src/wasm/leb-helper.h"
#include "src/wasm/module-compiler.h"
@@ -69,7 +70,6 @@ TestingModuleBuilder::TestingModuleBuilder(
if (maybe_import) {
// Manually compile an import wrapper and insert it into the instance.
- CodeSpaceMemoryModificationScope modification_scope(isolate_->heap());
auto resolved = compiler::ResolveWasmImportCall(
maybe_import->js_function, maybe_import->sig,
instance_object_->module(), enabled_features_);
@@ -82,6 +82,7 @@ TestingModuleBuilder::TestingModuleBuilder(
static_cast<int>(maybe_import->sig->parameter_count()));
auto import_wrapper = cache_scope[key];
if (import_wrapper == nullptr) {
+ CodeSpaceWriteScope write_scope(native_module_);
import_wrapper = CompileImportWrapper(
native_module_, isolate_->counters(), kind, maybe_import->sig,
static_cast<int>(maybe_import->sig->parameter_count()), &cache_scope);
@@ -383,15 +384,16 @@ void TestBuildingGraphWithBuilder(compiler::WasmGraphBuilder* builder,
std::vector<compiler::WasmLoopInfo> loops;
DecodeResult result =
BuildTFGraph(zone->allocator(), WasmFeatures::All(), nullptr, builder,
- &unused_detected_features, body, &loops, nullptr, 0);
+ &unused_detected_features, body, &loops, nullptr, 0,
+ kInstrumentEndpoints);
if (result.failed()) {
#ifdef DEBUG
if (!FLAG_trace_wasm_decoder) {
// Retry the compilation with the tracing flag on, to help in debugging.
FLAG_trace_wasm_decoder = true;
- result =
- BuildTFGraph(zone->allocator(), WasmFeatures::All(), nullptr, builder,
- &unused_detected_features, body, &loops, nullptr, 0);
+ result = BuildTFGraph(zone->allocator(), WasmFeatures::All(), nullptr,
+ builder, &unused_detected_features, body, &loops,
+ nullptr, 0, kInstrumentEndpoints);
}
#endif
diff --git a/deps/v8/test/common/wasm/wasm-interpreter.cc b/deps/v8/test/common/wasm/wasm-interpreter.cc
index 84871cccb6..a0beab1a90 100644
--- a/deps/v8/test/common/wasm/wasm-interpreter.cc
+++ b/deps/v8/test/common/wasm/wasm-interpreter.cc
@@ -968,12 +968,17 @@ class SideTable : public ZoneObject {
Control* c = &control_stack.back();
const size_t new_stack_size = control_stack.size() - 1;
const size_t max_depth = new_stack_size - 1;
- if (imm.depth < max_depth) {
+ size_t target_depth = imm.depth;
+ while (target_depth < max_depth &&
+ *control_stack[max_depth - target_depth].pc != kExprTry) {
+ target_depth++;
+ }
+ if (target_depth < max_depth) {
constexpr int kUnusedControlIndex = -1;
c->else_label->Bind(i.pc(), kRethrowOrDelegateExceptionIndex,
kUnusedControlIndex);
c->else_label->Finish(&map_, code->start);
- Control* target = &control_stack[max_depth - imm.depth];
+ Control* target = &control_stack[max_depth - target_depth];
DCHECK_EQ(*target->pc, kExprTry);
DCHECK_NOT_NULL(target->else_label);
if (!control_parent().unreachable) {
@@ -1621,8 +1626,7 @@ class WasmInterpreterInternals {
DCHECK_GE(instance_object_->memory_size(), index);
// Compute the effective address of the access, making sure to condition
// the index even in the in-bounds case.
- return reinterpret_cast<Address>(instance_object_->memory_start()) +
- (index & instance_object_->memory_mask());
+ return reinterpret_cast<Address>(instance_object_->memory_start()) + index;
}
template <typename mtype>
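The SideTable change resolves delegate labels that point at non-try frames by walking outward until a try is found, or the stack is exhausted, which means delegating to the caller. A standalone model of the search (illustrative types, not the interpreter's):

    #include <cstddef>
    #include <vector>

    // is_try[d]: whether the control frame at depth d, counted outward from
    // the delegating try, is itself a try. A result >= is_try.size() means
    // "delegate to the caller".
    size_t ResolveDelegateTarget(const std::vector<bool>& is_try, size_t depth) {
      while (depth < is_try.size() && !is_try[depth]) ++depth;
      return depth;
    }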
diff --git a/deps/v8/test/common/wasm/wasm-macro-gen.h b/deps/v8/test/common/wasm/wasm-macro-gen.h
index dbe6100876..4203c22696 100644
--- a/deps/v8/test/common/wasm/wasm-macro-gen.h
+++ b/deps/v8/test/common/wasm/wasm-macro-gen.h
@@ -894,6 +894,7 @@ inline WasmOpcode LoadStoreOpcodeOf(MachineType type, bool store) {
#define TO_BYTE(val) static_cast<byte>(val)
// Encode all simd ops as a 2-byte LEB.
#define WASM_SIMD_OP(op) kSimdPrefix, U32V_2(op & 0xff)
+#define WASM_SIMD_OPN(op, ...) __VA_ARGS__, WASM_SIMD_OP(op)
#define WASM_SIMD_SPLAT(Type, ...) __VA_ARGS__, WASM_SIMD_OP(kExpr##Type##Splat)
#define WASM_SIMD_UNOP(op, x) x, WASM_SIMD_OP(op)
#define WASM_SIMD_BINOP(op, x, y) x, y, WASM_SIMD_OP(op)
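WASM_SIMD_OPN generalizes the fixed-arity helpers: operands are emitted first, then the two-byte LEB SIMD opcode, so a one-operand use produces the same bytes as WASM_SIMD_UNOP. Typical use, as in the new shift-add tests (param and temp1 as declared there):

    // Splat a scalar local into a v128 local; roughly expands to
    // <get param> kSimdPrefix U32V_2(kExprI64x2Splat) <set temp1>.
    WASM_LOCAL_SET(temp1, WASM_SIMD_OPN(kExprI64x2Splat, WASM_LOCAL_GET(param)))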
diff --git a/deps/v8/test/debugger/debug/regress/regress-opt-after-debug-deopt.js b/deps/v8/test/debugger/debug/regress/regress-opt-after-debug-deopt.js
index b39c97b7d7..71bff57c5b 100644
--- a/deps/v8/test/debugger/debug/regress/regress-opt-after-debug-deopt.js
+++ b/deps/v8/test/debugger/debug/regress/regress-opt-after-debug-deopt.js
@@ -25,14 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --concurrent-recompilation --block-concurrent-recompilation
+// Flags: --concurrent-recompilation
// Flags: --no-always-opt
-if (!%IsConcurrentRecompilationSupported()) {
- print("Concurrent recompilation is disabled. Skipping this test.");
- quit();
-}
-
Debug = debug.Debug;
function listener(event, exec_state, event_data, data) {
@@ -58,19 +53,19 @@ var f = function() {
%PrepareFunctionForOptimization(f);
f();
f();
-%OptimizeFunctionOnNextCall(f, "concurrent"); // Mark with builtin.
+%DisableOptimizationFinalization();
+%OptimizeFunctionOnNextCall(f, "concurrent");
f(); // Kick off concurrent recompilation.
+%WaitForBackgroundOptimization();
// After compile graph has been created...
Debug.setListener(listener); // Activate debugger.
Debug.setBreakPoint(f, 2, 0); // Force deopt.
+assertUnoptimized(f);
+%FinalizeOptimization();
-// At this point, concurrent recompilation is still being blocked.
-assertUnoptimized(f, "no sync");
-// Let concurrent recompilation proceed.
-%UnblockConcurrentRecompilation();
-// Sync with optimization thread. But no optimized code is installed.
-assertUnoptimized(f, "sync");
+// No optimized code was installed.
+assertUnoptimized(f);
f(); // Trigger break point.
assertEquals(1, listened);
diff --git a/deps/v8/test/debugger/debug/regress/regress-prepare-break-while-recompile.js b/deps/v8/test/debugger/debug/regress/regress-prepare-break-while-recompile.js
index 83d2181b99..f781a06725 100644
--- a/deps/v8/test/debugger/debug/regress/regress-prepare-break-while-recompile.js
+++ b/deps/v8/test/debugger/debug/regress/regress-prepare-break-while-recompile.js
@@ -25,14 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --concurrent-recompilation --block-concurrent-recompilation
+// Flags: --concurrent-recompilation
// Flags: --no-always-opt
-if (!%IsConcurrentRecompilationSupported()) {
- print("Concurrent recompilation is disabled. Skipping this test.");
- quit();
-}
-
Debug = debug.Debug
function foo() {
@@ -49,6 +44,7 @@ function bar() {
foo();
foo();
// Mark and kick off recompilation.
+%DisableOptimizationFinalization();
%OptimizeFunctionOnNextCall(foo, "concurrent");
foo();
@@ -56,16 +52,14 @@ foo();
// and (shared) unoptimized code on foo, and sets both to lazy-compile builtin.
// Clear the break point immediately after to deactivate the debugger.
// Do all of this after compile graph has been created.
+%WaitForBackgroundOptimization();
Debug.setListener(function(){});
Debug.setBreakPoint(bar, 0, 0);
Debug.clearAllBreakPoints();
Debug.setListener(null);
-
-// At this point, concurrent recompilation is still blocked.
-assertUnoptimized(foo, "no sync");
-// Let concurrent recompilation proceed.
-%UnblockConcurrentRecompilation();
+assertUnoptimized(foo);
// Install optimized code when concurrent optimization finishes.
// This needs to be able to deal with shared code being a builtin.
-assertUnoptimized(foo, "sync");
+%FinalizeOptimization();
+assertUnoptimized(foo);
diff --git a/deps/v8/test/debugger/regress/regress-7421.js b/deps/v8/test/debugger/regress/regress-7421.js
index dfac06d864..2a1a5a7299 100644
--- a/deps/v8/test/debugger/regress/regress-7421.js
+++ b/deps/v8/test/debugger/regress/regress-7421.js
@@ -2,8 +2,6 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --block-concurrent-recompilation
-
Debug = debug.Debug
// Test that the side-effect check is not bypassed in optimized code.
@@ -60,10 +58,10 @@ function listener(event, exec_state, event_data, data) {
"%OptimizeFunctionOnNextCall(wrapper2); wrapper2(true)");
%PrepareFunctionForOptimization(wrapper2);
+ %DisableOptimizationFinalization();
%OptimizeFunctionOnNextCall(wrapper2, "concurrent");
wrapper2(false);
- fail("%UnblockConcurrentRecompilation();" +
- "%GetOptimizationStatus(wrapper2, 'sync');" +
+ fail("%FinalizeOptimization();" +
"wrapper2(true);");
} catch (e) {
exception = e;
diff --git a/deps/v8/test/fuzzer/fuzzer-support.cc b/deps/v8/test/fuzzer/fuzzer-support.cc
index 608e4875ca..cfa737b70c 100644
--- a/deps/v8/test/fuzzer/fuzzer-support.cc
+++ b/deps/v8/test/fuzzer/fuzzer-support.cc
@@ -9,6 +9,8 @@
#include <string.h>
#include "include/libplatform/libplatform.h"
+#include "include/v8-context.h"
+#include "include/v8-initialization.h"
#include "src/flags/flags.h"
#include "src/trap-handler/trap-handler.h"
@@ -21,6 +23,11 @@ FuzzerSupport::FuzzerSupport(int* argc, char*** argv) {
v8::V8::InitializeExternalStartupData((*argv)[0]);
platform_ = v8::platform::NewDefaultPlatform();
v8::V8::InitializePlatform(platform_.get());
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ if (!v8::V8::InitializeVirtualMemoryCage()) {
+ FATAL("Could not initialize the virtual memory cage");
+ }
+#endif
v8::V8::Initialize();
allocator_ = v8::ArrayBuffer::Allocator::NewDefaultAllocator();
diff --git a/deps/v8/test/fuzzer/fuzzer-support.h b/deps/v8/test/fuzzer/fuzzer-support.h
index 7b967073b5..247d91a96e 100644
--- a/deps/v8/test/fuzzer/fuzzer-support.h
+++ b/deps/v8/test/fuzzer/fuzzer-support.h
@@ -8,13 +8,22 @@
#include <memory>
#include "include/libplatform/libplatform.h"
-#include "include/v8.h"
+#include "include/v8-array-buffer.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-persistent-handle.h"
+
+namespace v8 {
+class Context;
+class Isolate;
+} // namespace v8
namespace v8_fuzzer {
class FuzzerSupport {
public:
FuzzerSupport(int* argc, char*** argv);
+ FuzzerSupport(const FuzzerSupport&) = delete;
+ FuzzerSupport& operator=(const FuzzerSupport&) = delete;
~FuzzerSupport();
@@ -30,10 +39,6 @@ class FuzzerSupport {
v8::platform::MessageLoopBehavior::kDoNotWait);
private:
- // Prevent copying. Not implemented.
- FuzzerSupport(const FuzzerSupport&);
- FuzzerSupport& operator=(const FuzzerSupport&);
-
static std::unique_ptr<FuzzerSupport> fuzzer_support_;
std::unique_ptr<v8::Platform> platform_;
v8::ArrayBuffer::Allocator* allocator_;
diff --git a/deps/v8/test/fuzzer/inspector-fuzzer.cc b/deps/v8/test/fuzzer/inspector-fuzzer.cc
index 348e79820c..280a7b1afd 100644
--- a/deps/v8/test/fuzzer/inspector-fuzzer.cc
+++ b/deps/v8/test/fuzzer/inspector-fuzzer.cc
@@ -12,7 +12,11 @@
#include <vector>
#include "include/libplatform/libplatform.h"
-#include "include/v8.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-object.h"
+#include "include/v8-primitive.h"
+#include "include/v8-template.h"
#include "src/base/platform/platform.h"
#include "src/base/platform/time.h"
#include "src/base/small-vector.h"
@@ -32,7 +36,7 @@ namespace {
base::SmallVector<TaskRunner*, 2> task_runners;
-class UtilsExtension : public IsolateData::SetupGlobalTask {
+class UtilsExtension : public InspectorIsolateData::SetupGlobalTask {
public:
~UtilsExtension() override = default;
void Run(v8::Isolate* isolate,
@@ -116,7 +120,8 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
ToVector(args.GetIsolate(), args[2].As<v8::String>());
int context_group_id = args[0].As<v8::Int32>()->Value();
RunSyncTask(backend_runner_,
- [&context_group_id, &reason, &details](IsolateData* data) {
+ [&context_group_id, &reason,
+ &details](InspectorIsolateData* data) {
data->SchedulePauseOnNextStatement(
context_group_id,
v8_inspector::StringView(reason.data(), reason.size()),
@@ -130,9 +135,10 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
return;
}
int context_group_id = args[0].As<v8::Int32>()->Value();
- RunSyncTask(backend_runner_, [&context_group_id](IsolateData* data) {
- data->CancelPauseOnNextStatement(context_group_id);
- });
+ RunSyncTask(backend_runner_,
+ [&context_group_id](InspectorIsolateData* data) {
+ data->CancelPauseOnNextStatement(context_group_id);
+ });
}
static void CreateContextGroup(
@@ -141,9 +147,10 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
return;
}
int context_group_id = 0;
- RunSyncTask(backend_runner_, [&context_group_id](IsolateData* data) {
- context_group_id = data->CreateContextGroup();
- });
+ RunSyncTask(backend_runner_,
+ [&context_group_id](InspectorIsolateData* data) {
+ context_group_id = data->CreateContextGroup();
+ });
args.GetReturnValue().Set(
v8::Int32::New(args.GetIsolate(), context_group_id));
}
@@ -154,9 +161,10 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
return;
}
int context_group_id = args[0].As<v8::Int32>()->Value();
- RunSyncTask(backend_runner_, [&context_group_id](IsolateData* data) {
- data->ResetContextGroup(context_group_id);
- });
+ RunSyncTask(backend_runner_,
+ [&context_group_id](InspectorIsolateData* data) {
+ data->ResetContextGroup(context_group_id);
+ });
}
static void ConnectSession(const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -166,8 +174,8 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
}
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
FrontendChannelImpl* channel = new FrontendChannelImpl(
- IsolateData::FromContext(context)->task_runner(),
- IsolateData::FromContext(context)->GetContextGroupId(context),
+ InspectorIsolateData::FromContext(context)->task_runner(),
+ InspectorIsolateData::FromContext(context)->GetContextGroupId(context),
args.GetIsolate(), args[2].As<v8::Function>());
std::vector<uint8_t> state =
@@ -175,7 +183,7 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
int context_group_id = args[0].As<v8::Int32>()->Value();
int session_id = 0;
RunSyncTask(backend_runner_, [&context_group_id, &session_id, &channel,
- &state](IsolateData* data) {
+ &state](InspectorIsolateData* data) {
session_id = data->ConnectSession(
context_group_id,
v8_inspector::StringView(state.data(), state.size()), channel);
@@ -193,9 +201,10 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
}
int session_id = args[0].As<v8::Int32>()->Value();
std::vector<uint8_t> state;
- RunSyncTask(backend_runner_, [&session_id, &state](IsolateData* data) {
- state = data->DisconnectSession(session_id);
- });
+ RunSyncTask(backend_runner_,
+ [&session_id, &state](InspectorIsolateData* data) {
+ state = data->DisconnectSession(session_id);
+ });
channels_.erase(session_id);
args.GetReturnValue().Set(ToV8String(args.GetIsolate(), state));
}
@@ -223,7 +232,7 @@ bool StrictAccessCheck(v8::Local<v8::Context> accessing_context,
return accessing_context.IsEmpty();
}
-class InspectorExtension : public IsolateData::SetupGlobalTask {
+class InspectorExtension : public InspectorIsolateData::SetupGlobalTask {
public:
~InspectorExtension() override = default;
void Run(v8::Isolate* isolate,
@@ -288,7 +297,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
static void FireContextCreated(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
data->FireContextCreated(context, data->GetContextGroupId(context),
v8_inspector::StringView());
}
@@ -296,13 +305,13 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
static void FireContextDestroyed(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
data->FireContextDestroyed(context);
}
static void FreeContext(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
data->FreeContext(context);
}
@@ -312,7 +321,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
return;
}
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
data->AddInspectedObject(args[0].As<v8::Int32>()->Value(), args[1]);
}
@@ -321,7 +330,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
if (args.Length() != 1 || !args[0]->IsInt32()) {
return;
}
- IsolateData::FromContext(args.GetIsolate()->GetCurrentContext())
+ InspectorIsolateData::FromContext(args.GetIsolate()->GetCurrentContext())
->SetMaxAsyncTaskStacksForTest(args[0].As<v8::Int32>()->Value());
}
@@ -330,7 +339,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
if (args.Length() != 0) {
return;
}
- IsolateData::FromContext(args.GetIsolate()->GetCurrentContext())
+ InspectorIsolateData::FromContext(args.GetIsolate()->GetCurrentContext())
->DumpAsyncTaskStacksStateForTest();
}
@@ -339,7 +348,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
return;
}
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
std::vector<uint16_t> reason =
ToVector(args.GetIsolate(), args[0].As<v8::String>());
v8_inspector::StringView reason_view(reason.data(), reason.size());
@@ -376,7 +385,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
ToVector(args.GetIsolate(), args[2].As<v8::String>());
v8_inspector::StringView details_view(details.data(), details.size());
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
int context_group_id = data->GetContextGroupId(context);
data->SchedulePauseOnNextStatement(context_group_id, reason_view,
details_view);
@@ -438,7 +447,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
}
v8::Isolate* isolate = args.GetIsolate();
v8::Local<v8::Context> context = isolate->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
std::vector<uint16_t> description =
ToVector(isolate, args[0].As<v8::String>());
v8_inspector::StringView description_view(description.data(),
@@ -458,7 +467,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
return;
}
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
v8_inspector::V8StackTraceId* id =
static_cast<v8_inspector::V8StackTraceId*>(
args[0].As<v8::ArrayBuffer>()->GetBackingStore()->Data());
@@ -471,7 +480,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
return;
}
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
v8_inspector::V8StackTraceId* id =
static_cast<v8_inspector::V8StackTraceId*>(
args[0].As<v8::ArrayBuffer>()->GetBackingStore()->Data());
@@ -486,7 +495,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
}
v8::Isolate* isolate = args.GetIsolate();
v8::Local<v8::Context> context = isolate->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
int context_group_id = data->GetContextGroupId(context);
bool with_empty_stack = args[2].As<v8::Boolean>()->Value();
if (with_empty_stack) context->Exit();
@@ -518,7 +527,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
}
v8::Isolate* isolate = args.GetIsolate();
v8::Local<v8::Context> context = isolate->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
data->SetResourceNamePrefix(v8::Local<v8::String>::Cast(args[0]));
}
};
@@ -547,7 +556,7 @@ class Watchdog final : public base::Thread {
void FuzzInspector(const uint8_t* data, size_t size) {
base::Semaphore ready_semaphore(0);
- IsolateData::SetupGlobalTasks frontend_extensions;
+ InspectorIsolateData::SetupGlobalTasks frontend_extensions;
frontend_extensions.emplace_back(new UtilsExtension());
TaskRunner frontend_runner(std::move(frontend_extensions),
kSuppressUncaughtExceptions, &ready_semaphore,
@@ -556,11 +565,11 @@ void FuzzInspector(const uint8_t* data, size_t size) {
int frontend_context_group_id = 0;
RunSyncTask(&frontend_runner,
- [&frontend_context_group_id](IsolateData* data) {
+ [&frontend_context_group_id](InspectorIsolateData* data) {
frontend_context_group_id = data->CreateContextGroup();
});
- IsolateData::SetupGlobalTasks backend_extensions;
+ InspectorIsolateData::SetupGlobalTasks backend_extensions;
backend_extensions.emplace_back(new SetTimeoutExtension());
backend_extensions.emplace_back(new InspectorExtension());
TaskRunner backend_runner(std::move(backend_extensions),
diff --git a/deps/v8/test/fuzzer/json.cc b/deps/v8/test/fuzzer/json.cc
index 3789baab56..6024e78d3c 100644
--- a/deps/v8/test/fuzzer/json.cc
+++ b/deps/v8/test/fuzzer/json.cc
@@ -6,7 +6,12 @@
#include <stddef.h>
#include <stdint.h>
-#include "include/v8.h"
+#include "include/v8-context.h"
+#include "include/v8-exception.h"
+#include "include/v8-isolate.h"
+#include "include/v8-json.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-primitive.h"
#include "test/fuzzer/fuzzer-support.h"
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
diff --git a/deps/v8/test/fuzzer/parser.cc b/deps/v8/test/fuzzer/parser.cc
index 2075075ab4..f756c13ec3 100644
--- a/deps/v8/test/fuzzer/parser.cc
+++ b/deps/v8/test/fuzzer/parser.cc
@@ -9,7 +9,10 @@
#include <cctype>
#include <list>
-#include "include/v8.h"
+#include "include/v8-context.h"
+#include "include/v8-exception.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
#include "src/objects/objects-inl.h"
#include "src/objects/objects.h"
#include "src/parsing/parse-info.h"
diff --git a/deps/v8/test/fuzzer/regexp-builtins.cc b/deps/v8/test/fuzzer/regexp-builtins.cc
index e17400c4c3..5989f641e9 100644
--- a/deps/v8/test/fuzzer/regexp-builtins.cc
+++ b/deps/v8/test/fuzzer/regexp-builtins.cc
@@ -9,7 +9,11 @@
#include <functional>
#include <string>
-#include "include/v8.h"
+#include "include/v8-exception.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-primitive.h"
+#include "include/v8-script.h"
#include "src/heap/factory.h"
#include "src/objects/objects-inl.h"
#include "src/regexp/regexp.h"
diff --git a/deps/v8/test/fuzzer/regexp.cc b/deps/v8/test/fuzzer/regexp.cc
index 2f56475df9..c51ac98108 100644
--- a/deps/v8/test/fuzzer/regexp.cc
+++ b/deps/v8/test/fuzzer/regexp.cc
@@ -8,7 +8,10 @@
#include <stddef.h>
#include <stdint.h>
-#include "include/v8.h"
+#include "include/v8-context.h"
+#include "include/v8-exception.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
#include "src/base/strings.h"
#include "src/heap/factory.h"
#include "src/objects/objects-inl.h"
diff --git a/deps/v8/test/fuzzer/wasm-async.cc b/deps/v8/test/fuzzer/wasm-async.cc
index 5dead005f9..19b85cb8cf 100644
--- a/deps/v8/test/fuzzer/wasm-async.cc
+++ b/deps/v8/test/fuzzer/wasm-async.cc
@@ -6,7 +6,10 @@
#include <stddef.h>
#include <stdint.h>
-#include "include/v8.h"
+#include "include/v8-context.h"
+#include "include/v8-exception.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
#include "src/api/api.h"
#include "src/execution/isolate-inl.h"
#include "src/heap/factory.h"
diff --git a/deps/v8/test/fuzzer/wasm-compile.cc b/deps/v8/test/fuzzer/wasm-compile.cc
index b39d49e500..b2052d8c28 100644
--- a/deps/v8/test/fuzzer/wasm-compile.cc
+++ b/deps/v8/test/fuzzer/wasm-compile.cc
@@ -8,7 +8,7 @@
#include <algorithm>
-#include "include/v8.h"
+#include "src/base/macros.h"
#include "src/execution/isolate.h"
#include "src/objects/objects-inl.h"
#include "src/objects/objects.h"
@@ -35,6 +35,8 @@ constexpr int kMaxGlobals = 64;
constexpr int kMaxParameters = 15;
constexpr int kMaxReturns = 15;
constexpr int kMaxExceptions = 4;
+constexpr int kMaxTableSize = 32;
+constexpr int kMaxTables = 4;
class DataRange {
base::Vector<const uint8_t> data_;
@@ -68,13 +70,8 @@ class DataRange {
template <typename T, size_t max_bytes = sizeof(T)>
T get() {
- // DISABLE FOR BOOL
- // The -O3 on release will break the result. This creates a different
- // observable side effect when invoking get<bool> between debug and release
- // version, which eventually makes the code output different as well as
- // raising various unrecoverable errors on runtime. It is caused by
- // undefined behavior of assigning boolean via memcpy from randomized bytes.
- STATIC_ASSERT(!(std::is_same<T, bool>::value));
+ // Bool needs special handling (see template specialization below).
+ static_assert(!std::is_same<T, bool>::value, "bool needs special handling");
STATIC_ASSERT(max_bytes <= sizeof(T));
// We want to support the case where we have less than sizeof(T) bytes
// remaining in the slice. For example, if we emit an i32 constant, it's
@@ -89,6 +86,20 @@ class DataRange {
}
};
+// Explicit specialization must be defined outside of class body.
+template <>
+bool DataRange::get() {
+ // The general implementation above is not instantiable for bool, as that
+  // would cause undefined behaviour when memcpy'ing random bytes to the
+  // bool. This can result in different observable side effects when invoking
+  // get<bool> between debug and release builds, which eventually makes the
+  // code output different as well as raising various unrecoverable errors at
+  // runtime.
+ // Hence we specialize get<bool> to consume a full byte and use the least
+ // significant bit only (0 == false, 1 == true).
+ return get<uint8_t>() % 2;
+}
+
ValueType GetValueType(uint32_t num_types, DataRange* data,
bool liftoff_as_reference) {
constexpr ValueType types[] = {
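For reference, the hazard the get<bool> specialization above sidesteps, in isolation (BoolFromFuzzerByte is an illustrative standalone function):

    #include <cstdint>

    bool BoolFromFuzzerByte(uint8_t raw) {
      // NOT: bool b; std::memcpy(&b, &raw, 1); return b;
      // That is undefined behaviour whenever raw is neither 0 nor 1.
      return raw % 2;  // well-defined: keep only the least significant bit
    }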
@@ -97,15 +108,18 @@ ValueType GetValueType(uint32_t num_types, DataRange* data,
kWasmS128, kWasmExternRef,
kWasmFuncRef, kWasmEqRef,
kWasmAnyRef, ValueType::Ref(HeapType(HeapType::kData), kNullable)};
+ constexpr int kLiftoffOnlyTypeCount = 3; // at the end of {types}.
if (liftoff_as_reference) {
+ // TODO(11954): Only generate signature types that correspond to functions
uint32_t id = data->get<uint8_t>() % (arraysize(types) + num_types);
if (id >= arraysize(types)) {
return ValueType::Ref(id - arraysize(types), kNullable);
}
return types[id];
}
- return types[data->get<uint8_t>() % arraysize(types)];
+ return types[data->get<uint8_t>() %
+ (arraysize(types) - kLiftoffOnlyTypeCount)];
}
class WasmGenerator {
@@ -212,14 +226,12 @@ class WasmGenerator {
}
void try_block_helper(ValueType return_type, DataRange* data) {
- bool has_catch_all = data->get<uint8_t>() % 2;
+ bool has_catch_all = data->get<bool>();
uint8_t num_catch =
data->get<uint8_t>() % (builder_->builder()->NumExceptions() + 1);
- bool is_delegate =
- num_catch == 0 && !has_catch_all && data->get<uint8_t>() % 2 == 0;
+ bool is_delegate = num_catch == 0 && !has_catch_all && data->get<bool>();
// Allow one more target than there are enclosing try blocks, for delegating
// to the caller.
- uint8_t delegate_target = data->get<uint8_t>() % (try_blocks_.size() + 1);
base::Vector<const ValueType> return_type_vec =
return_type.kind() == kVoid ? base::Vector<ValueType>{}
@@ -227,9 +239,7 @@ class WasmGenerator {
BlockScope block_scope(this, kExprTry, {}, return_type_vec, return_type_vec,
!is_delegate);
int control_depth = static_cast<int>(blocks_.size()) - 1;
- try_blocks_.push_back(control_depth);
Generate(return_type, data);
- try_blocks_.pop_back();
catch_blocks_.push_back(control_depth);
for (int i = 0; i < num_catch; ++i) {
const FunctionSig* exception_type =
@@ -245,12 +255,10 @@ class WasmGenerator {
Generate(return_type, data);
}
if (is_delegate) {
- DCHECK_GT(blocks_.size(), try_blocks_.size());
- // If {delegate_target == try_blocks_.size()}, delegate to the caller.
- int delegate_depth = delegate_target == try_blocks_.size()
- ? static_cast<int>(blocks_.size()) - 2
- : static_cast<int>(blocks_.size() - 2 -
- try_blocks_[delegate_target]);
+      // The delegate target depth does not include the current try block,
+      // because 'delegate' closes this scope. However, it is still in the
+      // {blocks_} list, so remove one to get the correct size.
+ int delegate_depth = data->get<uint8_t>() % (blocks_.size() - 1);
builder_->EmitWithU32V(kExprDelegate, delegate_depth);
}
catch_blocks_.pop_back();
@@ -313,6 +321,29 @@ class WasmGenerator {
data);
}
+ template <ValueKind wanted_kind>
+ void br_on_null(DataRange* data) {
+ DCHECK(!blocks_.empty());
+ const uint32_t target_block = data->get<uint32_t>() % blocks_.size();
+ const auto break_types = base::VectorOf(blocks_[target_block]);
+ if (!liftoff_as_reference_) {
+ Generate<wanted_kind>(data);
+ return;
+ }
+ Generate(break_types, data);
+ GenerateOptRef(HeapType(HeapType::kAny), data);
+ builder_->EmitWithI32V(
+ kExprBrOnNull,
+ static_cast<uint32_t>(blocks_.size()) - 1 - target_block);
+ builder_->Emit(kExprDrop);
+ ConsumeAndGenerate(
+ break_types,
+ wanted_kind == kVoid
+ ? base::Vector<ValueType>{}
+ : base::VectorOf({ValueType::Primitive(wanted_kind)}),
+ data);
+ }
+
// TODO(eholk): make this function constexpr once gcc supports it
static uint8_t max_alignment(WasmOpcode memop) {
switch (memop) {
@@ -558,8 +589,11 @@ class WasmGenerator {
if (call_direct) {
builder_->EmitWithU32V(kExprReturnCall, func_index);
} else {
+ // This will not trap because table[func_index] always contains function
+ // func_index.
builder_->EmitI32Const(func_index);
builder_->EmitWithU32V(kExprReturnCallIndirect, sig_index);
+ // TODO(11954): Use other table indices too.
builder_->EmitByte(0); // Table index.
}
return;
@@ -567,8 +601,11 @@ class WasmGenerator {
if (call_direct) {
builder_->EmitWithU32V(kExprCallFunction, func_index);
} else {
+ // This will not trap because table[func_index] always contains function
+ // func_index.
builder_->EmitI32Const(func_index);
builder_->EmitWithU32V(kExprCallIndirect, sig_index);
+ // TODO(11954): Use other table indices too.
builder_->EmitByte(0); // Table index.
}
}
@@ -707,7 +744,7 @@ class WasmGenerator {
void set_global(DataRange* data) { global_op<kVoid>(data); }
void throw_or_rethrow(DataRange* data) {
- bool rethrow = data->get<uint8_t>() % 2;
+ bool rethrow = data->get<bool>();
if (rethrow && !catch_blocks_.empty()) {
int control_depth = static_cast<int>(blocks_.size() - 1);
int catch_index =
@@ -751,63 +788,122 @@ class WasmGenerator {
}
}
void new_object(HeapType type, DataRange* data) {
- if (liftoff_as_reference_ && type.is_index()) {
- bool new_default = data->get<uint8_t>() % 2;
- uint32_t index = type.ref_index();
- if (builder_->builder()->IsStructType(index)) {
- if (new_default) {
- builder_->EmitWithPrefix(kExprRttCanon);
- builder_->EmitU32V(index);
- builder_->EmitWithPrefix(kExprStructNewDefault);
- builder_->EmitU32V(index);
- } else {
- StructType* struct_gen = builder_->builder()->GetStructType(index);
- int field_count = struct_gen->field_count();
- for (int i = 0; i < field_count; i++) {
- Generate(struct_gen->field(i), data);
- }
- builder_->EmitWithPrefix(kExprRttCanon);
- builder_->EmitU32V(index);
- builder_->EmitWithPrefix(kExprStructNewWithRtt);
- builder_->EmitU32V(index);
+ DCHECK(liftoff_as_reference_ && type.is_index());
+ bool new_default = data->get<bool>();
+ uint32_t index = type.ref_index();
+ if (builder_->builder()->IsStructType(index)) {
+ if (new_default) {
+ builder_->EmitWithPrefix(kExprRttCanon);
+ builder_->EmitU32V(index);
+ builder_->EmitWithPrefix(kExprStructNewDefault);
+ builder_->EmitU32V(index);
+ } else {
+ StructType* struct_gen = builder_->builder()->GetStructType(index);
+ int field_count = struct_gen->field_count();
+ for (int i = 0; i < field_count; i++) {
+ Generate(struct_gen->field(i), data);
}
- return;
- } else if (builder_->builder()->IsArrayType(index)) {
- if (new_default) {
- Generate(kWasmI32, data);
- builder_->EmitWithPrefix(kExprRttCanon);
- builder_->EmitU32V(index);
- builder_->EmitWithPrefix(kExprArrayNewDefault);
- builder_->EmitU32V(index);
- } else {
- Generate(builder_->builder()->GetArrayType(index)->element_type(),
- data);
- Generate(kWasmI32, data);
- builder_->EmitWithPrefix(kExprRttCanon);
- builder_->EmitU32V(index);
- builder_->EmitWithPrefix(kExprArrayNewWithRtt);
- builder_->EmitU32V(index);
+ builder_->EmitWithPrefix(kExprRttCanon);
+ builder_->EmitU32V(index);
+ builder_->EmitWithPrefix(kExprStructNewWithRtt);
+ builder_->EmitU32V(index);
+ }
+ } else if (builder_->builder()->IsArrayType(index)) {
+ if (new_default) {
+ Generate(kWasmI32, data);
+ builder_->EmitWithPrefix(kExprRttCanon);
+ builder_->EmitU32V(index);
+ builder_->EmitWithPrefix(kExprArrayNewDefault);
+ builder_->EmitU32V(index);
+ } else {
+ Generate(builder_->builder()->GetArrayType(index)->element_type(),
+ data);
+ Generate(kWasmI32, data);
+ builder_->EmitWithPrefix(kExprRttCanon);
+ builder_->EmitU32V(index);
+ builder_->EmitWithPrefix(kExprArrayNewWithRtt);
+ builder_->EmitU32V(index);
+ }
+ } else {
+ DCHECK(builder_->builder()->IsSignature(index));
+ int func_size = builder_->builder()->NumFunctions();
+ for (int i = 0; i < func_size; i++) {
+ WasmFunctionBuilder* func = builder_->builder()->GetFunction(i);
+ // TODO(11954): Choose a random function from among those matching the
+ // signature (consider function subtyping?).
+ if (func->sig_index() == index) {
+ builder_->EmitWithU32V(kExprRefFunc, func->func_index());
+ return;
}
- return;
+ }
+ ref_null(type, data);
+ }
+ }
+
+ template <ValueKind wanted_kind>
+ void table_op(std::vector<ValueType> types, DataRange* data,
+ WasmOpcode opcode) {
+ DCHECK(opcode == kExprTableSet || opcode == kExprTableSize ||
+ opcode == kExprTableGrow || opcode == kExprTableFill);
+ int num_tables = builder_->builder()->NumTables();
+ DCHECK_GT(num_tables, 0);
+ int index = data->get<uint8_t>() % num_tables;
+ for (size_t i = 0; i < types.size(); i++) {
+      // Reference operands are passed in as kWasmFuncRef by default; rewrite
+      // them to the element type of the selected table.
+ if (types[i] == kWasmFuncRef) {
+ types[i] = builder_->builder()->GetTableType(index);
+ }
+ }
+ Generate(base::VectorOf(types), data);
+ if (opcode == kExprTableSet) {
+ builder_->Emit(opcode);
+ } else {
+ builder_->EmitWithPrefix(opcode);
+ }
+ builder_->EmitU32V(index);
+ }
+ void table_get(HeapType type, DataRange* data) {
+ ValueType needed_type = ValueType::Ref(type, kNullable);
+ int table_size = builder_->builder()->NumTables();
+ ZoneVector<uint32_t> table(builder_->builder()->zone());
+ for (int i = 0; i < table_size; i++) {
+ if (builder_->builder()->GetTableType(i) == needed_type) {
+ table.push_back(i);
}
}
- ref_null(type, data);
+ if (table.empty()) {
+ ref_null(type, data);
+ return;
+ }
+ int index = data->get<uint8_t>() % static_cast<int>(table.size());
+ Generate(kWasmI32, data);
+ builder_->Emit(kExprTableGet);
+ builder_->EmitU32V(table[index]);
+ }
+ void table_set(DataRange* data) {
+ table_op<kVoid>({kWasmI32, kWasmFuncRef}, data, kExprTableSet);
+ }
+ void table_size(DataRange* data) { table_op<kI32>({}, data, kExprTableSize); }
+ void table_grow(DataRange* data) {
+ table_op<kI32>({kWasmFuncRef, kWasmI32}, data, kExprTableGrow);
+ }
+ void table_fill(DataRange* data) {
+ table_op<kVoid>({kWasmI32, kWasmFuncRef, kWasmI32}, data, kExprTableFill);
}
template <ValueKind wanted_kind>
void struct_get(DataRange* data) {
WasmModuleBuilder* builder = builder_->builder();
- int num_types = builder->NumTypes();
ZoneVector<uint32_t> field_index(builder->zone());
ZoneVector<uint32_t> struct_index(builder->zone());
- for (int i = 0; i < num_types; i++) {
- if (builder->IsStructType(i)) {
- int field_count = builder->GetStructType(i)->field_count();
- for (int index = 0; index < field_count; index++) {
- if (builder->GetStructType(i)->field(index).kind() == wanted_kind) {
- field_index.push_back(index);
- struct_index.push_back(i);
- }
+ for (uint32_t i = 0; i < num_structs_; i++) {
+ DCHECK(builder->IsStructType(i));
+ int field_count = builder->GetStructType(i)->field_count();
+ for (int index = 0; index < field_count; index++) {
+ if (builder->GetStructType(i)->field(index).kind() == wanted_kind) {
+ field_index.push_back(index);
+ struct_index.push_back(i);
}
}
}
@@ -821,6 +917,40 @@ class WasmGenerator {
builder_->EmitU32V(struct_index[index]);
builder_->EmitU32V(field_index[index]);
}
+ void struct_set(DataRange* data) {
+ WasmModuleBuilder* builder = builder_->builder();
+ if (num_structs_ > 0) {
+ int struct_index = data->get<uint8_t>() % num_structs_;
+ DCHECK(builder->IsStructType(struct_index));
+ int field_count = builder->GetStructType(struct_index)->field_count();
+ if (field_count == 0) {
+ return;
+ }
+ int field_index = data->get<uint8_t>() % field_count;
+ GenerateOptRef(HeapType(struct_index), data);
+ Generate(builder->GetStructType(struct_index)->field(field_index), data);
+ builder_->EmitWithPrefix(kExprStructSet);
+ builder_->EmitU32V(struct_index);
+ builder_->EmitU32V(field_index);
+ }
+ }
+
+ template <ValueKind wanted_kind>
+ void ref_is_null(DataRange* data) {
+ GenerateOptRef(HeapType(HeapType::kAny), data);
+ builder_->Emit(kExprRefIsNull);
+ }
+
+ void ref_eq(DataRange* data) {
+ if (!liftoff_as_reference_) {
+ Generate(kWasmI32, data);
+ return;
+ }
+ GenerateOptRef(HeapType(HeapType::kEq), data);
+ GenerateOptRef(HeapType(HeapType::kEq), data);
+ builder_->Emit(kExprRefEq);
+ }
+
using GenerateFn = void (WasmGenerator::*const)(DataRange*);
using GenerateFnWithHeap = void (WasmGenerator::*const)(HeapType, DataRange*);
@@ -860,12 +990,15 @@ class WasmGenerator {
public:
WasmGenerator(WasmFunctionBuilder* fn, const std::vector<uint32_t>& functions,
const std::vector<ValueType>& globals,
- const std::vector<uint8_t>& mutable_globals, DataRange* data,
+ const std::vector<uint8_t>& mutable_globals,
+ uint32_t num_structs, uint32_t num_arrays, DataRange* data,
bool liftoff_as_reference)
: builder_(fn),
functions_(functions),
globals_(globals),
mutable_globals_(mutable_globals),
+ num_structs_(num_structs),
+ num_arrays_(num_arrays),
liftoff_as_reference_(liftoff_as_reference) {
FunctionSig* sig = fn->signature();
blocks_.emplace_back();
@@ -912,9 +1045,10 @@ class WasmGenerator {
std::vector<ValueType> globals_;
std::vector<uint8_t> mutable_globals_; // indexes into {globals_}.
uint32_t recursion_depth = 0;
- std::vector<int> try_blocks_;
std::vector<int> catch_blocks_;
bool has_simd_;
+ uint32_t num_structs_;
+ uint32_t num_arrays_;
bool liftoff_as_reference_;
static constexpr uint32_t kMaxRecursionDepth = 64;
@@ -949,6 +1083,7 @@ void WasmGenerator::Generate<kVoid>(DataRange* data) {
&WasmGenerator::if_<kVoid, kIfElse>,
&WasmGenerator::br,
&WasmGenerator::br_if<kVoid>,
+ &WasmGenerator::br_on_null<kVoid>,
&WasmGenerator::memop<kExprI32StoreMem, kI32>,
&WasmGenerator::memop<kExprI32StoreMem8, kI32>,
@@ -980,7 +1115,12 @@ void WasmGenerator::Generate<kVoid>(DataRange* data) {
&WasmGenerator::set_local,
&WasmGenerator::set_global,
&WasmGenerator::throw_or_rethrow,
- &WasmGenerator::try_block<kVoid>};
+ &WasmGenerator::try_block<kVoid>,
+
+ &WasmGenerator::struct_set,
+
+ &WasmGenerator::table_set,
+ &WasmGenerator::table_fill};
GenerateOneOf(alternatives, data);
}
@@ -1067,6 +1207,7 @@ void WasmGenerator::Generate<kI32>(DataRange* data) {
&WasmGenerator::loop<kI32>,
&WasmGenerator::if_<kI32, kIfElse>,
&WasmGenerator::br_if<kI32>,
+ &WasmGenerator::br_on_null<kI32>,
&WasmGenerator::memop<kExprI32LoadMem>,
&WasmGenerator::memop<kExprI32LoadMem8S>,
@@ -1130,7 +1271,13 @@ void WasmGenerator::Generate<kI32>(DataRange* data) {
&WasmGenerator::call_indirect<kI32>,
&WasmGenerator::try_block<kI32>,
- &WasmGenerator::struct_get<kI32>};
+ &WasmGenerator::struct_get<kI32>,
+
+ &WasmGenerator::ref_is_null<kI32>,
+ &WasmGenerator::ref_eq,
+
+ &WasmGenerator::table_size,
+ &WasmGenerator::table_grow};
GenerateOneOf(alternatives, data);
}
@@ -1188,6 +1335,7 @@ void WasmGenerator::Generate<kI64>(DataRange* data) {
&WasmGenerator::loop<kI64>,
&WasmGenerator::if_<kI64, kIfElse>,
&WasmGenerator::br_if<kI64>,
+ &WasmGenerator::br_on_null<kI64>,
&WasmGenerator::memop<kExprI64LoadMem>,
&WasmGenerator::memop<kExprI64LoadMem8S>,
@@ -1290,6 +1438,7 @@ void WasmGenerator::Generate<kF32>(DataRange* data) {
&WasmGenerator::loop<kF32>,
&WasmGenerator::if_<kF32, kIfElse>,
&WasmGenerator::br_if<kF32>,
+ &WasmGenerator::br_on_null<kF32>,
&WasmGenerator::memop<kExprF32LoadMem>,
@@ -1349,6 +1498,7 @@ void WasmGenerator::Generate<kF64>(DataRange* data) {
&WasmGenerator::loop<kF64>,
&WasmGenerator::if_<kF64, kIfElse>,
&WasmGenerator::br_if<kF64>,
+ &WasmGenerator::br_on_null<kF64>,
&WasmGenerator::memop<kExprF64LoadMem>,
@@ -1642,11 +1792,73 @@ void WasmGenerator::Generate(ValueType type, DataRange* data) {
}
void WasmGenerator::GenerateOptRef(HeapType type, DataRange* data) {
- constexpr GenerateFnWithHeap alternatives[] = {
- &WasmGenerator::ref_null, &WasmGenerator::get_local_opt_ref,
- &WasmGenerator::new_object};
+ switch (type.representation()) {
+ // For abstract types, generate one of their subtypes, or fall back to the
+ // default case.
+ case HeapType::kAny: {
+ // Weighted according to the types in the module.
+ // TODO(11954): Generate i31ref.
+ uint32_t num_types = builder_->builder()->NumTypes();
+ uint8_t random = data->get<uint8_t>() % (num_types + 2);
+ if (random < num_structs_ + num_arrays_) {
+ GenerateOptRef(HeapType(HeapType::kData), data);
+ return;
+ } else if (random < num_types) {
+ GenerateOptRef(HeapType(HeapType::kFunc), data);
+ return;
+ } else if (random == num_types) {
+ GenerateOptRef(HeapType(HeapType::kExtern), data);
+ return;
+ }
+ // Else fall back to the default case outside the switch.
+ break;
+ }
+ case HeapType::kData:
+ case HeapType::kEq: {
+ uint8_t random = data->get<uint8_t>() % (num_structs_ + num_arrays_ + 1);
+ if (random > 0) {
+ GenerateOptRef(HeapType(random - 1), data);
+ return;
+ }
+ // Else fall back to the default case outside the switch.
+ break;
+ }
+ case HeapType::kFunc: {
+ uint32_t num_signatures =
+ builder_->builder()->NumTypes() - num_structs_ - num_arrays_;
+ uint32_t random = data->get<uint32_t>() % (num_signatures + 1);
+ if (random > 0) {
+ uint32_t signature_index = random + num_arrays_ + num_structs_ - 1;
+ DCHECK(builder_->builder()->IsSignature(signature_index));
+ GenerateOptRef(HeapType(signature_index), data);
+ return;
+ }
+ // Else fall back to the default case outside the switch.
+ break;
+ }
+ // TODO(11954): Add i31ref case.
+ default:
+ break;
+ }
+
+ constexpr GenerateFnWithHeap alternatives_with_index[] = {
+ &WasmGenerator::new_object, &WasmGenerator::get_local_opt_ref,
+ &WasmGenerator::ref_null};
+
+ constexpr GenerateFnWithHeap alternatives_func_extern[] = {
+ &WasmGenerator::table_get, &WasmGenerator::get_local_opt_ref,
+ &WasmGenerator::ref_null};
+
+ constexpr GenerateFnWithHeap alternatives_null[] = {
+ &WasmGenerator::ref_null, &WasmGenerator::get_local_opt_ref};
- GenerateOneOf(alternatives, type, data);
+ if (liftoff_as_reference_ && type.is_index()) {
+ GenerateOneOf(alternatives_with_index, type, data);
+ } else if (type == HeapType::kFunc || type == HeapType::kExtern) {
+ GenerateOneOf(alternatives_func_extern, type, data);
+ } else {
+ GenerateOneOf(alternatives_null, type, data);
+ }
}
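For the kAny case above, the arithmetic partitions the byte reduced modulo num_types + 2 as follows: [0, num_structs_ + num_arrays_) picks data, [num_structs_ + num_arrays_, num_types) picks func, exactly num_types picks extern, and num_types + 1 falls through to the default alternatives. A standalone check of that partition with the counts this fuzzer registers below (1 struct, 4 arrays; the 5 signatures are an assumed example count):

  #include <cstdio>
  int main() {
    const unsigned num_structs = 1, num_arrays = 4, num_signatures = 5;
    const unsigned num_types = num_structs + num_arrays + num_signatures;
    for (unsigned random = 0; random < num_types + 2; ++random) {
      const char* choice = random < num_structs + num_arrays ? "data"
                           : random < num_types              ? "func"
                           : random == num_types             ? "extern"
                                                             : "fallback";
      std::printf("%2u -> %s\n", random, choice);
    }
  }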
std::vector<ValueType> WasmGenerator::GenerateTypes(DataRange* data) {
@@ -1740,14 +1952,19 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
std::vector<uint32_t> function_signatures;
// Add struct and array types first so that we get a chance to generate
- // these types in function signatures
+ // these types in function signatures.
+ // Currently, WasmGenerator assumes this order for struct/array/signature
+ // definitions.
+ uint32_t num_structs = 0, num_arrays = 0;
if (liftoff_as_reference) {
+ num_structs = 1;
+ num_arrays = 4;
uint32_t count = 4;
StructType::Builder struct_builder(zone, count);
- struct_builder.AddField(kWasmI32, false);
- struct_builder.AddField(kWasmI64, false);
- struct_builder.AddField(kWasmF32, false);
- struct_builder.AddField(kWasmF64, false);
+ struct_builder.AddField(kWasmI32, true);
+ struct_builder.AddField(kWasmI64, true);
+ struct_builder.AddField(kWasmF32, true);
+ struct_builder.AddField(kWasmF64, true);
StructType* struct_fuz = struct_builder.Build();
builder.AddStructType(struct_fuz);
ArrayType* array_fuzI32 = zone->New<ArrayType>(kWasmI32, true);
@@ -1759,7 +1976,6 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
builder.AddArrayType(array_fuzF32);
builder.AddArrayType(array_fuzF64);
}
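With this setup the module's type index space has a fixed layout that the generator's subtype selection above depends on (e.g. signature_index = random + num_arrays_ + num_structs_ - 1): the struct comes first, then the arrays, then every signature. A small helper sketching that mapping (hypothetical, for illustration only):

  #include <cstdint>
  // Index space assumed by WasmGenerator:
  //   [0, num_structs)                        -> struct types (here: 0)
  //   [num_structs, num_structs + num_arrays) -> array types  (here: 1..4)
  //   [num_structs + num_arrays, num_types)   -> signatures   (here: 5..)
  inline bool IsSignatureIndex(uint32_t index, uint32_t num_structs,
                               uint32_t num_arrays) {
    return index >= num_structs + num_arrays;
  }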
-
function_signatures.push_back(builder.AddSignature(sigs.i_iii()));
static_assert(kMaxFunctions >= 1, "need min. 1 function");
@@ -1795,15 +2011,58 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
if (mutability) mutable_globals.push_back(static_cast<uint8_t>(i));
}
+ // Generate function declarations before tables. This will be needed once we
+ // have typed-function tables.
+ std::vector<WasmFunctionBuilder*> functions;
for (int i = 0; i < num_functions; ++i) {
- DataRange function_range =
- i == num_functions - 1 ? std::move(range) : range.split();
-
FunctionSig* sig = builder.GetSignature(function_signatures[i]);
- WasmFunctionBuilder* f = builder.AddFunction(sig);
+ functions.push_back(builder.AddFunction(sig));
+ }
+ // Generate tables before function bodies, so they are available for table
+ // operations.
+ // Always generate at least one table for call_indirect.
+ int num_tables = range.get<uint8_t>() % kMaxTables + 1;
+ for (int i = 0; i < num_tables; i++) {
+ // Table 0 has to reference all functions in the program, so that all
+ // functions count as declared and can be referenced with ref.func.
+ // TODO(11954): Consider removing this restriction.
+ uint32_t min_size =
+ i == 0 ? num_functions : range.get<uint8_t>() % kMaxTableSize;
+ uint32_t max_size =
+ range.get<uint8_t>() % (kMaxTableSize - min_size) + min_size;
+ // Table 0 is always funcref.
+ // TODO(11954): Remove this requirement once we support call_indirect with
+ // other table indices.
+ // TODO(11954): Support typed function tables.
+ bool use_funcref = i == 0 || range.get<bool>();
+ ValueType type = use_funcref ? kWasmFuncRef : kWasmExternRef;
+ uint32_t table_index = builder.AddTable(type, min_size, max_size);
+ if (type == kWasmFuncRef) {
+ // For function tables, initialize them with functions from the program.
+ // Currently, the fuzzer assumes that every function table contains the
+ // functions in the program in the order they are defined.
+ // TODO(11954): Consider generalizing this.
+ WasmModuleBuilder::WasmElemSegment segment(
+ zone, kWasmFuncRef, table_index, WasmInitExpr(0));
+ for (int entry_index = 0; entry_index < static_cast<int>(min_size);
+ entry_index++) {
+ segment.entries.emplace_back(
+ WasmModuleBuilder::WasmElemSegment::Entry::kRefFuncEntry,
+ entry_index % num_functions);
+ }
+ builder.AddElementSegment(std::move(segment));
+ }
+ }
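Two invariants are maintained here: max_size never drops below min_size (the modulo runs over kMaxTableSize - min_size and min_size is added back), and table 0's minimum is num_functions, so its element segment references every function at least once. A standalone sketch of the size selection (illustrative; assumes min_size < kMaxTableSize):

  #include <cstdint>
  // data_byte stands in for range.get<uint8_t>().
  uint32_t PickMaxSize(uint32_t min_size, uint32_t max_table_size,
                       uint8_t data_byte) {
    // Result lies in [min_size, max_table_size): never below the minimum.
    return data_byte % (max_table_size - min_size) + min_size;
  }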
+
+ for (int i = 0; i < num_functions; ++i) {
+ WasmFunctionBuilder* f = functions[i];
+ DataRange function_range = range.split();
WasmGenerator gen(f, function_signatures, globals, mutable_globals,
- &function_range, liftoff_as_reference);
+ num_structs, num_arrays, &function_range,
+ liftoff_as_reference);
+ FunctionSig* sig = f->signature();
base::Vector<const ValueType> return_types(sig->returns().begin(),
sig->return_count());
gen.Generate(return_types, &function_range);
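Each function body now draws from its own range.split() slice instead of the last function taking the whole remaining range, so the bytes consumed while generating one body no longer shift what the next body sees. A sketch of the effect (assuming DataRange is movable, as its use here suggests):

  // Illustrative only: one independent slice per function.
  std::vector<DataRange> slices;
  for (int i = 0; i < num_functions; ++i) {
    slices.push_back(range.split());  // same call as in the loop above
  }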
@@ -1812,10 +2071,6 @@ class WasmCompileFuzzer : public WasmExecutionFuzzer {
if (i == 0) builder.AddExport(base::CStrVector("main"), f);
}
- builder.AllocateIndirectFunctions(num_functions);
- for (int i = 0; i < num_functions; ++i) {
- builder.SetIndirectFunction(i, i);
- }
builder.SetMaxMemorySize(32);
// We enable shared memory to be able to test atomics.
builder.SetHasSharedMemory();
diff --git a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
index 3948743f94..7165f1994a 100644
--- a/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
+++ b/deps/v8/test/fuzzer/wasm-fuzzer-common.cc
@@ -6,7 +6,11 @@
#include <ctime>
-#include "include/v8.h"
+#include "include/v8-context.h"
+#include "include/v8-exception.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-metrics.h"
#include "src/execution/isolate.h"
#include "src/objects/objects-inl.h"
#include "src/utils/ostreams.h"
@@ -75,7 +79,7 @@ Handle<WasmModuleObject> CompileReferenceModule(Zone* zone, Isolate* isolate,
Handle<Script> script =
GetWasmEngine()->GetOrCreateScript(isolate, native_module, kNoSourceUrl);
Handle<FixedArray> export_wrappers = isolate->factory()->NewFixedArray(
- static_cast<int>(module->num_exported_functions));
+ static_cast<int>(module->functions.size()));
return WasmModuleObject::New(isolate, std::move(native_module), script,
export_wrappers);
}
@@ -358,7 +362,7 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
ModuleOrigin::kWasmOrigin, isolate->counters(),
isolate->metrics_recorder(), v8::metrics::Recorder::ContextId::Empty(),
DecodingMethod::kSync, GetWasmEngine()->allocator());
- CHECK(module_res.ok());
+ CHECK_WITH_MSG(module_res.ok(), module_res.error().message().c_str());
WasmModule* module = module_res.value().get();
CHECK_NOT_NULL(module);
@@ -382,7 +386,7 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
"\n"
"// Flags: --wasm-staging --experimental-wasm-gc\n"
"\n"
- "load('test/mjsunit/wasm/wasm-module-builder.js');\n"
+ "d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');\n"
"\n"
"const builder = new WasmModuleBuilder();\n";
@@ -443,17 +447,13 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
Zone tmp_zone(isolate->allocator(), ZONE_NAME);
- // There currently cannot be more than one table.
- // TODO(manoskouk): Add support for more tables.
- // TODO(9495): Add support for talbes with explicit initializers.
- DCHECK_GE(1, module->tables.size());
+ // TODO(9495): Add support for tables with explicit initializers.
for (const WasmTable& table : module->tables) {
- os << "builder.setTableBounds(" << table.initial_size << ", ";
- if (table.has_maximum_size) {
- os << table.maximum_size << ");\n";
- } else {
- os << "undefined);\n";
- }
+ os << "builder.addTable(" << ValueTypeToConstantName(table.type) << ", "
+ << table.initial_size << ", "
+ << (table.has_maximum_size ? std::to_string(table.maximum_size)
+ : "undefined")
+ << ", undefined)\n";
}
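For a funcref table with initial size 3 and no declared maximum, the stream insertion above produces a single line of generated JavaScript; a self-contained sketch of that emission (the kWasmFuncRef constant name is an assumption about ValueTypeToConstantName's output):

  #include <cstdint>
  #include <iostream>
  #include <string>
  int main() {
    const std::string type_name = "kWasmFuncRef";  // assumed helper output
    const uint32_t initial_size = 3;
    const bool has_maximum_size = false;
    const uint32_t maximum_size = 0;
    std::cout << "builder.addTable(" << type_name << ", " << initial_size
              << ", "
              << (has_maximum_size ? std::to_string(maximum_size)
                                   : "undefined")
              << ", undefined)\n";
    // Prints: builder.addTable(kWasmFuncRef, 3, undefined, undefined)
  }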
for (const WasmElemSegment& elem_segment : module->elem_segments) {
const char* status_str =
@@ -476,6 +476,11 @@ void GenerateTestCase(Isolate* isolate, ModuleWireBytes wire_bytes,
os << "], " << ValueTypeToConstantName(elem_segment.type) << ");\n";
}
+ for (const WasmTag& tag : module->tags) {
+ os << "builder.addTag(makeSig(" << PrintParameters(tag.ToFunctionSig())
+ << ", []));\n";
+ }
+
for (const WasmFunction& func : module->functions) {
base::Vector<const uint8_t> func_code = wire_bytes.GetFunctionBytes(&func);
os << "// Generate function " << (func.func_index + 1) << " (out of "
diff --git a/deps/v8/test/fuzzer/wasm.cc b/deps/v8/test/fuzzer/wasm.cc
index dd15f6bb46..cc76fc5111 100644
--- a/deps/v8/test/fuzzer/wasm.cc
+++ b/deps/v8/test/fuzzer/wasm.cc
@@ -6,7 +6,11 @@
#include <stddef.h>
#include <stdint.h>
-#include "include/v8.h"
+#include "include/libplatform/libplatform.h"
+#include "include/v8-context.h"
+#include "include/v8-exception.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
#include "src/execution/isolate-inl.h"
#include "src/heap/factory.h"
#include "src/objects/objects-inl.h"
diff --git a/deps/v8/test/inspector/counters/collection-expected.txt b/deps/v8/test/inspector/counters/collection-expected.txt
deleted file mode 100644
index 36ade78130..0000000000
--- a/deps/v8/test/inspector/counters/collection-expected.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Test Counters collection using Profiler.getCounters.
-PASSED
diff --git a/deps/v8/test/inspector/counters/collection.js b/deps/v8/test/inspector/counters/collection.js
deleted file mode 100644
index 1412956ee6..0000000000
--- a/deps/v8/test/inspector/counters/collection.js
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-let {session, contextGroup, Protocol} = InspectorTest.start(
- 'Test Counters collection using Profiler.getCounters.');
-
-var source =
-`
-function fib(x) {
- if (x < 2) return 1;
- return fib(x-1) + fib(x-2);
-}
-fib(5);
-`;
-
-function buildCounterMap(result) {
- let counterMap = new Map();
-
- let counters = result.result.result;
- for (const {name, value} of counters) {
- counterMap.set(name, value);
- }
-
- return counterMap;
-}
-
-function compareCounterMaps(counterMap, counterMap2) {
- // Check for counters that are present in the first map but are not found
- // in the the second map
- for (let counter of counterMap.keys()) {
- if (!counterMap2.has(counter)) {
- InspectorTest.log(`Counter ${counter} is missing`);
- return false;
- }
- }
-
- // Check for the counter value changes
- let counterValueIncreased = false;
- for (let [counter, value2] of counterMap2) {
- let value = counterMap.get(counter);
- if (value !== undefined) {
- if (value2 < value) {
- InspectorTest.log(`Counter ${counter} value decreased: ${value} -> ${value2}`);
- return false;
- }
- if (value2 > value) {
- counterValueIncreased = true;
- }
- }
- }
-
- if (!counterValueIncreased && counterMap.size === counterMap2.size) {
- InspectorTest.log(`No counter values has increased or added`);
- return false;
- }
-
- return true;
-}
-
-(async function test() {
- await Protocol.Runtime.enable();
- await Protocol.Profiler.enableCounters();
-
- let counterMap = buildCounterMap(await Protocol.Profiler.getCounters());
-
- await Protocol.Runtime.evaluate({ expression: source, sourceURL: arguments.callee.name, persistScript: true });
-
- let counterMap2 = buildCounterMap(await Protocol.Profiler.getCounters());
- const check1 = compareCounterMaps(counterMap, counterMap2);
-
- await Protocol.Runtime.evaluate({ expression: source, sourceURL: arguments.callee.name, persistScript: true });
-
- let counterMap3 = buildCounterMap(await Protocol.Profiler.getCounters());
- const check2 = compareCounterMaps(counterMap2, counterMap3);
-
- await Protocol.Profiler.disableCounters();
- await Protocol.Runtime.disable();
-
- InspectorTest.log(check1 && check2 ? 'PASSED' : 'FAILED');
-
- InspectorTest.completeTest();
-})().catch(e => InspectorTest.log('caught: ' + e));
diff --git a/deps/v8/test/inspector/counters/enable-disable-expected.txt b/deps/v8/test/inspector/counters/enable-disable-expected.txt
deleted file mode 100644
index 8d80a763b8..0000000000
--- a/deps/v8/test/inspector/counters/enable-disable-expected.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-Test Counters collection enabling and disabling.
-Expected error: "Counters collection is not enabled."
-Expected error: "Counters collection already enabled."
-Some counters reported
-Expected error: "Counters collection is not enabled."
-Less counters reported
diff --git a/deps/v8/test/inspector/counters/enable-disable.js b/deps/v8/test/inspector/counters/enable-disable.js
deleted file mode 100644
index ffc6518864..0000000000
--- a/deps/v8/test/inspector/counters/enable-disable.js
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-let {session, contextGroup, Protocol} = InspectorTest.start(
- 'Test Counters collection enabling and disabling.');
-
-var source =
-`
-function fib(x) {
- if (x < 2) return 1;
- return fib(x-1) + fib(x-2);
-}
-fib(5);
-`;
-
-function logErrorMessage(result) {
- InspectorTest.log('Expected error: "' + result.error.message + '"');
-}
-
-(async function test() {
- await Protocol.Runtime.enable();
-
- // This should fail with "not enabled" error.
- logErrorMessage(await Protocol.Profiler.getCounters());
-
- // This should fail with "already enabled" error.
- await Protocol.Profiler.enableCounters();
- logErrorMessage(await Protocol.Profiler.enableCounters());
-
- // The result should not be empty.
- await Protocol.Runtime.evaluate({ expression: source, sourceURL: arguments.callee.name, persistScript: true });
- const counters = (await Protocol.Profiler.getCounters()).result.result;
- if (counters.length > 0)
- InspectorTest.log('Some counters reported');
- await Protocol.Profiler.disableCounters();
-
- // This should fail with "not enabled" error too.
- logErrorMessage(await Protocol.Profiler.getCounters());
-
- // The result should not be empty and have smaller amount of counters than
- // the first result.
- await Protocol.Profiler.enableCounters();
- const counters2 = (await Protocol.Profiler.getCounters()).result.result;
- if (counters2.length > 0 && counters2.length < counters.length)
- InspectorTest.log('Less counters reported');
- await Protocol.Profiler.disableCounters();
-
- await Protocol.Runtime.disable();
- InspectorTest.completeTest();
-})().catch(e => InspectorTest.log('caught: ' + e));
diff --git a/deps/v8/test/inspector/cpu-profiler/coverage-block.js b/deps/v8/test/inspector/cpu-profiler/coverage-block.js
index 867991f0f1..70475041d0 100644
--- a/deps/v8/test/inspector/cpu-profiler/coverage-block.js
+++ b/deps/v8/test/inspector/cpu-profiler/coverage-block.js
@@ -5,6 +5,7 @@
// Flags: --allow-natives-syntax --no-always-opt --opt
// Flags: --no-stress-flush-code
// Flags: --no-stress-incremental-marking
+// Flags: --no-concurrent-recompilation
var source =
`
diff --git a/deps/v8/test/inspector/cpu-profiler/coverage.js b/deps/v8/test/inspector/cpu-profiler/coverage.js
index 76e692c6f4..78f699f8b2 100644
--- a/deps/v8/test/inspector/cpu-profiler/coverage.js
+++ b/deps/v8/test/inspector/cpu-profiler/coverage.js
@@ -5,6 +5,7 @@
// Flags: --allow-natives-syntax --no-always-opt --opt
// Flags: --no-stress-flush-code
// Flags: --no-stress-incremental-marking
+// Flags: --no-concurrent-recompilation
// Flags: --no-baseline-batch-compilation
var source =
diff --git a/deps/v8/test/inspector/debugger/pause-on-oom-expected.txt b/deps/v8/test/inspector/debugger/pause-on-oom-expected.txt
index 7570134c6a..4ca988deca 100644
--- a/deps/v8/test/inspector/debugger/pause-on-oom-expected.txt
+++ b/deps/v8/test/inspector/debugger/pause-on-oom-expected.txt
@@ -1,3 +1,2 @@
Check pause on OOM
-nearHeapLimitCallback
reason: OOM
diff --git a/deps/v8/test/inspector/debugger/pause-on-oom-extrawide-expected.txt b/deps/v8/test/inspector/debugger/pause-on-oom-extrawide-expected.txt
index 7570134c6a..4ca988deca 100644
--- a/deps/v8/test/inspector/debugger/pause-on-oom-extrawide-expected.txt
+++ b/deps/v8/test/inspector/debugger/pause-on-oom-extrawide-expected.txt
@@ -1,3 +1,2 @@
Check pause on OOM
-nearHeapLimitCallback
reason: OOM
diff --git a/deps/v8/test/inspector/debugger/pause-on-oom-wide-expected.txt b/deps/v8/test/inspector/debugger/pause-on-oom-wide-expected.txt
index 7570134c6a..4ca988deca 100644
--- a/deps/v8/test/inspector/debugger/pause-on-oom-wide-expected.txt
+++ b/deps/v8/test/inspector/debugger/pause-on-oom-wide-expected.txt
@@ -1,3 +1,2 @@
Check pause on OOM
-nearHeapLimitCallback
reason: OOM
diff --git a/deps/v8/test/inspector/frontend-channel.h b/deps/v8/test/inspector/frontend-channel.h
index f92940bf76..2853a95a85 100644
--- a/deps/v8/test/inspector/frontend-channel.h
+++ b/deps/v8/test/inspector/frontend-channel.h
@@ -7,8 +7,12 @@
#include <vector>
+#include "include/v8-context.h"
+#include "include/v8-exception.h"
+#include "include/v8-function.h"
#include "include/v8-inspector.h"
-#include "include/v8.h"
+#include "include/v8-microtask-queue.h"
+#include "include/v8-persistent-handle.h"
#include "test/inspector/task-runner.h"
#include "test/inspector/utils.h"
@@ -51,7 +55,7 @@ class FrontendChannelImpl : public v8_inspector::V8Inspector::Channel {
bool is_priority_task() final { return false; }
private:
- void Run(IsolateData* data) override {
+ void Run(InspectorIsolateData* data) override {
v8::MicrotasksScope microtasks_scope(data->isolate(),
v8::MicrotasksScope::kRunMicrotasks);
v8::HandleScope handle_scope(data->isolate());
diff --git a/deps/v8/test/inspector/inspector-test.cc b/deps/v8/test/inspector/inspector-test.cc
index 976ab4be68..81395445ac 100644
--- a/deps/v8/test/inspector/inspector-test.cc
+++ b/deps/v8/test/inspector/inspector-test.cc
@@ -12,7 +12,9 @@
#include <vector>
#include "include/libplatform/libplatform.h"
-#include "include/v8.h"
+#include "include/v8-initialization.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-snapshot.h"
#include "src/base/platform/platform.h"
#include "src/base/small-vector.h"
#include "src/base/vector.h"
@@ -41,7 +43,7 @@ namespace {
base::SmallVector<TaskRunner*, 2> task_runners;
-class UtilsExtension : public IsolateData::SetupGlobalTask {
+class UtilsExtension : public InspectorIsolateData::SetupGlobalTask {
public:
~UtilsExtension() override = default;
void Run(v8::Isolate* isolate,
@@ -195,7 +197,7 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
std::string chars;
v8::Isolate* isolate = args.GetIsolate();
v8::Local<v8::Context> context = isolate->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
int context_group_id = data->GetContextGroupId(context);
if (ReadFile(isolate, args[0], &chars)) {
ExecuteStringTask(chars, context_group_id).Run(data);
@@ -250,7 +252,8 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
ToVector(args.GetIsolate(), args[2].As<v8::String>());
int context_group_id = args[0].As<v8::Int32>()->Value();
RunSyncTask(backend_runner_,
- [&context_group_id, &reason, &details](IsolateData* data) {
+ [&context_group_id, &reason,
+ &details](InspectorIsolateData* data) {
data->SchedulePauseOnNextStatement(
context_group_id,
v8_inspector::StringView(reason.data(), reason.size()),
@@ -264,9 +267,10 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
FATAL("Internal error: cancelPauseOnNextStatement(context_group_id).");
}
int context_group_id = args[0].As<v8::Int32>()->Value();
- RunSyncTask(backend_runner_, [&context_group_id](IsolateData* data) {
- data->CancelPauseOnNextStatement(context_group_id);
- });
+ RunSyncTask(backend_runner_,
+ [&context_group_id](InspectorIsolateData* data) {
+ data->CancelPauseOnNextStatement(context_group_id);
+ });
}
static void SetLogConsoleApiMessageCalls(
@@ -294,7 +298,7 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
}
std::vector<uint16_t> script =
ToVector(args.GetIsolate(), args[0].As<v8::String>());
- RunSyncTask(backend_runner_, [&script](IsolateData* data) {
+ RunSyncTask(backend_runner_, [&script](InspectorIsolateData* data) {
data->SetAdditionalConsoleApi(
v8_inspector::StringView(script.data(), script.size()));
});
@@ -306,9 +310,10 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
FATAL("Internal error: createContextGroup().");
}
int context_group_id = 0;
- RunSyncTask(backend_runner_, [&context_group_id](IsolateData* data) {
- context_group_id = data->CreateContextGroup();
- });
+ RunSyncTask(backend_runner_,
+ [&context_group_id](InspectorIsolateData* data) {
+ context_group_id = data->CreateContextGroup();
+ });
args.GetReturnValue().Set(
v8::Int32::New(args.GetIsolate(), context_group_id));
}
@@ -321,7 +326,8 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
std::vector<uint16_t> name =
ToVector(args.GetIsolate(), args[1].As<v8::String>());
- RunSyncTask(backend_runner_, [&context_group_id, name](IsolateData* data) {
+ RunSyncTask(backend_runner_, [&context_group_id,
+ name](InspectorIsolateData* data) {
CHECK(data->CreateContext(
context_group_id,
v8_inspector::StringView(name.data(), name.size())));
@@ -334,9 +340,10 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
FATAL("Internal error: resetContextGroup(context_group_id).");
}
int context_group_id = args[0].As<v8::Int32>()->Value();
- RunSyncTask(backend_runner_, [&context_group_id](IsolateData* data) {
- data->ResetContextGroup(context_group_id);
- });
+ RunSyncTask(backend_runner_,
+ [&context_group_id](InspectorIsolateData* data) {
+ data->ResetContextGroup(context_group_id);
+ });
}
static void ConnectSession(const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -348,8 +355,8 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
}
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
FrontendChannelImpl* channel = new FrontendChannelImpl(
- IsolateData::FromContext(context)->task_runner(),
- IsolateData::FromContext(context)->GetContextGroupId(context),
+ InspectorIsolateData::FromContext(context)->task_runner(),
+ InspectorIsolateData::FromContext(context)->GetContextGroupId(context),
args.GetIsolate(), args[2].As<v8::Function>());
std::vector<uint8_t> state =
@@ -357,7 +364,7 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
int context_group_id = args[0].As<v8::Int32>()->Value();
int session_id = 0;
RunSyncTask(backend_runner_, [&context_group_id, &session_id, &channel,
- &state](IsolateData* data) {
+ &state](InspectorIsolateData* data) {
session_id = data->ConnectSession(
context_group_id,
v8_inspector::StringView(state.data(), state.size()), channel);
@@ -375,9 +382,10 @@ class UtilsExtension : public IsolateData::SetupGlobalTask {
}
int session_id = args[0].As<v8::Int32>()->Value();
std::vector<uint8_t> state;
- RunSyncTask(backend_runner_, [&session_id, &state](IsolateData* data) {
- state = data->DisconnectSession(session_id);
- });
+ RunSyncTask(backend_runner_,
+ [&session_id, &state](InspectorIsolateData* data) {
+ state = data->DisconnectSession(session_id);
+ });
channels_.erase(session_id);
args.GetReturnValue().Set(ToV8String(args.GetIsolate(), state));
}
@@ -410,7 +418,7 @@ bool StrictAccessCheck(v8::Local<v8::Context> accessing_context,
return accessing_context.IsEmpty();
}
-class InspectorExtension : public IsolateData::SetupGlobalTask {
+class InspectorExtension : public InspectorIsolateData::SetupGlobalTask {
public:
~InspectorExtension() override = default;
void Run(v8::Isolate* isolate,
@@ -482,7 +490,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
static void FireContextCreated(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
data->FireContextCreated(context, data->GetContextGroupId(context),
v8_inspector::StringView());
}
@@ -490,13 +498,13 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
static void FireContextDestroyed(
const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
data->FireContextDestroyed(context);
}
static void FreeContext(const v8::FunctionCallbackInfo<v8::Value>& args) {
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
data->FreeContext(context);
}
@@ -506,7 +514,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
FATAL("Internal error: addInspectedObject(session_id, object).");
}
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
data->AddInspectedObject(args[0].As<v8::Int32>()->Value(), args[1]);
}
@@ -515,7 +523,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
if (args.Length() != 1 || !args[0]->IsInt32()) {
FATAL("Internal error: setMaxAsyncTaskStacks(max).");
}
- IsolateData::FromContext(args.GetIsolate()->GetCurrentContext())
+ InspectorIsolateData::FromContext(args.GetIsolate()->GetCurrentContext())
->SetMaxAsyncTaskStacksForTest(args[0].As<v8::Int32>()->Value());
}
@@ -524,7 +532,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
if (args.Length() != 0) {
FATAL("Internal error: dumpAsyncTaskStacksStateForTest().");
}
- IsolateData::FromContext(args.GetIsolate()->GetCurrentContext())
+ InspectorIsolateData::FromContext(args.GetIsolate()->GetCurrentContext())
->DumpAsyncTaskStacksStateForTest();
}
@@ -533,7 +541,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
FATAL("Internal error: breakProgram('reason', 'details').");
}
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
std::vector<uint16_t> reason =
ToVector(args.GetIsolate(), args[0].As<v8::String>());
v8_inspector::StringView reason_view(reason.data(), reason.size());
@@ -570,7 +578,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
ToVector(args.GetIsolate(), args[2].As<v8::String>());
v8_inspector::StringView details_view(details.data(), details.size());
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
int context_group_id = data->GetContextGroupId(context);
data->SchedulePauseOnNextStatement(context_group_id, reason_view,
details_view);
@@ -634,7 +642,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
}
v8::Isolate* isolate = args.GetIsolate();
v8::Local<v8::Context> context = isolate->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
std::vector<uint16_t> description =
ToVector(isolate, args[0].As<v8::String>());
v8_inspector::StringView description_view(description.data(),
@@ -654,7 +662,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
FATAL("Internal error: externalAsyncTaskStarted(id)\n");
}
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
v8_inspector::V8StackTraceId* id =
static_cast<v8_inspector::V8StackTraceId*>(
args[0].As<v8::ArrayBuffer>()->GetBackingStore()->Data());
@@ -667,7 +675,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
FATAL("Internal error: externalAsyncTaskFinished(id)\n");
}
v8::Local<v8::Context> context = args.GetIsolate()->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
v8_inspector::V8StackTraceId* id =
static_cast<v8_inspector::V8StackTraceId*>(
args[0].As<v8::ArrayBuffer>()->GetBackingStore()->Data());
@@ -684,7 +692,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
}
v8::Isolate* isolate = args.GetIsolate();
v8::Local<v8::Context> context = isolate->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
int context_group_id = data->GetContextGroupId(context);
bool with_empty_stack = args[2].As<v8::Boolean>()->Value();
if (with_empty_stack) context->Exit();
@@ -715,7 +723,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
}
v8::Isolate* isolate = args.GetIsolate();
v8::Local<v8::Context> context = isolate->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
data->SetResourceNamePrefix(v8::Local<v8::String>::Cast(args[0]));
}
@@ -729,7 +737,7 @@ class InspectorExtension : public IsolateData::SetupGlobalTask {
}
v8::Isolate* isolate = args.GetIsolate();
v8::Local<v8::Context> context = isolate->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
auto error = v8::Exception::Error(args[0].As<v8::String>());
CHECK(data->AssociateExceptionData(error, args[1].As<v8::String>(),
@@ -742,6 +750,11 @@ int InspectorTestMain(int argc, char* argv[]) {
v8::V8::InitializeICUDefaultLocation(argv[0]);
std::unique_ptr<Platform> platform(platform::NewDefaultPlatform());
v8::V8::InitializePlatform(platform.get());
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ if (!v8::V8::InitializeVirtualMemoryCage()) {
+ FATAL("Could not initialize the virtual memory cage");
+ }
+#endif
FLAG_abort_on_contradictory_flags = true;
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
v8::V8::InitializeExternalStartupData(argv[0]);
@@ -762,7 +775,7 @@ int InspectorTestMain(int argc, char* argv[]) {
}
{
- IsolateData::SetupGlobalTasks frontend_extensions;
+ InspectorIsolateData::SetupGlobalTasks frontend_extensions;
frontend_extensions.emplace_back(new UtilsExtension());
TaskRunner frontend_runner(std::move(frontend_extensions),
kFailOnUncaughtExceptions, &ready_semaphore,
@@ -772,11 +785,11 @@ int InspectorTestMain(int argc, char* argv[]) {
int frontend_context_group_id = 0;
RunSyncTask(&frontend_runner,
- [&frontend_context_group_id](IsolateData* data) {
+ [&frontend_context_group_id](InspectorIsolateData* data) {
frontend_context_group_id = data->CreateContextGroup();
});
- IsolateData::SetupGlobalTasks backend_extensions;
+ InspectorIsolateData::SetupGlobalTasks backend_extensions;
backend_extensions.emplace_back(new SetTimeoutExtension());
backend_extensions.emplace_back(new InspectorExtension());
TaskRunner backend_runner(
diff --git a/deps/v8/test/inspector/inspector.status b/deps/v8/test/inspector/inspector.status
index 6f1e661b6a..2fec7679dc 100644
--- a/deps/v8/test/inspector/inspector.status
+++ b/deps/v8/test/inspector/inspector.status
@@ -20,21 +20,17 @@
# loop instead of properly reporting a RangeError for a stack overflow.
'regress/regress-crbug-1080638': [SKIP],
- # https://crbug.com/v8/11338
- 'runtime-call-stats/enable-disable': [SKIP],
-
# Tests that need to run sequentially (e.g. due to memory consumption).
'runtime/console-messages-limits': [PASS, HEAVY],
'runtime/regression-732717': [PASS, HEAVY],
}], # ALWAYS
##############################################################################
-['mode != debug or dcheck_always_on', {
- # Investigating flaky tests: https://crbug.com/v8/10876. Enable only on pure debug.
- 'debugger/pause-on-oom': [SKIP],
- 'debugger/pause-on-oom-wide': [SKIP],
- 'debugger/pause-on-oom-extrawide': [SKIP],
-}], # 'mode != debug or dcheck_always_on'
+['arch == x64 and mode == debug', {
+ # Flaky tests: https://crbug.com/v8/10876
+ 'debugger/pause-on-oom-extrawide': [PASS, FAIL],
+ 'debugger/pause-on-oom-wide': [PASS, FAIL],
+}], # 'arch == x64 and mode == debug'
##############################################################################
['system == android', {
@@ -119,12 +115,12 @@
}], # no_simd_hardware
##############################################################################
-['arch == riscv64', {
+['arch == riscv64 or arch == loong64', {
# SIMD support is still in progress.
'debugger/wasm-scope-info*': [SKIP],
'debugger/wasm-step-after-trap': [SKIP],
-}], # 'arch == riscv64'
+}], # 'arch == riscv64 or arch == loong64'
['arch == riscv64 and variant == stress_incremental_marking', {
'debugger/wasm-gc-breakpoints': [SKIP]
@@ -169,7 +165,6 @@
'debugger/set-breakpoint-at-last-line': [SKIP],
'debugger/set-breakpoint-breaks-on-first-breakable-location': [SKIP],
'heap-profiler/collect-garbage' : [SKIP],
- 'runtime-call-stats/collection': [SKIP],
'runtime/context-destroyed-on-context-collected': [SKIP],
'runtime/evaluate-async': [SKIP],
'runtime/internal-properties-entries': [SKIP],
@@ -192,7 +187,6 @@
'cpu-profiler/coverage': [SKIP],
'cpu-profiler/coverage-block': [SKIP],
'runtime/internal-properties-entries': [SKIP],
- 'runtime-call-stats/collection': [SKIP],
# Skip tests that might fail with concurrent allocation
'debugger/pause-on-oom-wide': [SKIP],
@@ -222,8 +216,6 @@
'regress/regress-crbug-1199919': [SKIP],
'console/destroy-context-during-log': [SKIP],
'console/scoped-variables': [SKIP],
- 'counters/collection': [SKIP],
- 'counters/enable-disable': [SKIP],
'cpu-profiler/console-profile': [SKIP],
'cpu-profiler/console-profile-asm-js': [SKIP],
'cpu-profiler/console-profile-end-parameterless-crash': [SKIP],
@@ -429,8 +421,6 @@
'print-method-not-found': [SKIP],
'regress/regress-crbug-1147552': [SKIP],
'regress/regress-crbug-1183664': [SKIP],
- 'runtime-call-stats/collection': [SKIP],
- 'runtime-call-stats/enable-disable': [SKIP],
'runtime/add-binding': [SKIP],
'runtime/await-promise': [SKIP],
'runtime/call-function-on-async': [SKIP],
diff --git a/deps/v8/test/inspector/isolate-data.cc b/deps/v8/test/inspector/isolate-data.cc
index 8de62cee45..976a862907 100644
--- a/deps/v8/test/inspector/isolate-data.cc
+++ b/deps/v8/test/inspector/isolate-data.cc
@@ -4,6 +4,10 @@
#include "test/inspector/isolate-data.h"
+#include "include/v8-context.h"
+#include "include/v8-exception.h"
+#include "include/v8-microtask-queue.h"
+#include "include/v8-template.h"
#include "src/base/vector.h"
#include "src/inspector/test-interface.h"
#include "test/inspector/task-runner.h"
@@ -38,10 +42,10 @@ class Inspectable : public v8_inspector::V8InspectorSession::Inspectable {
} // namespace
-IsolateData::IsolateData(TaskRunner* task_runner,
- IsolateData::SetupGlobalTasks setup_global_tasks,
- v8::StartupData* startup_data,
- WithInspector with_inspector)
+InspectorIsolateData::InspectorIsolateData(
+ TaskRunner* task_runner,
+ InspectorIsolateData::SetupGlobalTasks setup_global_tasks,
+ v8::StartupData* startup_data, WithInspector with_inspector)
: task_runner_(task_runner),
setup_global_tasks_(std::move(setup_global_tasks)) {
v8::Isolate::CreateParams params;
@@ -53,8 +57,9 @@ IsolateData::IsolateData(TaskRunner* task_runner,
isolate_.reset(v8::Isolate::New(params));
isolate_->SetMicrotasksPolicy(v8::MicrotasksPolicy::kScoped);
if (with_inspector) {
- isolate_->AddMessageListener(&IsolateData::MessageHandler);
- isolate_->SetPromiseRejectCallback(&IsolateData::PromiseRejectHandler);
+ isolate_->AddMessageListener(&InspectorIsolateData::MessageHandler);
+ isolate_->SetPromiseRejectCallback(
+ &InspectorIsolateData::PromiseRejectHandler);
inspector_ = v8_inspector::V8Inspector::create(isolate_.get(), this);
}
v8::HandleScope handle_scope(isolate_.get());
@@ -65,12 +70,13 @@ IsolateData::IsolateData(TaskRunner* task_runner,
v8::String::NewFromUtf8Literal(isolate_.get(), "notInspectable")));
}
-IsolateData* IsolateData::FromContext(v8::Local<v8::Context> context) {
- return static_cast<IsolateData*>(
+InspectorIsolateData* InspectorIsolateData::FromContext(
+ v8::Local<v8::Context> context) {
+ return static_cast<InspectorIsolateData*>(
context->GetAlignedPointerFromEmbedderData(kIsolateDataIndex));
}
-int IsolateData::CreateContextGroup() {
+int InspectorIsolateData::CreateContextGroup() {
int context_group_id = ++last_context_group_id_;
if (!CreateContext(context_group_id, v8_inspector::StringView())) {
DCHECK(isolate_->IsExecutionTerminating());
@@ -79,8 +85,8 @@ int IsolateData::CreateContextGroup() {
return context_group_id;
}
-bool IsolateData::CreateContext(int context_group_id,
- v8_inspector::StringView name) {
+bool InspectorIsolateData::CreateContext(int context_group_id,
+ v8_inspector::StringView name) {
v8::HandleScope handle_scope(isolate_.get());
v8::Local<v8::ObjectTemplate> global_template =
v8::ObjectTemplate::New(isolate_.get());
@@ -100,29 +106,32 @@ bool IsolateData::CreateContext(int context_group_id,
return true;
}
-v8::Local<v8::Context> IsolateData::GetDefaultContext(int context_group_id) {
+v8::Local<v8::Context> InspectorIsolateData::GetDefaultContext(
+ int context_group_id) {
return contexts_[context_group_id].begin()->Get(isolate_.get());
}
-void IsolateData::ResetContextGroup(int context_group_id) {
+void InspectorIsolateData::ResetContextGroup(int context_group_id) {
v8::SealHandleScope seal_handle_scope(isolate());
inspector_->resetContextGroup(context_group_id);
}
-int IsolateData::GetContextGroupId(v8::Local<v8::Context> context) {
+int InspectorIsolateData::GetContextGroupId(v8::Local<v8::Context> context) {
return static_cast<int>(
reinterpret_cast<intptr_t>(
context->GetAlignedPointerFromEmbedderData(kContextGroupIdIndex)) /
2);
}
-void IsolateData::RegisterModule(v8::Local<v8::Context> context,
- std::vector<uint16_t> name,
- v8::ScriptCompiler::Source* source) {
+void InspectorIsolateData::RegisterModule(v8::Local<v8::Context> context,
+ std::vector<uint16_t> name,
+ v8::ScriptCompiler::Source* source) {
v8::Local<v8::Module> module;
if (!v8::ScriptCompiler::CompileModule(isolate(), source).ToLocal(&module))
return;
- if (!module->InstantiateModule(context, &IsolateData::ModuleResolveCallback)
+ if (!module
+ ->InstantiateModule(context,
+ &InspectorIsolateData::ModuleResolveCallback)
.FromMaybe(false)) {
return;
}
@@ -132,12 +141,12 @@ void IsolateData::RegisterModule(v8::Local<v8::Context> context,
}
// static
-v8::MaybeLocal<v8::Module> IsolateData::ModuleResolveCallback(
+v8::MaybeLocal<v8::Module> InspectorIsolateData::ModuleResolveCallback(
v8::Local<v8::Context> context, v8::Local<v8::String> specifier,
v8::Local<v8::FixedArray> import_assertions,
v8::Local<v8::Module> referrer) {
// TODO(v8:11189) Consider JSON modules support in the InspectorClient
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
std::string str = *v8::String::Utf8Value(data->isolate(), specifier);
v8::MaybeLocal<v8::Module> maybe_module =
data->modules_[ToVector(data->isolate(), specifier)].Get(data->isolate());
@@ -149,9 +158,9 @@ v8::MaybeLocal<v8::Module> IsolateData::ModuleResolveCallback(
return maybe_module;
}
-int IsolateData::ConnectSession(int context_group_id,
- const v8_inspector::StringView& state,
- v8_inspector::V8Inspector::Channel* channel) {
+int InspectorIsolateData::ConnectSession(
+ int context_group_id, const v8_inspector::StringView& state,
+ v8_inspector::V8Inspector::Channel* channel) {
v8::SealHandleScope seal_handle_scope(isolate());
int session_id = ++last_session_id_;
sessions_[session_id] = inspector_->connect(context_group_id, channel, state);
@@ -159,7 +168,7 @@ int IsolateData::ConnectSession(int context_group_id,
return session_id;
}
-std::vector<uint8_t> IsolateData::DisconnectSession(int session_id) {
+std::vector<uint8_t> InspectorIsolateData::DisconnectSession(int session_id) {
v8::SealHandleScope seal_handle_scope(isolate());
auto it = sessions_.find(session_id);
CHECK(it != sessions_.end());
@@ -169,16 +178,16 @@ std::vector<uint8_t> IsolateData::DisconnectSession(int session_id) {
return result;
}
-void IsolateData::SendMessage(int session_id,
- const v8_inspector::StringView& message) {
+void InspectorIsolateData::SendMessage(
+ int session_id, const v8_inspector::StringView& message) {
v8::SealHandleScope seal_handle_scope(isolate());
auto it = sessions_.find(session_id);
if (it != sessions_.end()) it->second->dispatchProtocolMessage(message);
}
-void IsolateData::BreakProgram(int context_group_id,
- const v8_inspector::StringView& reason,
- const v8_inspector::StringView& details) {
+void InspectorIsolateData::BreakProgram(
+ int context_group_id, const v8_inspector::StringView& reason,
+ const v8_inspector::StringView& details) {
v8::SealHandleScope seal_handle_scope(isolate());
for (int session_id : GetSessionIds(context_group_id)) {
auto it = sessions_.find(session_id);
@@ -186,7 +195,7 @@ void IsolateData::BreakProgram(int context_group_id,
}
}
-void IsolateData::SchedulePauseOnNextStatement(
+void InspectorIsolateData::SchedulePauseOnNextStatement(
int context_group_id, const v8_inspector::StringView& reason,
const v8_inspector::StringView& details) {
v8::SealHandleScope seal_handle_scope(isolate());
@@ -197,7 +206,7 @@ void IsolateData::SchedulePauseOnNextStatement(
}
}
-void IsolateData::CancelPauseOnNextStatement(int context_group_id) {
+void InspectorIsolateData::CancelPauseOnNextStatement(int context_group_id) {
v8::SealHandleScope seal_handle_scope(isolate());
for (int session_id : GetSessionIds(context_group_id)) {
auto it = sessions_.find(session_id);
@@ -205,42 +214,42 @@ void IsolateData::CancelPauseOnNextStatement(int context_group_id) {
}
}
-void IsolateData::AsyncTaskScheduled(const v8_inspector::StringView& name,
- void* task, bool recurring) {
+void InspectorIsolateData::AsyncTaskScheduled(
+ const v8_inspector::StringView& name, void* task, bool recurring) {
v8::SealHandleScope seal_handle_scope(isolate());
inspector_->asyncTaskScheduled(name, task, recurring);
}
-void IsolateData::AsyncTaskStarted(void* task) {
+void InspectorIsolateData::AsyncTaskStarted(void* task) {
v8::SealHandleScope seal_handle_scope(isolate());
inspector_->asyncTaskStarted(task);
}
-void IsolateData::AsyncTaskFinished(void* task) {
+void InspectorIsolateData::AsyncTaskFinished(void* task) {
v8::SealHandleScope seal_handle_scope(isolate());
inspector_->asyncTaskFinished(task);
}
-v8_inspector::V8StackTraceId IsolateData::StoreCurrentStackTrace(
+v8_inspector::V8StackTraceId InspectorIsolateData::StoreCurrentStackTrace(
const v8_inspector::StringView& description) {
v8::SealHandleScope seal_handle_scope(isolate());
return inspector_->storeCurrentStackTrace(description);
}
-void IsolateData::ExternalAsyncTaskStarted(
+void InspectorIsolateData::ExternalAsyncTaskStarted(
const v8_inspector::V8StackTraceId& parent) {
v8::SealHandleScope seal_handle_scope(isolate());
inspector_->externalAsyncTaskStarted(parent);
}
-void IsolateData::ExternalAsyncTaskFinished(
+void InspectorIsolateData::ExternalAsyncTaskFinished(
const v8_inspector::V8StackTraceId& parent) {
v8::SealHandleScope seal_handle_scope(isolate());
inspector_->externalAsyncTaskFinished(parent);
}
-void IsolateData::AddInspectedObject(int session_id,
- v8::Local<v8::Value> object) {
+void InspectorIsolateData::AddInspectedObject(int session_id,
+ v8::Local<v8::Value> object) {
v8::SealHandleScope seal_handle_scope(isolate());
auto it = sessions_.find(session_id);
if (it == sessions_.end()) return;
@@ -249,24 +258,24 @@ void IsolateData::AddInspectedObject(int session_id,
it->second->addInspectedObject(std::move(inspectable));
}
-void IsolateData::SetMaxAsyncTaskStacksForTest(int limit) {
+void InspectorIsolateData::SetMaxAsyncTaskStacksForTest(int limit) {
v8::SealHandleScope seal_handle_scope(isolate());
v8_inspector::SetMaxAsyncTaskStacksForTest(inspector_.get(), limit);
}
-void IsolateData::DumpAsyncTaskStacksStateForTest() {
+void InspectorIsolateData::DumpAsyncTaskStacksStateForTest() {
v8::SealHandleScope seal_handle_scope(isolate());
v8_inspector::DumpAsyncTaskStacksStateForTest(inspector_.get());
}
// static
-int IsolateData::HandleMessage(v8::Local<v8::Message> message,
- v8::Local<v8::Value> exception) {
+int InspectorIsolateData::HandleMessage(v8::Local<v8::Message> message,
+ v8::Local<v8::Value> exception) {
v8::Isolate* isolate = message->GetIsolate();
v8::Local<v8::Context> context = isolate->GetEnteredOrMicrotaskContext();
if (context.IsEmpty()) return 0;
v8_inspector::V8Inspector* inspector =
- IsolateData::FromContext(context)->inspector_.get();
+ InspectorIsolateData::FromContext(context)->inspector_.get();
v8::Local<v8::StackTrace> stack = message->GetStackTrace();
int script_id = message->GetScriptOrigin().ScriptId();
@@ -297,13 +306,13 @@ int IsolateData::HandleMessage(v8::Local<v8::Message> message,
}
// static
-void IsolateData::MessageHandler(v8::Local<v8::Message> message,
- v8::Local<v8::Value> exception) {
+void InspectorIsolateData::MessageHandler(v8::Local<v8::Message> message,
+ v8::Local<v8::Value> exception) {
HandleMessage(message, exception);
}
// static
-void IsolateData::PromiseRejectHandler(v8::PromiseRejectMessage data) {
+void InspectorIsolateData::PromiseRejectHandler(v8::PromiseRejectMessage data) {
v8::Isolate* isolate = data.GetPromise()->GetIsolate();
v8::Local<v8::Context> context = isolate->GetEnteredOrMicrotaskContext();
if (context.IsEmpty()) return;
@@ -316,7 +325,7 @@ void IsolateData::PromiseRejectHandler(v8::PromiseRejectMessage data) {
if (!promise->GetPrivate(context, id_private).ToLocal(&id)) return;
if (!id->IsInt32()) return;
v8_inspector::V8Inspector* inspector =
- IsolateData::FromContext(context)->inspector_.get();
+ InspectorIsolateData::FromContext(context)->inspector_.get();
v8::SealHandleScope seal_handle_scope(isolate);
const char* reason_str = "Handler added to rejected promise";
inspector->exceptionRevoked(
@@ -337,28 +346,29 @@ void IsolateData::PromiseRejectHandler(v8::PromiseRejectMessage data) {
}
}
-void IsolateData::FireContextCreated(v8::Local<v8::Context> context,
- int context_group_id,
- v8_inspector::StringView name) {
+void InspectorIsolateData::FireContextCreated(v8::Local<v8::Context> context,
+ int context_group_id,
+ v8_inspector::StringView name) {
v8_inspector::V8ContextInfo info(context, context_group_id, name);
info.hasMemoryOnConsole = true;
v8::SealHandleScope seal_handle_scope(isolate());
inspector_->contextCreated(info);
}
-void IsolateData::FireContextDestroyed(v8::Local<v8::Context> context) {
+void InspectorIsolateData::FireContextDestroyed(
+ v8::Local<v8::Context> context) {
v8::SealHandleScope seal_handle_scope(isolate());
inspector_->contextDestroyed(context);
}
-void IsolateData::FreeContext(v8::Local<v8::Context> context) {
+void InspectorIsolateData::FreeContext(v8::Local<v8::Context> context) {
int context_group_id = GetContextGroupId(context);
auto it = contexts_.find(context_group_id);
if (it == contexts_.end()) return;
contexts_.erase(it);
}
-std::vector<int> IsolateData::GetSessionIds(int context_group_id) {
+std::vector<int> InspectorIsolateData::GetSessionIds(int context_group_id) {
std::vector<int> result;
for (auto& it : sessions_) {
if (context_group_by_session_[it.second.get()] == context_group_id)
@@ -367,7 +377,8 @@ std::vector<int> IsolateData::GetSessionIds(int context_group_id) {
return result;
}
-bool IsolateData::isInspectableHeapObject(v8::Local<v8::Object> object) {
+bool InspectorIsolateData::isInspectableHeapObject(
+ v8::Local<v8::Object> object) {
v8::Local<v8::Context> context = isolate()->GetCurrentContext();
v8::MicrotasksScope microtasks_scope(
isolate(), v8::MicrotasksScope::kDoNotRunMicrotasks);
@@ -375,55 +386,56 @@ bool IsolateData::isInspectableHeapObject(v8::Local<v8::Object> object) {
.FromMaybe(false);
}
-v8::Local<v8::Context> IsolateData::ensureDefaultContextInGroup(
+v8::Local<v8::Context> InspectorIsolateData::ensureDefaultContextInGroup(
int context_group_id) {
return GetDefaultContext(context_group_id);
}
-void IsolateData::SetCurrentTimeMS(double time) {
+void InspectorIsolateData::SetCurrentTimeMS(double time) {
current_time_ = time;
current_time_set_ = true;
}
-double IsolateData::currentTimeMS() {
+double InspectorIsolateData::currentTimeMS() {
if (current_time_set_) return current_time_;
return V8::GetCurrentPlatform()->CurrentClockTimeMillis();
}
-void IsolateData::SetMemoryInfo(v8::Local<v8::Value> memory_info) {
+void InspectorIsolateData::SetMemoryInfo(v8::Local<v8::Value> memory_info) {
memory_info_.Reset(isolate_.get(), memory_info);
}
-void IsolateData::SetLogConsoleApiMessageCalls(bool log) {
+void InspectorIsolateData::SetLogConsoleApiMessageCalls(bool log) {
log_console_api_message_calls_ = log;
}
-void IsolateData::SetLogMaxAsyncCallStackDepthChanged(bool log) {
+void InspectorIsolateData::SetLogMaxAsyncCallStackDepthChanged(bool log) {
log_max_async_call_stack_depth_changed_ = log;
}
-void IsolateData::SetAdditionalConsoleApi(v8_inspector::StringView api_script) {
+void InspectorIsolateData::SetAdditionalConsoleApi(
+ v8_inspector::StringView api_script) {
v8::HandleScope handle_scope(isolate());
additional_console_api_.Reset(isolate(), ToV8String(isolate(), api_script));
}
-v8::MaybeLocal<v8::Value> IsolateData::memoryInfo(v8::Isolate* isolate,
- v8::Local<v8::Context>) {
+v8::MaybeLocal<v8::Value> InspectorIsolateData::memoryInfo(
+ v8::Isolate* isolate, v8::Local<v8::Context>) {
if (memory_info_.IsEmpty()) return v8::MaybeLocal<v8::Value>();
return memory_info_.Get(isolate);
}
-void IsolateData::runMessageLoopOnPause(int) {
+void InspectorIsolateData::runMessageLoopOnPause(int) {
v8::SealHandleScope seal_handle_scope(isolate());
task_runner_->RunMessageLoop(true);
}
-void IsolateData::quitMessageLoopOnPause() {
+void InspectorIsolateData::quitMessageLoopOnPause() {
v8::SealHandleScope seal_handle_scope(isolate());
task_runner_->QuitMessageLoop();
}
-void IsolateData::installAdditionalCommandLineAPI(
+void InspectorIsolateData::installAdditionalCommandLineAPI(
v8::Local<v8::Context> context, v8::Local<v8::Object> object) {
if (additional_console_api_.IsEmpty()) return;
CHECK(context->GetIsolate() == isolate());
@@ -438,12 +450,11 @@ void IsolateData::installAdditionalCommandLineAPI(
CHECK(!script.ToLocalChecked()->Run(context).IsEmpty());
}
-void IsolateData::consoleAPIMessage(int contextGroupId,
- v8::Isolate::MessageErrorLevel level,
- const v8_inspector::StringView& message,
- const v8_inspector::StringView& url,
- unsigned lineNumber, unsigned columnNumber,
- v8_inspector::V8StackTrace* stack) {
+void InspectorIsolateData::consoleAPIMessage(
+ int contextGroupId, v8::Isolate::MessageErrorLevel level,
+ const v8_inspector::StringView& message,
+ const v8_inspector::StringView& url, unsigned lineNumber,
+ unsigned columnNumber, v8_inspector::V8StackTrace* stack) {
if (!log_console_api_message_calls_) return;
Print(isolate_.get(), message);
fprintf(stdout, " (");
@@ -453,18 +464,18 @@ void IsolateData::consoleAPIMessage(int contextGroupId,
fprintf(stdout, "\n");
}
-void IsolateData::maxAsyncCallStackDepthChanged(int depth) {
+void InspectorIsolateData::maxAsyncCallStackDepthChanged(int depth) {
if (!log_max_async_call_stack_depth_changed_) return;
fprintf(stdout, "maxAsyncCallStackDepthChanged: %d\n", depth);
}
-void IsolateData::SetResourceNamePrefix(v8::Local<v8::String> prefix) {
+void InspectorIsolateData::SetResourceNamePrefix(v8::Local<v8::String> prefix) {
resource_name_prefix_.Reset(isolate(), prefix);
}
-bool IsolateData::AssociateExceptionData(v8::Local<v8::Value> exception,
- v8::Local<v8::Name> key,
- v8::Local<v8::Value> value) {
+bool InspectorIsolateData::AssociateExceptionData(
+ v8::Local<v8::Value> exception, v8::Local<v8::Name> key,
+ v8::Local<v8::Value> value) {
return inspector_->associateExceptionData(
this->isolate()->GetCurrentContext(), exception, key, value);
}
@@ -484,7 +495,8 @@ class StringBufferImpl : public v8_inspector::StringBuffer {
};
} // anonymous namespace
-std::unique_ptr<v8_inspector::StringBuffer> IsolateData::resourceNameToUrl(
+std::unique_ptr<v8_inspector::StringBuffer>
+InspectorIsolateData::resourceNameToUrl(
const v8_inspector::StringView& resourceName) {
if (resource_name_prefix_.IsEmpty()) return nullptr;
v8::HandleScope handle_scope(isolate());
@@ -494,7 +506,7 @@ std::unique_ptr<v8_inspector::StringBuffer> IsolateData::resourceNameToUrl(
return std::make_unique<StringBufferImpl>(isolate(), url);
}
-int64_t IsolateData::generateUniqueId() {
+int64_t InspectorIsolateData::generateUniqueId() {
static int64_t last_unique_id = 0L;
// Keep it not too random for tests.
return ++last_unique_id;
diff --git a/deps/v8/test/inspector/isolate-data.h b/deps/v8/test/inspector/isolate-data.h
index 921edfb462..4b8ddfad07 100644
--- a/deps/v8/test/inspector/isolate-data.h
+++ b/deps/v8/test/inspector/isolate-data.h
@@ -8,21 +8,29 @@
#include <map>
#include <memory>
+#include "include/v8-array-buffer.h"
#include "include/v8-inspector.h"
+#include "include/v8-local-handle.h"
#include "include/v8-platform.h"
-#include "include/v8.h"
+#include "include/v8-script.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/base/vector.h"
namespace v8 {
+
+class Context;
+class Isolate;
+class ObjectTemplate;
+class StartupData;
+
namespace internal {
class TaskRunner;
enum WithInspector : bool { kWithInspector = true, kNoInspector = false };
-class IsolateData : public v8_inspector::V8InspectorClient {
+class InspectorIsolateData : public v8_inspector::V8InspectorClient {
public:
class SetupGlobalTask {
public:
@@ -32,14 +40,16 @@ class IsolateData : public v8_inspector::V8InspectorClient {
};
using SetupGlobalTasks = std::vector<std::unique_ptr<SetupGlobalTask>>;
- IsolateData(const IsolateData&) = delete;
- IsolateData& operator=(const IsolateData&) = delete;
- IsolateData(TaskRunner* task_runner, SetupGlobalTasks setup_global_tasks,
- v8::StartupData* startup_data, WithInspector with_inspector);
- static IsolateData* FromContext(v8::Local<v8::Context> context);
+ InspectorIsolateData(const InspectorIsolateData&) = delete;
+ InspectorIsolateData& operator=(const InspectorIsolateData&) = delete;
+ InspectorIsolateData(TaskRunner* task_runner,
+ SetupGlobalTasks setup_global_tasks,
+ v8::StartupData* startup_data,
+ WithInspector with_inspector);
+ static InspectorIsolateData* FromContext(v8::Local<v8::Context> context);
- ~IsolateData() override {
- // Enter the isolate before destructing this IsolateData, so that
+ ~InspectorIsolateData() override {
+ // Enter the isolate before destructing this InspectorIsolateData, so that
// destructors that run before the Isolate's destructor still see it as
// entered.
isolate()->Enter();
@@ -141,7 +151,7 @@ class IsolateData : public v8_inspector::V8InspectorClient {
// disposed in the right order, relative to other member variables.
struct IsolateDeleter {
void operator()(v8::Isolate* isolate) const {
- // Exit the isolate after it was entered by ~IsolateData.
+ // Exit the isolate after it was entered by ~InspectorIsolateData.
isolate->Exit();
isolate->Dispose();
}
diff --git a/deps/v8/test/inspector/runtime-call-stats/collection-expected.txt b/deps/v8/test/inspector/runtime-call-stats/collection-expected.txt
deleted file mode 100644
index ba963ba627..0000000000
--- a/deps/v8/test/inspector/runtime-call-stats/collection-expected.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-Test RunTimeCallStats collection using Profiler.getRuntimeCallStats.
-PASSED
diff --git a/deps/v8/test/inspector/runtime-call-stats/collection.js b/deps/v8/test/inspector/runtime-call-stats/collection.js
deleted file mode 100644
index a3a680e11b..0000000000
--- a/deps/v8/test/inspector/runtime-call-stats/collection.js
+++ /dev/null
@@ -1,83 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-let {session, contextGroup, Protocol} = InspectorTest.start(
- 'Test RunTimeCallStats collection using Profiler.getRuntimeCallStats.');
-
-var source =
-`
-function fib(x) {
- if (x < 2) return 1;
- return fib(x-1) + fib(x-2);
-}
-fib(5);
-`;
-
-function buildCounterMap(result) {
- let counterMap = new Map();
-
- let counters = result.result.result;
- for (const {name, value} of counters) {
- counterMap.set(name, value);
- }
-
- return counterMap;
-}
-
-function compareCounterMaps(counterMap, counterMap2) {
- // Check for counters that are present in the first map but are not found
- // in the the second map
- for (let counter of counterMap.keys()) {
- if (!counterMap2.has(counter)) {
- InspectorTest.log(`Counter ${counter} is missing`);
- return false;
- }
- }
-
- // Check for the counter value changes
- let counterValueIncreased = false;
- for (let [counter, value2] of counterMap2) {
- let value = counterMap.get(counter);
- if (value !== undefined) {
- if (value2 < value) {
- InspectorTest.log(`Counter ${counter} value decreased: ${value} -> ${value2}`);
- return false;
- }
- if (value2 > value) {
- counterValueIncreased = true;
- }
- }
- }
-
- if (!counterValueIncreased && counterMap.size === counterMap2.size) {
- InspectorTest.log(`No counter values has increased or added`);
- return false;
- }
-
- return true;
-}
-
-(async function test() {
- await Protocol.Runtime.enable();
- await Protocol.Profiler.enableRuntimeCallStats();
-
- let counterMap = buildCounterMap(await Protocol.Profiler.getRuntimeCallStats());
-
- await Protocol.Runtime.evaluate({ expression: source, sourceURL: arguments.callee.name, persistScript: true });
-
- let counterMap2 = buildCounterMap(await Protocol.Profiler.getRuntimeCallStats());
- const check1 = compareCounterMaps(counterMap, counterMap2);
-
- await Protocol.Runtime.evaluate({ expression: source, sourceURL: arguments.callee.name, persistScript: true });
-
- let counterMap3 = buildCounterMap(await Protocol.Profiler.getRuntimeCallStats());
- const check2 = compareCounterMaps(counterMap2, counterMap3);
-
- await Protocol.Profiler.disableRuntimeCallStats();
- await Protocol.Runtime.disable();
-
- InspectorTest.log(check1 && check2 ? 'PASSED' : 'FAILED');
-
- InspectorTest.completeTest();
-})().catch(e => InspectorTest.log('caught: ' + e));
diff --git a/deps/v8/test/inspector/runtime-call-stats/enable-disable-expected.txt b/deps/v8/test/inspector/runtime-call-stats/enable-disable-expected.txt
deleted file mode 100644
index 4ba4feeb23..0000000000
--- a/deps/v8/test/inspector/runtime-call-stats/enable-disable-expected.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-Test Runtime Call Stats collection enabling and disabling.
-Expected error: "Runtime Call Stats collection is not enabled."
-Expected error: "Runtime Call Stats collection is already enabled."
-Some counters reported
-Expected error: "Runtime Call Stats collection is not enabled."
diff --git a/deps/v8/test/inspector/runtime-call-stats/enable-disable.js b/deps/v8/test/inspector/runtime-call-stats/enable-disable.js
deleted file mode 100644
index 81ea070f7a..0000000000
--- a/deps/v8/test/inspector/runtime-call-stats/enable-disable.js
+++ /dev/null
@@ -1,51 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-let {session, contextGroup, Protocol} = InspectorTest.start(
- 'Test Runtime Call Stats collection enabling and disabling.');
-
-var source =
-`
-function fib(x) {
-if (x < 2) return 1;
-return fib(x-1) + fib(x-2);
-}
-fib(5);
-`;
-
-function logErrorMessage(result) {
-InspectorTest.log('Expected error: "' + result.error.message + '"');
-}
-
-(async function test() {
-await Protocol.Runtime.enable();
-
-// This should fail with "not enabled" error.
-logErrorMessage(await Protocol.Profiler.getRuntimeCallStats());
-
-// This should fail with "already enabled" error.
-await Protocol.Profiler.enableRuntimeCallStats();
-logErrorMessage(await Protocol.Profiler.enableRuntimeCallStats());
-
-// The result should not be empty.
-await Protocol.Runtime.evaluate({ expression: source, sourceURL: arguments.callee.name, persistScript: true });
-const counters = (await Protocol.Profiler.getRuntimeCallStats()).result.result;
-if (counters.length > 0)
- InspectorTest.log('Some counters reported');
-await Protocol.Profiler.disableRuntimeCallStats();
-
-// This should fail with "not enabled" error too.
-logErrorMessage(await Protocol.Profiler.getRuntimeCallStats());
-
-// The result should not be empty and have smaller amount of counters than
-// the first result.
-await Protocol.Profiler.enableRuntimeCallStats();
-const counters2 = (await Protocol.Profiler.getRuntimeCallStats()).result.result;
-if (counters2.length > 0 && counters2.length < counters.length)
- InspectorTest.log('Less counters reported');
-await Protocol.Profiler.disableRuntimeCallStats();
-
-await Protocol.Runtime.disable();
-InspectorTest.completeTest();
-})().catch(e => InspectorTest.log('caught: ' + e));
diff --git a/deps/v8/test/inspector/runtime/get-properties-expected.txt b/deps/v8/test/inspector/runtime/get-properties-expected.txt
index b67f470971..09d3b8c682 100644
--- a/deps/v8/test/inspector/runtime/get-properties-expected.txt
+++ b/deps/v8/test/inspector/runtime/get-properties-expected.txt
@@ -26,6 +26,34 @@ Running test: testNotOwn
Internal properties
[[Prototype]] object undefined
+Running test: testNotOwnSet
+ Symbol(Symbol.iterator) inherited function undefined
+ Symbol(Symbol.toStringTag) inherited string Set
+ __defineGetter__ inherited function undefined
+ __defineSetter__ inherited function undefined
+ __lookupGetter__ inherited function undefined
+ __lookupSetter__ inherited function undefined
+ __proto__ inherited no value, getter, setter
+ add inherited function undefined
+ clear inherited function undefined
+ constructor inherited function undefined
+ delete inherited function undefined
+ entries inherited function undefined
+ forEach inherited function undefined
+ has inherited function undefined
+ hasOwnProperty inherited function undefined
+ isPrototypeOf inherited function undefined
+ keys inherited function undefined
+ propertyIsEnumerable inherited function undefined
+ size inherited number 3
+ toLocaleString inherited function undefined
+ toString inherited function undefined
+ valueOf inherited function undefined
+ values inherited function undefined
+Internal properties
+ [[Entries]] object undefined
+ [[Prototype]] object undefined
+
Running test: testAccessorsOnly
b own no value, getter, setter
d own no value, setter
@@ -157,3 +185,12 @@ Running test: testObjectWithProtoProperty
__proto__ own object undefined
Internal properties
[[Prototype]] object undefined
+
+Running test: testArrayNonIndexedPropertiesOnly
+ length own number 2
+Internal properties
+ [[Prototype]] object undefined
+
+Running test: testTypedArrayNonIndexedPropertiesOnly
+Internal properties
+ [[Prototype]] object undefined
diff --git a/deps/v8/test/inspector/runtime/get-properties.js b/deps/v8/test/inspector/runtime/get-properties.js
index 69305e241d..c2ffedfd0a 100644
--- a/deps/v8/test/inspector/runtime/get-properties.js
+++ b/deps/v8/test/inspector/runtime/get-properties.js
@@ -15,6 +15,10 @@ InspectorTest.runAsyncTestSuite([
return logExpressionProperties('({ a: 2, set b(_) {}, get b() {return 5;}, __proto__: { a: 3, c: 4, get d() {return 6;} }})', { ownProperties: false });
},
+ function testNotOwnSet() {
+ return logExpressionProperties('new Set([1, 2, 3])', { ownProperties: false });
+ },
+
function testAccessorsOnly() {
return logExpressionProperties('({ a: 2, set b(_) {}, get b() {return 5;}, c: \'c\', set d(_){} })', { ownProperties: true, accessorPropertiesOnly: true});
},
@@ -102,6 +106,14 @@ InspectorTest.runAsyncTestSuite([
async function testObjectWithProtoProperty() {
await logExpressionProperties('Object.defineProperty({}, "__proto__", {enumerable: true, value: {b:"aaa"}})');
+ },
+
+ function testArrayNonIndexedPropertiesOnly() {
+ return logExpressionProperties('[1, 2]', {nonIndexedPropertiesOnly: true, ownProperties: true});
+ },
+
+ function testTypedArrayNonIndexedPropertiesOnly() {
+ return logExpressionProperties('new Int8Array(1)', {nonIndexedPropertiesOnly: true, ownProperties: true});
}
]);
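
The new nonIndexedPropertiesOnly option asks Runtime.getProperties to skip element (indexed) properties and report only named ones, which is why the expected output for [1, 2] above lists just length plus the internal properties. A rough sketch of the underlying protocol calls, following this test's harness conventions (names mirror logExpressionProperties and are illustrative, not part of the patch):

    // Hypothetical harness snippet.
    const {result} = await Protocol.Runtime.evaluate({expression: '[1, 2]'});
    const props = await Protocol.Runtime.getProperties({
      objectId: result.result.objectId,
      ownProperties: true,
      nonIndexedPropertiesOnly: true,  // new in this protocol revision
    });
    // props.result now contains `length` but not indices 0 and 1.
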
diff --git a/deps/v8/test/inspector/task-runner.cc b/deps/v8/test/inspector/task-runner.cc
index d1031f92ac..3049a1c3f8 100644
--- a/deps/v8/test/inspector/task-runner.cc
+++ b/deps/v8/test/inspector/task-runner.cc
@@ -5,6 +5,9 @@
#include "test/inspector/task-runner.h"
#include "include/libplatform/libplatform.h"
+#include "include/v8-exception.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-primitive.h"
#include "src/flags/flags.h"
#if !defined(_WIN32) && !defined(_WIN64)
@@ -35,11 +38,10 @@ void ReportUncaughtException(v8::Isolate* isolate,
} // namespace
-TaskRunner::TaskRunner(IsolateData::SetupGlobalTasks setup_global_tasks,
- CatchExceptions catch_exceptions,
- v8::base::Semaphore* ready_semaphore,
- v8::StartupData* startup_data,
- WithInspector with_inspector)
+TaskRunner::TaskRunner(
+ InspectorIsolateData::SetupGlobalTasks setup_global_tasks,
+ CatchExceptions catch_exceptions, v8::base::Semaphore* ready_semaphore,
+ v8::StartupData* startup_data, WithInspector with_inspector)
: Thread(Options("Task Runner")),
setup_global_tasks_(std::move(setup_global_tasks)),
startup_data_(startup_data),
@@ -56,8 +58,8 @@ TaskRunner::TaskRunner(IsolateData::SetupGlobalTasks setup_global_tasks,
TaskRunner::~TaskRunner() {}
void TaskRunner::Run() {
- data_.reset(new IsolateData(this, std::move(setup_global_tasks_),
- startup_data_, with_inspector_));
+ data_.reset(new InspectorIsolateData(this, std::move(setup_global_tasks_),
+ startup_data_, with_inspector_));
if (ready_semaphore_) ready_semaphore_->Signal();
RunMessageLoop(false);
}
diff --git a/deps/v8/test/inspector/task-runner.h b/deps/v8/test/inspector/task-runner.h
index db99f15e22..c6f792f964 100644
--- a/deps/v8/test/inspector/task-runner.h
+++ b/deps/v8/test/inspector/task-runner.h
@@ -10,7 +10,6 @@
#include "include/v8-inspector.h"
#include "include/v8-platform.h"
-#include "include/v8.h"
#include "src/base/macros.h"
#include "src/base/platform/platform.h"
#include "src/base/vector.h"
@@ -18,6 +17,9 @@
#include "test/inspector/isolate-data.h"
namespace v8 {
+
+class StartupData;
+
namespace internal {
enum CatchExceptions {
@@ -32,17 +34,17 @@ class TaskRunner : public v8::base::Thread {
public:
virtual ~Task() = default;
virtual bool is_priority_task() = 0;
- virtual void Run(IsolateData* data) = 0;
+ virtual void Run(InspectorIsolateData* data) = 0;
};
- TaskRunner(IsolateData::SetupGlobalTasks setup_global_tasks,
+ TaskRunner(InspectorIsolateData::SetupGlobalTasks setup_global_tasks,
CatchExceptions catch_exceptions,
v8::base::Semaphore* ready_semaphore,
v8::StartupData* startup_data, WithInspector with_inspector);
~TaskRunner() override;
TaskRunner(const TaskRunner&) = delete;
TaskRunner& operator=(const TaskRunner&) = delete;
- IsolateData* data() const { return data_.get(); }
+ InspectorIsolateData* data() const { return data_.get(); }
// Thread implementation.
void Run() override;
@@ -59,12 +61,12 @@ class TaskRunner : public v8::base::Thread {
std::unique_ptr<Task> GetNext(bool only_protocol);
v8::Isolate* isolate() const { return data_->isolate(); }
- IsolateData::SetupGlobalTasks setup_global_tasks_;
+ InspectorIsolateData::SetupGlobalTasks setup_global_tasks_;
v8::StartupData* startup_data_;
WithInspector with_inspector_;
CatchExceptions catch_exceptions_;
v8::base::Semaphore* ready_semaphore_;
- std::unique_ptr<IsolateData> data_;
+ std::unique_ptr<InspectorIsolateData> data_;
// deferred_queue_ combined with queue_ (in this order) have all tasks in the
// correct order. Sometimes we skip non-protocol tasks by moving them from
diff --git a/deps/v8/test/inspector/tasks.cc b/deps/v8/test/inspector/tasks.cc
index 79f40c0e27..13168f2839 100644
--- a/deps/v8/test/inspector/tasks.cc
+++ b/deps/v8/test/inspector/tasks.cc
@@ -6,15 +6,15 @@
#include <vector>
-#include "include/v8-inspector.h"
-#include "include/v8.h"
+#include "include/v8-isolate.h"
+#include "include/v8-script.h"
#include "test/inspector/isolate-data.h"
#include "test/inspector/utils.h"
namespace v8 {
namespace internal {
-void ExecuteStringTask::Run(IsolateData* data) {
+void ExecuteStringTask::Run(InspectorIsolateData* data) {
v8::MicrotasksScope microtasks_scope(data->isolate(),
v8::MicrotasksScope::kRunMicrotasks);
v8::HandleScope handle_scope(data->isolate());
diff --git a/deps/v8/test/inspector/tasks.h b/deps/v8/test/inspector/tasks.h
index 28d38f2a2a..33d7135a59 100644
--- a/deps/v8/test/inspector/tasks.h
+++ b/deps/v8/test/inspector/tasks.h
@@ -7,8 +7,11 @@
#include <vector>
+#include "include/v8-context.h"
+#include "include/v8-function.h"
#include "include/v8-inspector.h"
-#include "include/v8.h"
+#include "include/v8-microtask-queue.h"
+#include "include/v8-primitive.h"
#include "src/base/platform/semaphore.h"
#include "test/inspector/isolate-data.h"
#include "test/inspector/task-runner.h"
@@ -27,7 +30,7 @@ void RunSyncTask(TaskRunner* task_runner, T callback) {
bool is_priority_task() final { return true; }
private:
- void Run(IsolateData* data) override {
+ void Run(InspectorIsolateData* data) override {
callback_(data);
if (ready_semaphore_) ready_semaphore_->Signal();
}
@@ -48,7 +51,7 @@ class SendMessageToBackendTask : public TaskRunner::Task {
bool is_priority_task() final { return true; }
private:
- void Run(IsolateData* data) override {
+ void Run(InspectorIsolateData* data) override {
v8_inspector::StringView message_view(message_.data(), message_.size());
data->SendMessage(session_id_, message_view);
}
@@ -68,7 +71,7 @@ inline void RunAsyncTask(TaskRunner* task_runner,
AsyncTask(const AsyncTask&) = delete;
AsyncTask& operator=(const AsyncTask&) = delete;
bool is_priority_task() override { return inner_->is_priority_task(); }
- void Run(IsolateData* data) override {
+ void Run(InspectorIsolateData* data) override {
data->AsyncTaskStarted(inner_.get());
inner_->Run(data);
data->AsyncTaskFinished(inner_.get());
@@ -104,7 +107,7 @@ class ExecuteStringTask : public TaskRunner::Task {
ExecuteStringTask(const ExecuteStringTask&) = delete;
ExecuteStringTask& operator=(const ExecuteStringTask&) = delete;
bool is_priority_task() override { return false; }
- void Run(IsolateData* data) override;
+ void Run(InspectorIsolateData* data) override;
private:
std::vector<uint16_t> expression_;
@@ -125,7 +128,7 @@ class SetTimeoutTask : public TaskRunner::Task {
bool is_priority_task() final { return false; }
private:
- void Run(IsolateData* data) override {
+ void Run(InspectorIsolateData* data) override {
v8::MicrotasksScope microtasks_scope(data->isolate(),
v8::MicrotasksScope::kRunMicrotasks);
v8::HandleScope handle_scope(data->isolate());
@@ -141,7 +144,7 @@ class SetTimeoutTask : public TaskRunner::Task {
int context_group_id_;
};
-class SetTimeoutExtension : public IsolateData::SetupGlobalTask {
+class SetTimeoutExtension : public InspectorIsolateData::SetupGlobalTask {
public:
void Run(v8::Isolate* isolate,
v8::Local<v8::ObjectTemplate> global) override {
@@ -159,7 +162,7 @@ class SetTimeoutExtension : public IsolateData::SetupGlobalTask {
}
v8::Isolate* isolate = args.GetIsolate();
v8::Local<v8::Context> context = isolate->GetCurrentContext();
- IsolateData* data = IsolateData::FromContext(context);
+ InspectorIsolateData* data = InspectorIsolateData::FromContext(context);
int context_group_id = data->GetContextGroupId(context);
const char* task_name = "setTimeout";
v8_inspector::StringView task_name_view(
diff --git a/deps/v8/test/inspector/utils.cc b/deps/v8/test/inspector/utils.cc
index c70382f57f..2f0431be85 100644
--- a/deps/v8/test/inspector/utils.cc
+++ b/deps/v8/test/inspector/utils.cc
@@ -7,7 +7,7 @@
#include <vector>
#include "include/v8-inspector.h"
-#include "include/v8.h"
+#include "include/v8-primitive.h"
namespace v8 {
namespace internal {
diff --git a/deps/v8/test/inspector/utils.h b/deps/v8/test/inspector/utils.h
index 845a1c0311..9fad25bc6e 100644
--- a/deps/v8/test/inspector/utils.h
+++ b/deps/v8/test/inspector/utils.h
@@ -8,10 +8,14 @@
#include <vector>
#include "include/v8-inspector.h"
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
#include "src/base/macros.h"
namespace v8 {
+
+class Isolate;
+class String;
+
namespace internal {
std::vector<uint8_t> ToBytes(v8::Isolate*, v8::Local<v8::String>);
diff --git a/deps/v8/test/intl/enumeration/calendar-sorted.js b/deps/v8/test/intl/enumeration/calendar-sorted.js
new file mode 100644
index 0000000000..11e19c06f8
--- /dev/null
+++ b/deps/v8/test/intl/enumeration/calendar-sorted.js
@@ -0,0 +1,11 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony_intl_enumeration
+
+// Test that the returned items of calendar are sorted
+let name = "calendar";
+let items = Intl.supportedValuesOf(name);
+assertEquals([...items].sort(), items,
+ "return value of Intl.supportedValuesOf('" + name + "') should be sorted");
diff --git a/deps/v8/test/intl/enumeration/callendar-syntax-valid.js b/deps/v8/test/intl/enumeration/callendar-syntax-valid.js
new file mode 100644
index 0000000000..881c7e603c
--- /dev/null
+++ b/deps/v8/test/intl/enumeration/callendar-syntax-valid.js
@@ -0,0 +1,14 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony_intl_enumeration
+
+// Test that the returned calendar items fit 'type'
+let regex = /^[a-zA-Z0-9]{3,8}(-[a-zA-Z0-9]{3,8})*$/;
+Intl.supportedValuesOf("calendar").forEach(
+ function(calendar) {
+ assertTrue(regex.test(calendar),
+                 "Intl.supportedValuesOf('calendar') returned " + calendar +
+ " which does not meet 'type: alphanum{3,8}(sep alphanum{3,8})*'");
+ });
diff --git a/deps/v8/test/intl/enumeration/collation-sorted.js b/deps/v8/test/intl/enumeration/collation-sorted.js
new file mode 100644
index 0000000000..d40e9be3bb
--- /dev/null
+++ b/deps/v8/test/intl/enumeration/collation-sorted.js
@@ -0,0 +1,11 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony_intl_enumeration
+
+// Test that the returned items of collation are sorted
+let name = "collation";
+let items = Intl.supportedValuesOf(name);
+assertEquals([...items].sort(), items,
+ "return value of Intl.supportedValuesOf('" + name + "') should be sorted");
diff --git a/deps/v8/test/intl/enumeration/collation-syntax-valid.js b/deps/v8/test/intl/enumeration/collation-syntax-valid.js
new file mode 100644
index 0000000000..e68d565fa8
--- /dev/null
+++ b/deps/v8/test/intl/enumeration/collation-syntax-valid.js
@@ -0,0 +1,14 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony_intl_enumeration
+
+// Test that the returned collation items fit 'type'
+let regex = /^[a-zA-Z0-9]{3,8}(-[a-zA-Z0-9]{3,8})*$/;
+Intl.supportedValuesOf("collation").forEach(
+ function(collation) {
+ assertTrue(regex.test(collation),
+                 "Intl.supportedValuesOf('collation') returned " + collation +
+ " which does not meet 'type: alphanum{3,8}(sep alphanum{3,8})*'");
+ });
diff --git a/deps/v8/test/intl/enumeration/currency-sorted.js b/deps/v8/test/intl/enumeration/currency-sorted.js
new file mode 100644
index 0000000000..55ff1bc611
--- /dev/null
+++ b/deps/v8/test/intl/enumeration/currency-sorted.js
@@ -0,0 +1,11 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony_intl_enumeration
+
+// Test that the returned items of currency are sorted
+let name = "currency";
+let items = Intl.supportedValuesOf(name);
+assertEquals([...items].sort(), items,
+ "return value of Intl.supportedValuesOf('" + name + "') should be sorted");
diff --git a/deps/v8/test/intl/enumeration/currency-syntax-valid.js b/deps/v8/test/intl/enumeration/currency-syntax-valid.js
new file mode 100644
index 0000000000..cce3c612bd
--- /dev/null
+++ b/deps/v8/test/intl/enumeration/currency-syntax-valid.js
@@ -0,0 +1,14 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony_intl_enumeration
+
+// Test that the returned currency items fit 'alpha{3}'
+let regex = /^[A-Z]{3}$/;
+Intl.supportedValuesOf("currency").forEach(
+ function(currency) {
+ assertTrue(regex.test(currency),
+                 "Intl.supportedValuesOf('currency') returned " + currency +
+ " which does not meet 'alpha{3}'");
+ });
diff --git a/deps/v8/test/intl/enumeration/numberingSystem-no-algorithm.js b/deps/v8/test/intl/enumeration/numberingSystem-no-algorithm.js
new file mode 100644
index 0000000000..bf8c1a11eb
--- /dev/null
+++ b/deps/v8/test/intl/enumeration/numberingSystem-no-algorithm.js
@@ -0,0 +1,20 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony_intl_enumeration
+
+// Chrome filters out data for algorithmic numberingSystems, so we need to
+// test that none of them are returned.
+let name = "numberingSystem";
+let items = Intl.supportedValuesOf(name);
+
+function verifyNoAlgorithm(nu) {
+ assertTrue(items.indexOf(nu) < 0, "should not return '" + nu + "' which is algorithmic");
+}
+
+["armn", "armnlow", "cyrl", "ethi", "finance", "geor", "grek", "greklow",
+ "hans", "hansfin", "hant", "hantfin", "hebr", "japn", "japnfin",
+ "roman", "romanlow", "taml", "traditio"].forEach(function(nu) {
+ verifyNoAlgorithm(nu);
+});
diff --git a/deps/v8/test/intl/enumeration/numberingSystem-sorted.js b/deps/v8/test/intl/enumeration/numberingSystem-sorted.js
new file mode 100644
index 0000000000..7cd0d85052
--- /dev/null
+++ b/deps/v8/test/intl/enumeration/numberingSystem-sorted.js
@@ -0,0 +1,11 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony_intl_enumeration
+
+// Test that the returned items of numberingSystem are sorted
+let name = "numberingSystem";
+let items = Intl.supportedValuesOf(name);
+assertEquals([...items].sort(), items,
+ "return value of Intl.supportedValuesOf('" + name + "') should be sorted");
diff --git a/deps/v8/test/intl/enumeration/numberingSystem-syntax-valid.js b/deps/v8/test/intl/enumeration/numberingSystem-syntax-valid.js
new file mode 100644
index 0000000000..d80976d519
--- /dev/null
+++ b/deps/v8/test/intl/enumeration/numberingSystem-syntax-valid.js
@@ -0,0 +1,14 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony_intl_enumeration
+
+// Test that the returned numberingSystem items fit 'type'
+let regex = /^[a-zA-Z0-9]{3,8}(-[a-zA-Z0-9]{3,8})*$/;
+Intl.supportedValuesOf("numberingSystem").forEach(
+ function(numberingSystem) {
+ assertTrue(regex.test(numberingSystem),
+                 "Intl.supportedValuesOf('numberingSystem') returned " + numberingSystem +
+ " which does not meet 'type: alphanum{3,8}(sep alphanum{3,8})*'");
+ });
diff --git a/deps/v8/test/intl/enumeration/supported-values-of-invalid-key.js b/deps/v8/test/intl/enumeration/supported-values-of-invalid-key.js
new file mode 100644
index 0000000000..27651df74c
--- /dev/null
+++ b/deps/v8/test/intl/enumeration/supported-values-of-invalid-key.js
@@ -0,0 +1,12 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony_intl_enumeration
+
+// Test invalid keys
+["calendars", "collations", "currencies", "numberingSystems", "timeZones", "units",
+ 1, 0.3, true, false, {}, [] ].forEach(
+ function(key) {
+ assertThrows(() => Intl.supportedValuesOf(key), RangeError);
+ });
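
Together with supported-values-of-valid-key.js below, this pins down key validation: the six singular keys succeed, while plural spellings and non-string values throw a RangeError. A minimal sketch, again assuming --harmony_intl_enumeration:

    for (const key of ["calendar", "calendars"]) {
      try {
        console.log(key, "->", Intl.supportedValuesOf(key).length, "values");
      } catch (e) {
        console.log(key, "->", e.name);  // RangeError for the plural form
      }
    }
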
diff --git a/deps/v8/test/intl/enumeration/supported-values-of-name.js b/deps/v8/test/intl/enumeration/supported-values-of-name.js
new file mode 100644
index 0000000000..a0cbfd5333
--- /dev/null
+++ b/deps/v8/test/intl/enumeration/supported-values-of-name.js
@@ -0,0 +1,7 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony_intl_enumeration
+
+assertEquals("supportedValuesOf", Intl.supportedValuesOf.name);
diff --git a/deps/v8/test/intl/enumeration/supported-values-of-property.js b/deps/v8/test/intl/enumeration/supported-values-of-property.js
new file mode 100644
index 0000000000..52b0778b54
--- /dev/null
+++ b/deps/v8/test/intl/enumeration/supported-values-of-property.js
@@ -0,0 +1,11 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony_intl_enumeration
+
+let descriptor = Object.getOwnPropertyDescriptor(
+ Intl, "supportedValuesOf");
+assertTrue(descriptor.writable);
+assertFalse(descriptor.enumerable);
+assertTrue(descriptor.configurable);
diff --git a/deps/v8/test/intl/enumeration/supported-values-of-valid-key.js b/deps/v8/test/intl/enumeration/supported-values-of-valid-key.js
new file mode 100644
index 0000000000..ac5b4d8e5d
--- /dev/null
+++ b/deps/v8/test/intl/enumeration/supported-values-of-valid-key.js
@@ -0,0 +1,12 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony_intl_enumeration
+
+// Test valid keys
+["calendar", "collation", "currency", "numberingSystem", "timeZone", "unit"].forEach(
+ function(key) {
+ assertDoesNotThrow(() => Intl.supportedValuesOf(key));
+ assertEquals("object", typeof Intl.supportedValuesOf(key));
+ });
diff --git a/deps/v8/test/intl/enumeration/timeZone-sorted.js b/deps/v8/test/intl/enumeration/timeZone-sorted.js
new file mode 100644
index 0000000000..e3b5b484e2
--- /dev/null
+++ b/deps/v8/test/intl/enumeration/timeZone-sorted.js
@@ -0,0 +1,11 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony_intl_enumeration
+
+// Test that the returned items of timeZone are sorted
+let name = "timeZone";
+let items = Intl.supportedValuesOf(name);
+assertEquals([...items].sort(), items,
+ "return value of Intl.supportedValuesOf('" + name + "') should be sorted");
diff --git a/deps/v8/test/intl/enumeration/unit-sorted.js b/deps/v8/test/intl/enumeration/unit-sorted.js
new file mode 100644
index 0000000000..08dd1d93e8
--- /dev/null
+++ b/deps/v8/test/intl/enumeration/unit-sorted.js
@@ -0,0 +1,11 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony_intl_enumeration
+
+// Test that the returned items of unit are sorted
+let name = "unit";
+let items = Intl.supportedValuesOf(name);
+assertEquals([...items].sort(), items,
+ "return value of Intl.supportedValuesOf('" + name + "') should be sorted");
diff --git a/deps/v8/test/intl/locale/locale-calendars.js b/deps/v8/test/intl/locale/locale-calendars.js
index f5a10c6dc3..620440b01e 100644
--- a/deps/v8/test/intl/locale/locale-calendars.js
+++ b/deps/v8/test/intl/locale/locale-calendars.js
@@ -14,7 +14,7 @@ for (var i = 0; i < a_to_z.length; i++) {
locale.calendars.forEach(function(tokens) {
assertTrue(regex.test(tokens),
locale + ".calendars [" + locale.calendars +
- "] does not meet 'type: alphanum{3,8}(sep alphanum{3,8})*'");
+ "] but '" + tokens + "' does not meet 'type: alphanum{3,8}(sep alphanum{3,8})*'");
});
}
}
diff --git a/deps/v8/test/intl/regress-7770.js b/deps/v8/test/intl/regress-7770.js
index 2e7c2ce22d..0d2ddf13ca 100644
--- a/deps/v8/test/intl/regress-7770.js
+++ b/deps/v8/test/intl/regress-7770.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Environment Variables: TZ=Indian/Kerguelen LANG=uk
+// Environment Variables: TZ=Indian/Kerguelen LANG=uk LC_MESSAGES=uk
assertEquals(
"Fri Feb 01 2019 00:00:00 GMT+0500 (за часом на Французьких Південних і Антарктичних територіях)",
new Date(2019, 1,1).toString());
diff --git a/deps/v8/test/js-perf-test/Array/find-index.js b/deps/v8/test/js-perf-test/Array/find-index.js
index 1029b26124..f909328937 100644
--- a/deps/v8/test/js-perf-test/Array/find-index.js
+++ b/deps/v8/test/js-perf-test/Array/find-index.js
@@ -5,19 +5,19 @@
// Make sure we inline the callback, pick up all possible TurboFan
// optimizations.
-function RunOptFast(multiple) {
- // Use of variable multiple in the callback function forces
+function RunOptFast(value) {
+ // Use of variable {value} in the callback function forces
// context creation without escape analysis.
//
// Also, the arrow function requires inlining based on
// SharedFunctionInfo.
- result = array.findIndex((v, i, a) => v === `value ${multiple}`);
+ result = array.findIndex((v, i, a) => v === value);
}
// Don't optimize because I want to optimize RunOptFast with a parameter
// to be used in the callback.
%NeverOptimizeFunction(OptFast);
-function OptFast() { RunOptFast(max_index); }
+function OptFast() { RunOptFast(max_index_value); }
function side_effect(a) { return a; }
%NeverOptimizeFunction(side_effect);
@@ -59,7 +59,7 @@ DefineHigherOrderTests([
['SmiFindIndex', newClosure('findIndex'), SmiSetup, v => v === max_index],
[
'FastFindIndex', newClosure('findIndex'), FastSetup,
- v => v === `value ${max_index}`
+ v => v === max_index_value
],
[
'GenericFindIndex', newClosure('findIndex', true), ObjectSetup,
diff --git a/deps/v8/test/js-perf-test/Array/find.js b/deps/v8/test/js-perf-test/Array/find.js
index 580d646a30..d4fd06f0ee 100644
--- a/deps/v8/test/js-perf-test/Array/find.js
+++ b/deps/v8/test/js-perf-test/Array/find.js
@@ -5,19 +5,19 @@
// Make sure we inline the callback, pick up all possible TurboFan
// optimizations.
-function RunOptFast(multiple) {
- // Use of variable multiple in the callback function forces
+function RunOptFast(value) {
+ // Use of variable {value} in the callback function forces
// context creation without escape analysis.
//
// Also, the arrow function requires inlining based on
// SharedFunctionInfo.
- result = array.find((v, i, a) => v === `value ${multiple}`);
+ result = array.find((v, i, a) => v === value);
}
// Don't optimize because I want to optimize RunOptFast with a parameter
// to be used in the callback.
%NeverOptimizeFunction(OptFast);
-function OptFast() { RunOptFast(max_index); }
+function OptFast() { RunOptFast(max_index_value); }
function side_effect(a) { return a; }
%NeverOptimizeFunction(side_effect);
@@ -54,7 +54,7 @@ DefineHigherOrderTests([
['NaiveFindReplacement', Naive, NaiveSetup, v => v === max_index],
['DoubleFind', newClosure('find'), DoubleSetup, v => v === max_index + 0.5],
['SmiFind', newClosure('find'), SmiSetup, v => v === max_index],
- ['FastFind', newClosure('find'), FastSetup, v => v === `value ${max_index}`],
+ ['FastFind', newClosure('find'), FastSetup, v => v === max_index_value],
['GenericFind', newClosure('find', true), ObjectSetup, v => v === max_index],
['OptFastFind', OptFast, FastSetup, undefined],
['OptUnreliableFind', OptUnreliable, FastSetup, v => v === max_index]
diff --git a/deps/v8/test/js-perf-test/Array/for-each.js b/deps/v8/test/js-perf-test/Array/for-each.js
index c87d5406e0..3220c95556 100644
--- a/deps/v8/test/js-perf-test/Array/for-each.js
+++ b/deps/v8/test/js-perf-test/Array/for-each.js
@@ -29,19 +29,19 @@ function NaiveSetup() {
// Make sure we inline the callback, pick up all possible TurboFan
// optimizations.
-function RunOptFast(multiple) {
- // Use of variable multiple in the callback function forces
+function RunOptFast(value) {
+ // Use of variable {value} in the callback function forces
// context creation without escape analysis.
//
// Also, the arrow function requires inlining based on
// SharedFunctionInfo.
- result = array.forEach((v, i, a) => v === `value ${multiple}`);
+ result = array.forEach((v, i, a) => v === value);
}
// Don't optimize because I want to optimize RunOptFast with a parameter
// to be used in the callback.
%NeverOptimizeFunction(OptFast);
-function OptFast() { RunOptFast(max_index); }
+function OptFast() { RunOptFast(max_index_value); }
function side_effect(a) { return a; }
%NeverOptimizeFunction(side_effect);
@@ -58,7 +58,7 @@ DefineHigherOrderTests([
['SmiForEach', newClosure('forEach'), SmiSetup, v => v === max_index],
[
'FastForEach', newClosure('forEach'), FastSetup,
- v => v === `value ${max_index}`
+ v => v === max_index_value
],
[
'GenericForEach', newClosure('forEach', true), ObjectSetup,
@@ -67,7 +67,7 @@ DefineHigherOrderTests([
['OptFastForEach', OptFast, FastSetup, undefined],
[
'OptUnreliableForEach', OptUnreliable, FastSetup,
- v => v === `value ${max_index}`
+ v => v === max_index_value
]
]);
diff --git a/deps/v8/test/js-perf-test/Array/map.js b/deps/v8/test/js-perf-test/Array/map.js
index 4b278b8882..16b190b703 100644
--- a/deps/v8/test/js-perf-test/Array/map.js
+++ b/deps/v8/test/js-perf-test/Array/map.js
@@ -27,19 +27,19 @@ function NaiveMapSetup() {
// Make sure we inline the callback, pick up all possible TurboFan
// optimizations.
-function RunOptFastMap(multiple) {
- // Use of variable multiple in the callback function forces
+function RunOptFastMap(value) {
+ // Use of variable {value} in the callback function forces
// context creation without escape analysis.
//
// Also, the arrow function requires inlining based on
// SharedFunctionInfo.
- result = array.map((v, i, a) => v + ' ' + multiple);
+ result = array.map((v, i, a) => v + value);
}
// Don't optimize because I want to optimize RunOptFastMap with a parameter
// to be used in the callback.
%NeverOptimizeFunction(OptFastMap);
-function OptFastMap() { RunOptFastMap(3); }
+function OptFastMap() { RunOptFastMap(" 3"); }
function side_effect(a) { return a; }
%NeverOptimizeFunction(side_effect);
diff --git a/deps/v8/test/js-perf-test/Array/reduce-right.js b/deps/v8/test/js-perf-test/Array/reduce-right.js
index c643c2b383..ed6f22a3ad 100644
--- a/deps/v8/test/js-perf-test/Array/reduce-right.js
+++ b/deps/v8/test/js-perf-test/Array/reduce-right.js
@@ -5,19 +5,19 @@
// Make sure we inline the callback, pick up all possible TurboFan
// optimizations.
-function RunOptFastReduceRight(multiple) {
- // Use of variable multiple in the callback function forces
+function RunOptFastReduceRight(value) {
+ // Use of variable {value} in the callback function forces
// context creation without escape analysis.
//
// Also, the arrow function requires inlining based on
// SharedFunctionInfo.
- result = array.reduceRight((p, v, i, a) => p + multiple);
+ result = array.reduceRight((p, v, i, a) => p + value);
}
// Don't optimize because I want to optimize RunOptFastMap with a parameter
// to be used in the callback.
%NeverOptimizeFunction(OptFastReduceRight);
-function OptFastReduceRight() { RunOptFastReduceRight(3); }
+function OptFastReduceRight() { RunOptFastReduceRight("3"); }
function side_effect(a) { return a; }
%NeverOptimizeFunction(side_effect);
diff --git a/deps/v8/test/js-perf-test/Array/reduce.js b/deps/v8/test/js-perf-test/Array/reduce.js
index 3b07969669..38494749b0 100644
--- a/deps/v8/test/js-perf-test/Array/reduce.js
+++ b/deps/v8/test/js-perf-test/Array/reduce.js
@@ -5,19 +5,19 @@
// Make sure we inline the callback, pick up all possible TurboFan
// optimizations.
-function RunOptFastReduce(multiple) {
- // Use of multiple variables in the callback function forces
+function RunOptFastReduce(value) {
+ // Use of variable {value} in the callback function forces
// context creation without escape analysis.
//
// Also, the arrow function requires inlining based on
// SharedFunctionInfo.
- result = array.reduce((p, v, i, a) => p + multiple);
+ result = array.reduce((p, v, i, a) => p + value);
}
// Don't optimize because I want to optimize RunOptFastMap with a parameter
// to be used in the callback.
%NeverOptimizeFunction(OptFastReduce);
-function OptFastReduce() { RunOptFastReduce(3); }
+function OptFastReduce() { RunOptFastReduce("3"); }
function side_effect(a) { return a; }
%NeverOptimizeFunction(side_effect);
diff --git a/deps/v8/test/js-perf-test/Array/run.js b/deps/v8/test/js-perf-test/Array/run.js
index f4604edeff..c73f5531a2 100644
--- a/deps/v8/test/js-perf-test/Array/run.js
+++ b/deps/v8/test/js-perf-test/Array/run.js
@@ -12,6 +12,8 @@ let this_arg;
let result;
const array_size = 100;
const max_index = array_size - 1;
+// Matches what {FastSetup} below produces.
+const max_index_value = `value ${max_index}`;
// newClosure is a handy function to get a fresh
// closure unpolluted by IC feedback for a 2nd-order array builtin
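
The new max_index_value constant exists so the hot callbacks compare against a string computed once, instead of rebuilding `value ${max_index}` on every element visit; the benchmarks then measure the builtin itself rather than per-iteration string allocation. A sketch of the difference, with illustrative names:

    const size = 100;
    const array = Array.from({length: size}, (_, i) => `value ${i}`);
    // Before: array.findIndex(v => v === `value ${size - 1}`) -- allocates
    // a fresh string on every callback invocation.
    const needle = `value ${size - 1}`;  // computed once, outside the loop
    console.log(array.findIndex(v => v === needle));  // 99
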
diff --git a/deps/v8/test/js-perf-test/ClassFields.json b/deps/v8/test/js-perf-test/ClassFields.json
new file mode 100644
index 0000000000..a547d0a89e
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ClassFields.json
@@ -0,0 +1,118 @@
+{
+ "owners": ["caitp@igalia.com", "joyee@igalia.com"],
+ "name": "ClassFields",
+ "path": ["ClassFields"],
+ "run_count": 3,
+ "run_count_arm": 1,
+ "run_count_arm64": 1,
+ "timeout": 120,
+ "timeout_arm64": 240,
+ "units": "score",
+ "total": true,
+ "resources": ["base.js"],
+ "tests": [
+ {
+ "name": "evaluate-class",
+ "flags": ["--allow-natives-syntax"],
+ "resources": [ "evaluate-class.js", "classes.js" ],
+ "results_regexp": "^%s\\-ClassFields\\(Score\\): (.+)$",
+ "tests": [
+ {
+ "name": "evaluate-class-public-field-single-opt",
+ "main": "run.js",
+ "test_flags": [ "evaluate-class", "public-field-single", "opt" ]
+ },
+ {
+ "name": "evaluate-class-public-field-single-noopt",
+ "main": "run.js",
+ "test_flags": [ "evaluate-class", "public-field-single", "noopt" ]
+ },
+ {
+ "name": "evaluate-class-private-field-single-opt",
+ "main": "run.js",
+ "test_flags": [ "evaluate-class", "private-field-single", "opt" ]
+ },
+ {
+ "name": "evaluate-class-private-field-single-noopt",
+ "main": "run.js",
+ "test_flags": [ "evaluate-class", "private-field-single", "noopt" ]
+ }, {
+ "name": "evaluate-class-public-field-multiple-opt",
+ "main": "run.js",
+ "test_flags": [ "evaluate-class", "public-field-multiple", "opt" ]
+ },
+ {
+ "name": "evaluate-class-public-field-multiple-noopt",
+ "main": "run.js",
+ "test_flags": [ "evaluate-class", "public-field-multiple", "noopt" ]
+ },
+ {
+ "name": "evaluate-class-private-field-multiple-opt",
+ "main": "run.js",
+ "test_flags": [ "evaluate-class", "private-field-multiple", "opt" ]
+ },
+ {
+ "name": "evaluate-class-private-field-multiple-noopt",
+ "main": "run.js",
+ "test_flags": [ "evaluate-class", "private-field-multiple", "noopt" ]
+ }
+ ]
+ },
+ {
+ "name": "define-public-field",
+ "flags": ["--allow-natives-syntax"],
+ "resources": [ "define-public-field.js", "classes.js" ],
+ "results_regexp": "^%s\\-ClassFields\\(Score\\): (.+)$",
+ "tests": [
+ {
+ "name": "define-public-field-single-opt",
+ "main": "run.js",
+ "test_flags": [ "define-public-field", "single", "opt" ]
+ },
+ {
+ "name": "define-public-field-single-noopt",
+ "main": "run.js",
+ "test_flags": [ "define-public-field", "single", "noopt" ]
+ },
+ {
+ "name": "define-public-field-multiple-opt",
+ "main": "run.js",
+ "test_flags": [ "define-public-field", "multiple", "opt" ]
+ },
+ {
+ "name": "define-public-field-multiple-noopt",
+ "main": "run.js",
+ "test_flags": [ "define-public-field", "multiple", "noopt" ]
+ }
+ ]
+ },
+ {
+ "name": "define-private-field",
+ "resources": [ "define-private-field.js", "classes.js" ],
+ "flags": ["--allow-natives-syntax"],
+ "results_regexp": "^%s\\-ClassFields\\(Score\\): (.+)$",
+ "tests": [
+ {
+ "name": "define-private-field-single-opt",
+ "main": "run.js",
+ "test_flags": [ "define-private-field", "single", "opt" ]
+ },
+ {
+ "name": "define-private-field-single-noopt",
+ "main": "run.js",
+ "test_flags": [ "define-private-field", "single", "noopt" ]
+ },
+ {
+ "name": "define-private-field-multiple-opt",
+ "main": "run.js",
+ "test_flags": [ "define-private-field", "multiple", "opt" ]
+ },
+ {
+ "name": "define-private-field-multiple-noopt",
+ "main": "run.js",
+ "test_flags": [ "define-private-field", "multiple", "noopt" ]
+ }
+ ]
+ }
+ ]
+}
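
Each leaf test's test_flags are forwarded to d8 after the main script, so inside run.js and the per-benchmark scripts they surface as the top-level arguments array. Roughly (the exact d8 command line here is an assumption):

    // d8 --allow-natives-syntax run.js -- evaluate-class public-field-single opt
    const [benchmark, testType, optParam] = arguments;
    if (optParam !== "opt" && optParam !== "noopt") {
      throw new Error("Unknown optimization configuration " + arguments.join(' '));
    }
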
diff --git a/deps/v8/test/js-perf-test/ClassFields/classes.js b/deps/v8/test/js-perf-test/ClassFields/classes.js
new file mode 100644
index 0000000000..57b46edd49
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ClassFields/classes.js
@@ -0,0 +1,59 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+"use strict";
+
+let i = 0;
+function EvaluateSinglePublicFieldClass() {
+ return class SinglePublicFieldClass {
+ x = i;
+
+ check() {
+ return this.x === i;
+ }
+ };
+}
+
+function EvaluateMultiPublicFieldClass() {
+ return class MultiPublicFieldClass {
+ x = i;
+ y = i+1;
+ z = i+2;
+ q = i+3;
+ r = i+4;
+ a = i+5;
+
+ check() {
+ return this.x + 1 === this.y && this.y + 1 === this.z &&
+ this.z + 1 === this.q && this.q + 1 === this.r &&
+ this.r + 1 === this.a;
+ }
+ };
+}
+
+function EvaluateSinglePrivateFieldClass() {
+ return class SinglePrivateFieldClass {
+ #x = i;
+
+ check() {
+ return this.#x === i;
+ }
+  };
+}
+
+function EvaluateMultiPrivateFieldClass() {
+ return class MultiPrivateFieldClass {
+ #x = i;
+ #y = i+1;
+ #z = i+2;
+ #q = i+3;
+ #r = i+4;
+ #a = i+5;
+
+ check() {
+ return this.#x + 1 === this.#y && this.#y + 1 === this.#z &&
+ this.#z + 1 === this.#q && this.#q + 1 === this.#r &&
+ this.#r + 1 === this.#a;
+ }
+ };
+}
diff --git a/deps/v8/test/js-perf-test/ClassFields/define-private-field.js b/deps/v8/test/js-perf-test/ClassFields/define-private-field.js
new file mode 100644
index 0000000000..72811de124
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ClassFields/define-private-field.js
@@ -0,0 +1,74 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+'use strict';
+
+d8.file.execute('classes.js');
+
+const BENCHMARK_NAME = arguments[0];
+const TEST_TYPE = arguments[1];
+const optimize_param = arguments[2];
+let optimize;
+if (optimize_param == "opt") {
+ optimize = true;
+} else if (optimize_param == "noopt"){
+ optimize = false;
+} else {
+ throw new Error("Unknown optimization configuration " + arguments.join(' '));
+}
+
+let klass;
+let array;
+
+switch (TEST_TYPE) {
+ case "single":
+ klass = EvaluateSinglePrivateFieldClass();
+ break;
+ case "multiple":
+ klass = EvaluateMultiPrivateFieldClass();
+ break;
+ default:
+ throw new Error("Unknown optimization configuration " + arguments.join(' '));
+}
+
+if (optimize) {
+ %PrepareFunctionForOptimization(klass);
+} else {
+ %NeverOptimizeFunction(klass);
+}
+
+function setUp() {
+ array = [new klass(), new klass()];
+ // Populate the array first to reduce the impact of
+ // array allocations.
+ for (let i = 0; i < LOCAL_ITERATIONS - 2; ++i) {
+ array.push(array[0]);
+ }
+ if (optimize) {
+ %OptimizeFunctionOnNextCall(klass);
+ }
+}
+
+function runBenchmark() {
+ for (let i = 0; i < LOCAL_ITERATIONS; ++i) {
+ array[i] = new klass();
+ }
+}
+
+function tearDown() {
+ if (array.length < 3) {
+ throw new Error(`Check failed, array length ${array.length}`);
+ }
+ for (const instance of array) {
+ if (!instance.check())
+ throw new Error(`instance.check() failed`);
+ }
+}
+
+const DETERMINISTIC_RUNS = 1;
+const LOCAL_ITERATIONS = 10000;
+new BenchmarkSuite(`${BENCHMARK_NAME}`, [1000], [
+ new Benchmark(
+ `${BENCHMARK_NAME}-${TEST_TYPE}-${optimize_param}`,
+ false, false, DETERMINISTIC_RUNS, runBenchmark, setUp, tearDown)
+]);
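
The opt/noopt split relies on d8's natives syntax: %PrepareFunctionForOptimization must run before %OptimizeFunctionOnNextCall, and %NeverOptimizeFunction pins the noopt variant to unoptimized code. A minimal sketch of that protocol (d8 with --allow-natives-syntax only; the % intrinsics are not standard JavaScript):

    function f(x) { return x + 1; }
    %PrepareFunctionForOptimization(f);  // collect type feedback first
    f(1); f(2);                          // warm up
    %OptimizeFunctionOnNextCall(f);      // request TurboFan for the next call
    f(3);
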
diff --git a/deps/v8/test/js-perf-test/ClassFields/define-public-field.js b/deps/v8/test/js-perf-test/ClassFields/define-public-field.js
new file mode 100644
index 0000000000..4b6c9188cc
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ClassFields/define-public-field.js
@@ -0,0 +1,75 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+'use strict';
+
+d8.file.execute('classes.js');
+
+const BENCHMARK_NAME = arguments[0];
+const TEST_TYPE = arguments[1];
+const optimize_param = arguments[2];
+let optimize;
+if (optimize_param == "opt") {
+ optimize = true;
+} else if (optimize_param == "noopt"){
+ optimize = false;
+} else {
+ throw new Error("Unknown optimization configuration " + arguments.join(' '));
+}
+
+let klass;
+let array;
+
+switch (TEST_TYPE) {
+ case "single":
+ klass = EvaluateSinglePublicFieldClass();
+ break;
+ case "multiple":
+ klass = EvaluateMultiPublicFieldClass();
+ break;
+ default:
+ throw new Error("Unknown optimization configuration " + arguments.join(' '));
+}
+
+if (optimize) {
+ %PrepareFunctionForOptimization(klass);
+} else {
+ %NeverOptimizeFunction(klass);
+}
+
+function setUp() {
+ array = [new klass(), new klass()];
+ // Populate the array first to reduce the impact of
+ // array allocations.
+ for (let i = 0; i < LOCAL_ITERATIONS - 2; ++i) {
+ array.push(array[0]);
+ }
+ if (optimize) {
+ %OptimizeFunctionOnNextCall(klass);
+ }
+}
+
+function runBenchmark() {
+ for (let i = 0; i < LOCAL_ITERATIONS; ++i) {
+ array[i] = new klass();
+ }
+}
+
+function tearDown() {
+ if (array.length < 3) {
+ throw new Error(`Check failed, array length ${array.length}`);
+ }
+
+ for (const instance of array) {
+ if (!instance.check())
+ throw new Error(`instance.check() failed`);
+ }
+}
+
+const DETERMINISTIC_RUNS = 1;
+const LOCAL_ITERATIONS = 10000;
+new BenchmarkSuite(`${BENCHMARK_NAME}`, [1000], [
+ new Benchmark(
+ `${BENCHMARK_NAME}-${TEST_TYPE}-${optimize_param}`,
+ false, false, DETERMINISTIC_RUNS, runBenchmark, setUp, tearDown)
+]);
diff --git a/deps/v8/test/js-perf-test/ClassFields/evaluate-class.js b/deps/v8/test/js-perf-test/ClassFields/evaluate-class.js
new file mode 100644
index 0000000000..01d210854e
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ClassFields/evaluate-class.js
@@ -0,0 +1,83 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+'use strict';
+
+d8.file.execute('classes.js');
+
+const BENCHMARK_NAME = arguments[0];
+const TEST_TYPE = arguments[1];
+const optimize_param = arguments[2];
+let optimize;
+if (optimize_param == "opt") {
+ optimize = true;
+} else if (optimize_param == "noopt"){
+ optimize = false;
+} else {
+ throw new Error("Unknown optimization configuration " + arguments.join(' '));
+}
+
+let factory;
+let array;
+
+switch (TEST_TYPE) {
+ case "public-field-single":
+ factory = EvaluateSinglePublicFieldClass;
+ break;
+ case "public-field-multiple":
+ factory = EvaluateMultiPublicFieldClass;
+ break;
+ case "private-field-single":
+ factory = EvaluateSinglePrivateFieldClass;
+ break;
+ case "private-field-multiple":
+ factory = EvaluateMultiPrivateFieldClass;
+ break;
+
+ default:
+ throw new Error("Unknown optimization configuration " + arguments.join(' '));
+}
+
+if (optimize) {
+ %PrepareFunctionForOptimization(factory);
+} else {
+ %NeverOptimizeFunction(factory);
+}
+
+function setUp() {
+ array = [factory(), factory()];
+ // Populate the array first to reduce the impact of
+ // array allocations.
+ for (let i = 0; i < LOCAL_ITERATIONS - 2; ++i) {
+ array.push(array[0]);
+ }
+ if (optimize) {
+ %OptimizeFunctionOnNextCall(factory);
+ }
+}
+
+function runBenchmark() {
+ for (let i = 0; i < LOCAL_ITERATIONS; ++i) {
+ array[i] = factory();
+ }
+}
+
+function tearDown() {
+ if (array.length < 3) {
+ throw new Error(`Check failed, array length ${array.length}`);
+ }
+
+ for (const klass of array) {
+ const instance = new klass();
+ if (!instance.check())
+ throw new Error(`instance.check() failed`);
+ }
+}
+
+const DETERMINISTIC_RUNS = 1;
+const LOCAL_ITERATIONS = 10000;
+new BenchmarkSuite(`${BENCHMARK_NAME}`, [1000], [
+ new Benchmark(
+ `${BENCHMARK_NAME}-${TEST_TYPE}-${optimize_param}`,
+ false, false, DETERMINISTIC_RUNS, runBenchmark, setUp, tearDown)
+]);
diff --git a/deps/v8/test/js-perf-test/ClassFields/run.js b/deps/v8/test/js-perf-test/ClassFields/run.js
new file mode 100644
index 0000000000..2982d5d8e1
--- /dev/null
+++ b/deps/v8/test/js-perf-test/ClassFields/run.js
@@ -0,0 +1,25 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('../base.js');
+load(arguments[0] + '.js');
+
+var success = true;
+
+function PrintResult(name, result, mean) {
+ print(`${name}-ClassFields(Score): ${result} (mean: ${mean})`);
+}
+
+
+function PrintError(name, error) {
+ PrintResult(name, error);
+ success = false;
+}
+
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+
+BenchmarkSuite.RunSuites({ NotifyResult: PrintResult,
+ NotifyError: PrintError });
diff --git a/deps/v8/test/message/fail/class-private-brand-reinitialization-anonymous.js b/deps/v8/test/message/fail/class-private-brand-reinitialization-anonymous.js
new file mode 100644
index 0000000000..95b54396a6
--- /dev/null
+++ b/deps/v8/test/message/fail/class-private-brand-reinitialization-anonymous.js
@@ -0,0 +1,10 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class A { constructor(o) { return o } }
+
+let B = (0, class extends A { #x() { } });
+
+let o = new B({});
+new B(o);
diff --git a/deps/v8/test/message/fail/class-private-brand-reinitialization-anonymous.out b/deps/v8/test/message/fail/class-private-brand-reinitialization-anonymous.out
new file mode 100644
index 0000000000..2005f5f716
--- /dev/null
+++ b/deps/v8/test/message/fail/class-private-brand-reinitialization-anonymous.out
@@ -0,0 +1,6 @@
+*%(basename)s:7: TypeError: Cannot initialize private methods of class anonymous twice on the same object
+let B = (0, class extends A { #x() { } });
+ ^
+TypeError: Cannot initialize private methods of class anonymous twice on the same object
+ at new B (*%(basename)s:7:13)
+ at *%(basename)s:10:1
diff --git a/deps/v8/test/message/fail/class-private-brand-reinitialization.js b/deps/v8/test/message/fail/class-private-brand-reinitialization.js
new file mode 100644
index 0000000000..55f3fe73db
--- /dev/null
+++ b/deps/v8/test/message/fail/class-private-brand-reinitialization.js
@@ -0,0 +1,10 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class A { constructor(o) { return o } }
+
+class B extends A { #x() {} }
+
+let o = new B({});
+new B(o);
diff --git a/deps/v8/test/message/fail/class-private-brand-reinitialization.out b/deps/v8/test/message/fail/class-private-brand-reinitialization.out
new file mode 100644
index 0000000000..6241c334bd
--- /dev/null
+++ b/deps/v8/test/message/fail/class-private-brand-reinitialization.out
@@ -0,0 +1,6 @@
+*%(basename)s:7: TypeError: Cannot initialize private methods of class B twice on the same object
+class B extends A { #x() {} }
+^
+TypeError: Cannot initialize private methods of class B twice on the same object
+ at new B (*%(basename)s:7:1)
+ at *%(basename)s:10:1
diff --git a/deps/v8/test/message/fail/class-private-field-reinitialization.js b/deps/v8/test/message/fail/class-private-field-reinitialization.js
new file mode 100644
index 0000000000..8202948c95
--- /dev/null
+++ b/deps/v8/test/message/fail/class-private-field-reinitialization.js
@@ -0,0 +1,10 @@
+// Copyright 2019 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class A { constructor(o) { return o } }
+
+class B extends A { #x; }
+
+let o = new B({});
+new B(o);
diff --git a/deps/v8/test/message/fail/class-private-field-reinitialization.out b/deps/v8/test/message/fail/class-private-field-reinitialization.out
new file mode 100644
index 0000000000..6e8024f587
--- /dev/null
+++ b/deps/v8/test/message/fail/class-private-field-reinitialization.out
@@ -0,0 +1,7 @@
+*%(basename)s:7: TypeError: Cannot initialize #x twice on the same object
+class B extends A { #x; }
+ ^
+TypeError: Cannot initialize #x twice on the same object
+ at Object.<instance_members_initializer> (*%(basename)s:7:21)
+ at new B (*%(basename)s:7:1)
+ at *%(basename)s:10:1
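
All three of these message tests use the same return-override trick: because the base constructor returns its argument, new B(o) runs B's private brand and field initializers on an arbitrary object, and doing that twice on the same object must throw. Condensed, with hypothetical names:

    class Base { constructor(o) { return o; } }   // return override
    class Stamper extends Base { #secret = 42; }
    const target = {};
    new Stamper(target);    // installs #secret on target
    try {
      new Stamper(target);  // re-initialization must throw
    } catch (e) {
      console.log(e instanceof TypeError, e.message);
    }
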
diff --git a/deps/v8/test/message/fail/map-grow-failed.js b/deps/v8/test/message/fail/map-grow-failed.js
new file mode 100644
index 0000000000..3f55cc73d5
--- /dev/null
+++ b/deps/v8/test/message/fail/map-grow-failed.js
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const kMapSizeLimit = 1 << 24;
+let m = new Map();
+for (let i = 0; i < kMapSizeLimit + 1; i++) {
+ m.set(i, 0);
+}
diff --git a/deps/v8/test/message/fail/map-grow-failed.out b/deps/v8/test/message/fail/map-grow-failed.out
new file mode 100644
index 0000000000..3ec5a71c95
--- /dev/null
+++ b/deps/v8/test/message/fail/map-grow-failed.out
@@ -0,0 +1,6 @@
+*%(basename)s:{NUMBER}: RangeError: Map maximum size exceeded
+ m.set(i, 0);
+ ^
+RangeError: Map maximum size exceeded
+ at Map.set (<anonymous>)
+ at *%(basename)s:{NUMBER}:{NUMBER}
diff --git a/deps/v8/test/message/fail/set-grow-failed.js b/deps/v8/test/message/fail/set-grow-failed.js
new file mode 100644
index 0000000000..31fd7d938d
--- /dev/null
+++ b/deps/v8/test/message/fail/set-grow-failed.js
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const kSetSizeLimit = 1 << 24;
+let s = new Set();
+for (let i = 0; i < kSetSizeLimit + 1; i++) {
+ s.add(i);
+}
diff --git a/deps/v8/test/message/fail/set-grow-failed.out b/deps/v8/test/message/fail/set-grow-failed.out
new file mode 100644
index 0000000000..98231fa36c
--- /dev/null
+++ b/deps/v8/test/message/fail/set-grow-failed.out
@@ -0,0 +1,6 @@
+*%(basename)s:{NUMBER}: RangeError: Set maximum size exceeded
+ s.add(i);
+ ^
+RangeError: Set maximum size exceeded
+ at Set.add (<anonymous>)
+ at *%(basename)s:{NUMBER}:{NUMBER}
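// Sketch of what the two grow-failure tests above pin down: V8 caps Map and
// Set at 1 << 24 entries, and the overflowing insertion throws a RangeError
// that ordinary code can catch (engine-specific behavior, slow to run):
const m = new Map();
try {
  for (let i = 0; ; i++) m.set(i, 0);
} catch (e) {
  console.log(e instanceof RangeError);  // true: "Map maximum size exceeded"
}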
diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status
index 4a6e1176bc..b0d74da267 100644
--- a/deps/v8/test/message/message.status
+++ b/deps/v8/test/message/message.status
@@ -70,15 +70,10 @@
}],
################################################################################
-['arch == mips64el or arch == mipsel', {
+['arch == mips64el or arch == mipsel or arch == riscv64 or arch == loong64', {
# Tests that require Simd enabled.
'wasm-trace-memory': [SKIP],
-}], # arch == mips64el or arch == mipsel
-
-['arch == riscv64', {
- # Tests that require Simd enabled.
- 'wasm-trace-memory': [SKIP],
-}],
+}], # arch == mips64el or arch == mipsel or arch == riscv64 or arch == loong64
##############################################################################
['no_simd_hardware == True', {
@@ -92,4 +87,25 @@
'weakref-finalizationregistry-error': [SKIP],
}], # third_party_heap
+##############################################################################
+['msan == True', {
+ # Large allocations
+ 'fail/map-grow-failed': [SKIP],
+ 'fail/set-grow-failed': [SKIP],
+}], # 'msan == True'
+
+##############################################################################
+['simulator_run', {
+ # Too slow on simulators
+ 'fail/map-grow-failed': [SKIP],
+ 'fail/set-grow-failed': [SKIP],
+}], # simulator_run
+
+##############################################################################
+['is_full_debug', {
+ # Too slow in non-optimized debug mode
+ 'fail/map-grow-failed': [SKIP],
+ 'fail/set-grow-failed': [SKIP],
+}], # is_full_debug
+
]
diff --git a/deps/v8/test/mjsunit/asm/regress-674089.js b/deps/v8/test/mjsunit/asm/regress-674089.js
deleted file mode 100644
index 7c0e89d3ee..0000000000
--- a/deps/v8/test/mjsunit/asm/regress-674089.js
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2017 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --validate-asm --lazy-inner-functions
-
-function outer() {
- "use asm";
- function inner() {
- /f(/
- }
-}
-outer();
diff --git a/deps/v8/test/mjsunit/baseline/flush-baseline-code.js b/deps/v8/test/mjsunit/baseline/flush-baseline-code.js
index 8599fcdadf..a26d2a4f71 100644
--- a/deps/v8/test/mjsunit/baseline/flush-baseline-code.js
+++ b/deps/v8/test/mjsunit/baseline/flush-baseline-code.js
@@ -5,7 +5,8 @@
// Flags: --expose-gc --stress-flush-code --allow-natives-syntax
// Flags: --baseline-batch-compilation-threshold=0 --sparkplug
// Flags: --no-always-sparkplug --lazy-feedback-allocation
-// Flags: --flush-baseline-code --flush-bytecode
+// Flags: --flush-baseline-code --flush-bytecode --no-opt
+// Flags: --no-stress-concurrent-inlining
function HasBaselineCode(f) {
let opt_status = %GetOptimizationStatus(f);
diff --git a/deps/v8/test/mjsunit/baseline/flush-only-baseline-code.js b/deps/v8/test/mjsunit/baseline/flush-only-baseline-code.js
index 4b4dde93c7..ea140d1b1c 100644
--- a/deps/v8/test/mjsunit/baseline/flush-only-baseline-code.js
+++ b/deps/v8/test/mjsunit/baseline/flush-only-baseline-code.js
@@ -5,7 +5,8 @@
// Flags: --expose-gc --stress-flush-code --allow-natives-syntax
// Flags: --baseline-batch-compilation-threshold=0 --sparkplug
// Flags: --no-always-sparkplug --lazy-feedback-allocation
-// Flags: --flush-baseline-code --no-flush-bytecode
+// Flags: --flush-baseline-code --no-flush-bytecode --no-opt
+// Flags: --no-stress-concurrent-inlining
function HasBaselineCode(f) {
let opt_status = %GetOptimizationStatus(f);
diff --git a/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js b/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
index 941944119b..1071d22aee 100644
--- a/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
+++ b/deps/v8/test/mjsunit/compiler/concurrent-invalidate-transition-map.js
@@ -58,7 +58,7 @@ add_field(o);
// Invalidate transition map after compile graph has been created.
%WaitForBackgroundOptimization();
o.c = 2.2;
-assertUnoptimized(add_field, "no sync");
+assertUnoptimized(add_field);
// Sync with background thread to conclude optimization that bailed out.
%FinalizeOptimization();
if (!%IsDictPropertyConstTrackingEnabled()) {
diff --git a/deps/v8/test/mjsunit/compiler/concurrent-proto-change.js b/deps/v8/test/mjsunit/compiler/concurrent-proto-change.js
index 8de72c0cd9..5a184d87f4 100644
--- a/deps/v8/test/mjsunit/compiler/concurrent-proto-change.js
+++ b/deps/v8/test/mjsunit/compiler/concurrent-proto-change.js
@@ -27,11 +27,6 @@
// Flags: --allow-natives-syntax --no-always-opt --concurrent-recompilation
-if (!%IsConcurrentRecompilationSupported()) {
- print("Concurrent recompilation is disabled. Skipping this test.");
- quit();
-}
-
function f(foo) { return foo.bar(); }
%PrepareFunctionForOptimization(f);
@@ -49,7 +44,7 @@ assertEquals(1, f(o));
// Change the prototype chain after compile graph has been created.
%WaitForBackgroundOptimization();
o.__proto__.__proto__ = { bar: function() { return 2; } };
-assertUnoptimized(f, "no sync");
+assertUnoptimized(f);
%FinalizeOptimization();
// Optimization failed due to map dependency.
assertUnoptimized(f);
diff --git a/deps/v8/test/mjsunit/compiler/fast-api-calls.js b/deps/v8/test/mjsunit/compiler/fast-api-calls.js
index bdda760c19..7484ac17a2 100644
--- a/deps/v8/test/mjsunit/compiler/fast-api-calls.js
+++ b/deps/v8/test/mjsunit/compiler/fast-api-calls.js
@@ -5,7 +5,7 @@
// This file exercises basic fast API calls and enables fuzzing of this
// functionality.
-// Flags: --turbo-fast-api-calls --allow-natives-syntax --opt
+// Flags: --turbo-fast-api-calls --expose-fast-api --allow-natives-syntax --opt
// --always-opt is disabled because we rely on particular feedback for
// optimizing to the fastest path.
// Flags: --no-always-opt
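// The flag change repeated across these files splits API exposure from
// optimization: d8 presumably installs its test object only behind the new
// --expose-fast-api flag, so every test now passes both flags. Sketch of the
// invocation (d8-specific API):
//
//   d8 --turbo-fast-api-calls --expose-fast-api fast-api-calls.js
//
// Without --expose-fast-api, d8.test.FastCAPI is undefined and this throws:
const fast_c_api = new d8.test.FastCAPI();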
diff --git a/deps/v8/test/mjsunit/compiler/fast-api-helpers.js b/deps/v8/test/mjsunit/compiler/fast-api-helpers.js
index 587cfbc539..2989eab8c9 100644
--- a/deps/v8/test/mjsunit/compiler/fast-api-helpers.js
+++ b/deps/v8/test/mjsunit/compiler/fast-api-helpers.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --turbo-fast-api-calls --allow-natives-syntax --opt
+// Flags: --turbo-fast-api-calls --expose-fast-api --allow-natives-syntax --opt
// Helper for sequence tests.
function optimize_and_check(func, fast_count, slow_count, expected) {
diff --git a/deps/v8/test/mjsunit/compiler/fast-api-interface-types.js b/deps/v8/test/mjsunit/compiler/fast-api-interface-types.js
index 33a17ebe04..13da7b0cc8 100644
--- a/deps/v8/test/mjsunit/compiler/fast-api-interface-types.js
+++ b/deps/v8/test/mjsunit/compiler/fast-api-interface-types.js
@@ -4,7 +4,7 @@
// This file tests interface types used with fast API calls.
-// Flags: --turbo-fast-api-calls --allow-natives-syntax --opt
+// Flags: --turbo-fast-api-calls --expose-fast-api --allow-natives-syntax --opt
// Flags: --no-always-opt
// Flags: --deopt-every-n-times=0
diff --git a/deps/v8/test/mjsunit/compiler/fast-api-sequences-x64.js b/deps/v8/test/mjsunit/compiler/fast-api-sequences-x64.js
index 7bc8db4ec7..543d80b6c5 100644
--- a/deps/v8/test/mjsunit/compiler/fast-api-sequences-x64.js
+++ b/deps/v8/test/mjsunit/compiler/fast-api-sequences-x64.js
@@ -4,7 +4,7 @@
// This file adds x64-specific tests to the ones in fast-api-sequences.js.
-// Flags: --turbo-fast-api-calls --allow-natives-syntax --opt
+// Flags: --turbo-fast-api-calls --expose-fast-api --allow-natives-syntax --opt
// --always-opt is disabled because we rely on particular feedback for
// optimizing to the fastest path.
// Flags: --no-always-opt
diff --git a/deps/v8/test/mjsunit/compiler/fast-api-sequences.js b/deps/v8/test/mjsunit/compiler/fast-api-sequences.js
index f37c68cb5e..0adb64f762 100644
--- a/deps/v8/test/mjsunit/compiler/fast-api-sequences.js
+++ b/deps/v8/test/mjsunit/compiler/fast-api-sequences.js
@@ -4,7 +4,7 @@
// This file exercises sequence support for fast API calls.
-// Flags: --turbo-fast-api-calls --allow-natives-syntax --opt
+// Flags: --turbo-fast-api-calls --expose-fast-api --allow-natives-syntax --opt
// --always-opt is disabled because we rely on particular feedback for
// optimizing to the fastest path.
// Flags: --no-always-opt
diff --git a/deps/v8/test/mjsunit/compiler/manual-concurrent-recompile.js b/deps/v8/test/mjsunit/compiler/manual-concurrent-recompile.js
index 65ac39bd18..2d566d0f63 100644
--- a/deps/v8/test/mjsunit/compiler/manual-concurrent-recompile.js
+++ b/deps/v8/test/mjsunit/compiler/manual-concurrent-recompile.js
@@ -26,14 +26,9 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Flags: --allow-natives-syntax --expose-gc
-// Flags: --concurrent-recompilation --block-concurrent-recompilation
+// Flags: --concurrent-recompilation
// Flags: --opt --no-always-opt
-if (!%IsConcurrentRecompilationSupported()) {
- print("Concurrent recompilation is disabled. Skipping this test.");
- quit();
-}
-
function f(x) {
var xx = x * x;
var xxstr = xx.toString();
@@ -55,15 +50,13 @@ f(g(2));
assertUnoptimized(f);
assertUnoptimized(g);
+%DisableOptimizationFinalization();
%OptimizeFunctionOnNextCall(f, "concurrent");
%OptimizeFunctionOnNextCall(g, "concurrent");
-f(g(3)); // Kick off recompilation.
-
-assertUnoptimized(f, 'no sync'); // Not yet optimized since recompilation
-assertUnoptimized(g, 'no sync'); // is still blocked.
+f(g(3));
-// Let concurrent recompilation proceed.
-%UnblockConcurrentRecompilation();
-
-assertOptimized(f, 'sync'); // Optimized once we sync with the
-assertOptimized(g, 'sync'); // background thread.
+assertUnoptimized(f);
+assertUnoptimized(g);
+%FinalizeOptimization();
+assertOptimized(f);
+assertOptimized(g);
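// Condensed sketch of the handshake this patch uses in place of the removed
// --block-concurrent-recompilation flag (runs in d8 with --allow-natives-syntax
// --concurrent-recompilation plus the mjsunit harness; the %-intrinsics are
// V8-internal and unstable):
function add(x) { return x + 1; }
%PrepareFunctionForOptimization(add);
add(1); add(2);                    // collect type feedback
%DisableOptimizationFinalization();
%OptimizeFunctionOnNextCall(add, "concurrent");
add(3);                            // kick off background compilation
%WaitForBackgroundOptimization();  // compile job done, code not yet installed
assertUnoptimized(add);
%FinalizeOptimization();           // install the optimized code
assertOptimized(add);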
diff --git a/deps/v8/test/mjsunit/compiler/regress-1236716.js b/deps/v8/test/mjsunit/compiler/regress-1236716.js
new file mode 100644
index 0000000000..bc0f416bf6
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1236716.js
@@ -0,0 +1,16 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+ for (let i = new Number(0); i < new Number(64); i += new Number(1)) {
+ i = Math.max(i);
+ }
+}
+
+%PrepareFunctionForOptimization(foo);
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/compiler/regress-1239601.js b/deps/v8/test/mjsunit/compiler/regress-1239601.js
new file mode 100644
index 0000000000..47578b6b4e
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1239601.js
@@ -0,0 +1,24 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+ const a = {};
+ const b = [];
+ const unused = {__proto__: [], p1: a, p2: 0, p3: 0, p4: 0};
+
+ function inline(x) { x.gaga; }
+
+ inline(a);
+ inline(b);
+
+ b.p1 = 42;
+}
+
+
+%PrepareFunctionForOptimization(foo);
+for (var i = 0; i < 10; i++) foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
diff --git a/deps/v8/test/mjsunit/compiler/regress-1245949.js b/deps/v8/test/mjsunit/compiler/regress-1245949.js
new file mode 100644
index 0000000000..800907c276
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1245949.js
@@ -0,0 +1,16 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function main() {
+ const v2 = Date.now();
+ const v3 = /z/;
+ v3.test(v2);
+}
+
+%PrepareFunctionForOptimization(main);
+main();
+%OptimizeFunctionOnNextCall(main);
+main();
diff --git a/deps/v8/test/mjsunit/compiler/regress-1250216.js b/deps/v8/test/mjsunit/compiler/regress-1250216.js
new file mode 100644
index 0000000000..87a1ae3707
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-1250216.js
@@ -0,0 +1,16 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+{
+ const realm = Realm.createAllowCrossRealmAccess();
+ const foo = Realm.eval(realm, "function foo() {return globalThis.foo}; foo");
+ assertSame(foo(), foo);
+}
+
+{
+ const realm = Realm.createAllowCrossRealmAccess();
+ const foo = Realm.eval(realm, "function foo() {return globalThis.foo}; foo");
+ assertSame(foo(), foo);
+ Realm.detachGlobal(realm);
+}
diff --git a/deps/v8/test/mjsunit/compiler/regress-9017.js b/deps/v8/test/mjsunit/compiler/regress-9017.js
index c484e177c6..72652f22f4 100644
--- a/deps/v8/test/mjsunit/compiler/regress-9017.js
+++ b/deps/v8/test/mjsunit/compiler/regress-9017.js
@@ -5,6 +5,8 @@
// Flags: --allow-natives-syntax --noturbo-inlining --noturbo-verify-allocation
// This test invokes optimization manually, no need for stress modes:
// Flags: --nostress-opt --noalways-opt
+// This neutralizes a too-low stack size passed by the flag fuzzer.
+// Flags: --stack-size=864
// Ensure that very large stack frames can be used successfully.
// The flag --noturbo-verify-allocation is to make this run a little faster; it
diff --git a/deps/v8/test/mjsunit/compiler/regress-9945-1.js b/deps/v8/test/mjsunit/compiler/regress-9945-1.js
index 6421d9bcd7..3206d44627 100644
--- a/deps/v8/test/mjsunit/compiler/regress-9945-1.js
+++ b/deps/v8/test/mjsunit/compiler/regress-9945-1.js
@@ -3,7 +3,7 @@
// found in the LICENSE file.
// Flags: --allow-natives-syntax --opt --no-always-opt
-// Flags: --concurrent-recompilation --block-concurrent-recompilation
+// Flags: --concurrent-recompilation
function foo(x) { bar(x) }
function bar(x) { x.p }
@@ -27,19 +27,21 @@ foo(a);
foo(a);
// Trigger optimization of bar but don't yet complete it.
+%DisableOptimizationFinalization();
%OptimizeFunctionOnNextCall(bar, "concurrent");
foo(a);
%PrepareFunctionForOptimization(bar);
+%WaitForBackgroundOptimization();
// Change a's map from PACKED_SMI_ELEMENTS to PACKED_ELEMENTS and run bar in the
// interpreter (via foo) s.t. bar's load feedback changes accordingly.
a[0] = {};
foo(a);
-assertUnoptimized(bar, "no sync");
+assertUnoptimized(bar);
// Now finish the optimization of bar, which was based on the old
// PACKED_SMI_ELEMENTS feedback.
-%UnblockConcurrentRecompilation();
+%FinalizeOptimization();
assertOptimized(bar);
// If we were to call the optimized bar now, it would deopt.
diff --git a/deps/v8/test/mjsunit/compiler/regress-crbug-1201011.js b/deps/v8/test/mjsunit/compiler/regress-crbug-1201011.js
index f521a60b85..9086fa794b 100644
--- a/deps/v8/test/mjsunit/compiler/regress-crbug-1201011.js
+++ b/deps/v8/test/mjsunit/compiler/regress-crbug-1201011.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --turbo-fast-api-calls
+// Flags: --turbo-fast-api-calls --expose-fast-api
const fast_c_api = new d8.test.FastCAPI();
function foo(obj) {
diff --git a/deps/v8/test/mjsunit/compiler/regress-crbug-1201057.js b/deps/v8/test/mjsunit/compiler/regress-crbug-1201057.js
index 7afaaa6e38..4315175ad5 100644
--- a/deps/v8/test/mjsunit/compiler/regress-crbug-1201057.js
+++ b/deps/v8/test/mjsunit/compiler/regress-crbug-1201057.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --turbo-fast-api-calls
+// Flags: --turbo-fast-api-calls --expose-fast-api
const fast_c_api = new d8.test.FastCAPI();
const fast_obj = Object.create(fast_c_api);
diff --git a/deps/v8/test/mjsunit/compiler/regress-crbug-1201082.js b/deps/v8/test/mjsunit/compiler/regress-crbug-1201082.js
index 2ec25b3a15..142d0d7861 100644
--- a/deps/v8/test/mjsunit/compiler/regress-crbug-1201082.js
+++ b/deps/v8/test/mjsunit/compiler/regress-crbug-1201082.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --turbo-fast-api-calls
+// Flags: --turbo-fast-api-calls --expose-fast-api
const fast_c_api = new d8.test.FastCAPI();
function foo(obj) {
diff --git a/deps/v8/test/mjsunit/compiler/regress-crbug-1223107.js b/deps/v8/test/mjsunit/compiler/regress-crbug-1223107.js
index 1cec034063..fa2d62a673 100644
--- a/deps/v8/test/mjsunit/compiler/regress-crbug-1223107.js
+++ b/deps/v8/test/mjsunit/compiler/regress-crbug-1223107.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --turbo-fast-api-calls --allow-natives-syntax --opt
+// Flags: --turbo-fast-api-calls --expose-fast-api --allow-natives-syntax --opt
const fast_c_api = new d8.test.FastCAPI();
assertThrows(() => {fast_c_api.add_all_sequence()});
diff --git a/deps/v8/test/mjsunit/compiler/regress-crbug-1241464.js b/deps/v8/test/mjsunit/compiler/regress-crbug-1241464.js
new file mode 100644
index 0000000000..45af9bf375
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-crbug-1241464.js
@@ -0,0 +1,15 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --turbo-fast-api-calls --expose-fast-api
+
+(function() {
+ const fast_c_api = new d8.test.FastCAPI();
+ const func1 = fast_c_api.fast_call_count;
+ assertThrows(() => new func1());
+ const func2 = fast_c_api.slow_call_count;
+ assertThrows(() => new func2());
+ const func3 = fast_c_api.reset_counts;
+ assertThrows(() => new func3());
+})();
diff --git a/deps/v8/test/mjsunit/concurrent-initial-prototype-change-1.js b/deps/v8/test/mjsunit/concurrent-initial-prototype-change-1.js
index 981e8e6bb6..2ca1f8a7d9 100644
--- a/deps/v8/test/mjsunit/concurrent-initial-prototype-change-1.js
+++ b/deps/v8/test/mjsunit/concurrent-initial-prototype-change-1.js
@@ -25,11 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
-// Flags: --concurrent-recompilation
-// Flags: --nostress-opt --no-always-opt
-// Flags: --no-turboprop
-
+// Flags: --allow-natives-syntax --concurrent-recompilation
+// Flags: --no-stress-opt --no-always-opt --no-turboprop
+//
// --nostress-opt is in place because this particular optimization
// (guaranteeing that the Array prototype chain has no elements) is
// maintained isolate-wide. Once it's been "broken" by the change
@@ -37,11 +35,6 @@
// optimization anymore, and the code will remain optimized despite
// additional changes to the prototype chain.
-if (!%IsConcurrentRecompilationSupported()) {
- print("Concurrent recompilation is disabled. Skipping this test.");
- quit();
-}
-
function f1(a, i) {
return a[i] + 0.5;
}
@@ -62,7 +55,7 @@ assertEquals(0.5, f1(arr, 0));
%WaitForBackgroundOptimization();
Object.prototype[1] = 1.5;
assertEquals(2, f1(arr, 1));
-assertUnoptimized(f1, "no sync");
+assertUnoptimized(f1);
// Sync with background thread to conclude optimization, which bails out
// due to map dependency.
%FinalizeOptimization();
diff --git a/deps/v8/test/mjsunit/concurrent-initial-prototype-change-2.js b/deps/v8/test/mjsunit/concurrent-initial-prototype-change-2.js
index 585388468f..b9505abfaa 100644
--- a/deps/v8/test/mjsunit/concurrent-initial-prototype-change-2.js
+++ b/deps/v8/test/mjsunit/concurrent-initial-prototype-change-2.js
@@ -25,8 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --allow-natives-syntax
-// Flags: --concurrent-recompilation --block-concurrent-recompilation
+// Flags: --allow-natives-syntax --concurrent-recompilation
// Flags: --nostress-opt --no-always-opt
// --nostress-opt is in place because this particular optimization
@@ -36,11 +35,6 @@
// optimization anymore, and the code will remain optimized despite
// additional changes to the prototype chain.
-if (!%IsConcurrentRecompilationSupported()) {
- print("Concurrent recompilation is disabled. Skipping this test.");
- quit();
-}
-
function f1(a, i) {
return a[i] + 0.5;
}
@@ -51,19 +45,21 @@ assertEquals(0.5, f1(arr, 0));
assertEquals(0.5, f1(arr, 0));
// Optimized code of f1 depends on initial object and array maps.
+%DisableOptimizationFinalization();
%OptimizeFunctionOnNextCall(f1, "concurrent");
// Kick off recompilation.
assertEquals(0.5, f1(arr, 0));
+%WaitForBackgroundOptimization();
// Invalidate current initial object map.
Object.prototype[1] = 1.5;
assertEquals(2, f1(arr, 1));
// Not yet optimized since optimization finalization is disabled.
-assertUnoptimized(f1, "no sync");
-// Let concurrent recompilation proceed.
-%UnblockConcurrentRecompilation();
-// Sync with background thread to conclude optimization, which may or may not
-// bailout due to map dependency, depending on whether the compiler read the
-// NoElements protector before or after the store to Object.prototype above.
+assertUnoptimized(f1);
+// Sync with background thread to conclude optimization, which does bailout due
+// to map dependency, because the compiler read the NoElements protector before
+// the store to Object.prototype above.
+%FinalizeOptimization();
+assertUnoptimized(f1);
assertEquals(2, f1(arr, 1));
// Clear type info for stress runs.
%ClearFunctionFeedback(f1);
diff --git a/deps/v8/test/mjsunit/const-dict-tracking.js b/deps/v8/test/mjsunit/const-dict-tracking.js
index c5f7ee3af3..b7773a15f1 100644
--- a/deps/v8/test/mjsunit/const-dict-tracking.js
+++ b/deps/v8/test/mjsunit/const-dict-tracking.js
@@ -715,7 +715,7 @@ function testbench(o, proto, update_proto, check_constness) {
%DisableOptimizationFinalization();
%OptimizeFunctionOnNextCall(read_length, "concurrent");
assertEquals(1, read_length(o));
- assertUnoptimized(read_length, "no sync");
+ assertUnoptimized(read_length);
%WaitForBackgroundOptimization();
var other_proto1 = [];
diff --git a/deps/v8/test/mjsunit/es6/typedarray-detached.js b/deps/v8/test/mjsunit/es6/typedarray-detached.js
index fc5421b8b1..d005454506 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-detached.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-detached.js
@@ -529,8 +529,10 @@ function TestTypedArraySet() {
assertThrows(function() { a.set.call({}) }, TypeError);
assertThrows(function() { a.set.call([]) }, TypeError);
- assertThrows(function() { a.set(0); }, TypeError);
- assertThrows(function() { a.set(0, 1); }, TypeError);
+ a.set(0);
+ assertArrayPrefix(expected, a);
+ a.set(0, 1);
+ assertArrayPrefix(expected, a);
assertEquals(1, a.set.length);
}
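// Why a.set(0) stopped throwing: the spec coerces a non-object source with
// ToObject, and a Number wrapper has no "length" property, so zero elements
// are copied and the call is a no-op (sketch, independent of the detached
// buffer set up above):
const ta = new Int32Array([1, 2, 3]);
ta.set(0);        // Object(0) has no length -> 0 elements copied
ta.set(0, 1);     // same, with an explicit target offset
console.log(ta);  // Int32Array(3) [1, 2, 3]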
diff --git a/deps/v8/test/mjsunit/es6/typedarray-every.js b/deps/v8/test/mjsunit/es6/typedarray-every.js
index feb7cc90cc..a4a3f7c8d8 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-every.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-every.js
@@ -82,18 +82,16 @@ function TestTypedArrayForEach(constructor) {
CheckWrapping({}, Object);
// Detaching the buffer backing the typed array mid-way should
- // still make .forEach() finish, and the array should keep being
+ // still make .every() finish, and the array should keep being
// empty after detaching it.
count = 0;
a = new constructor(3);
result = a.every(function (n, index, array) {
- assertFalse(array[index] === undefined); // don't get here if detached
- if (count > 0) %ArrayBufferDetach(array.buffer);
- array[index] = n + 1;
count++;
- return count > 1 ? array[index] === undefined : true;
+ if (count > 1) %ArrayBufferDetach(array.buffer);
+ return count > 2 ? n === undefined : true;
});
- assertEquals(2, count);
+ assertEquals(3, count);
assertEquals(true, result);
CheckTypedArrayIsDetached(a);
assertEquals(undefined, a[0]);
diff --git a/deps/v8/test/mjsunit/es6/typedarray-filter.js b/deps/v8/test/mjsunit/es6/typedarray-filter.js
index 077016a26c..2d2ef5d1ee 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-filter.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-filter.js
@@ -19,10 +19,22 @@ function TestTypedArrayFilter(constructor) {
assertEquals(1, constructor.prototype.filter.length);
// Throw type error if source array is detached while executing a callback
- let ta1 = new constructor(10);
- assertThrows(() =>
- ta1.filter(() => %ArrayBufferDetach(ta1.buffer))
- , TypeError);
+ let ta1 = new constructor(4);
+ let seen = [];
+ let result = ta1.filter((val, idx) => {
+ if (idx === 0) {
+ %ArrayBufferDetach(ta1.buffer);
+ }
+ seen.push(val);
+ return idx < 3;
+ });
+ assertArrayEquals(seen, [0, undefined, undefined, undefined]);
+ // https://tc39.es/ecma262/#sec-setvalueinbuffer
+ // undefined values should be converted to numerics.
+ const expectedResult = [Float32Array, Float64Array].includes(constructor) ?
+ [0, NaN, NaN] :
+ [0, 0, 0];
+ assertArrayEquals(result, expectedResult);
// A new typed array should be created after finishing callbacks
var speciesCreated = 0;
diff --git a/deps/v8/test/mjsunit/es6/typedarray-foreach.js b/deps/v8/test/mjsunit/es6/typedarray-foreach.js
index 81c212046a..413cf68105 100644
--- a/deps/v8/test/mjsunit/es6/typedarray-foreach.js
+++ b/deps/v8/test/mjsunit/es6/typedarray-foreach.js
@@ -85,20 +85,18 @@ function TestTypedArrayForEach(constructor) {
assertEquals(42, a[1]);
// Detaching the buffer backing the typed array mid-way should
- // still make .forEach() finish, but exiting early due to the missing
- // elements, and the array should keep being empty after detaching it.
- // TODO(dehrenberg): According to the ES6 spec, accessing or testing
- // for members on a detached TypedArray should throw, so really this
- // should throw in the third iteration. However, this behavior matches
- // the Khronos spec.
+ // still make .forEach() finish, but the first argument of the callback
+  // should be undefined, and the array should keep being empty after
+ // detaching it.
a = new constructor(3);
count = 0;
a.forEach(function (n, index, array) {
if (count > 0) %ArrayBufferDetach(array.buffer);
+ if (count > 1) assertTrue(n === undefined);
array[index] = n + 1;
count++;
});
- assertEquals(2, count);
+ assertEquals(3, count);
CheckTypedArrayIsDetached(a);
assertEquals(undefined, a[0]);
diff --git a/deps/v8/test/mjsunit/es6/typedarray.js b/deps/v8/test/mjsunit/es6/typedarray.js
index 9d1e9d782c..d5aecce519 100644
--- a/deps/v8/test/mjsunit/es6/typedarray.js
+++ b/deps/v8/test/mjsunit/es6/typedarray.js
@@ -609,8 +609,10 @@ function TestTypedArraySet() {
assertThrows(function() { a.set.call({}) }, TypeError);
assertThrows(function() { a.set.call([]) }, TypeError);
- assertThrows(function() { a.set(0); }, TypeError);
- assertThrows(function() { a.set(0, 1); }, TypeError);
+ a.set(0);
+ assertArrayPrefix(expected, a);
+ a.set(0, 1);
+ assertArrayPrefix(expected, a);
assertEquals(1, a.set.length);
diff --git a/deps/v8/test/mjsunit/external-array.js b/deps/v8/test/mjsunit/external-array.js
index 3f0614032c..0299914169 100644
--- a/deps/v8/test/mjsunit/external-array.js
+++ b/deps/v8/test/mjsunit/external-array.js
@@ -614,7 +614,7 @@ a61.set(a62)
assertArrayPrefix([1, 12], a61)
// Invalid source
-assertThrows(function() { a.set(0); }, TypeError);
+a.set(0); // does not throw
assertArrayPrefix([1,2,3,4,5,6], a);
a.set({}); // does not throw
assertArrayPrefix([1,2,3,4,5,6], a);
diff --git a/deps/v8/test/mjsunit/harmony/bigint/tostring-toolong.js b/deps/v8/test/mjsunit/harmony/bigint/tostring-toolong.js
new file mode 100644
index 0000000000..c62051b252
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/bigint/tostring-toolong.js
@@ -0,0 +1,17 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+const kLongString = 100_000; // Bigger than kMaxRenderedLength in bigint.cc.
+
+const str = 'z'.repeat(kLongString);
+try {
+ BigInt(str);
+ assertUnreachable("should have thrown");
+} catch (e) {
+ assertTrue(e instanceof SyntaxError);
+ assertTrue(e.message.startsWith("Cannot convert zzz"));
+ // Despite adding "Cannot convert", the overall message is shorter than
+ // the invalid string, because it only includes a prefix of the string.
+ assertTrue(e.message.length < kLongString);
+}
diff --git a/deps/v8/test/mjsunit/harmony/sharedarraybuffer.js b/deps/v8/test/mjsunit/harmony/sharedarraybuffer.js
index 7b66118a73..433c14e195 100644
--- a/deps/v8/test/mjsunit/harmony/sharedarraybuffer.js
+++ b/deps/v8/test/mjsunit/harmony/sharedarraybuffer.js
@@ -416,8 +416,10 @@ function TestTypedArraySet() {
assertThrows(function() { a.set.call({}) }, TypeError);
assertThrows(function() { a.set.call([]) }, TypeError);
- assertThrows(function() { a.set(0); }, TypeError);
- assertThrows(function() { a.set(0, 1); }, TypeError);
+ a.set(0);
+ assertArrayPrefix(expected, a);
+ a.set(0, 1);
+ assertArrayPrefix(expected, a);
}
TestTypedArraySet();
diff --git a/deps/v8/test/mjsunit/messages.js b/deps/v8/test/mjsunit/messages.js
index 7c3521b685..774bd3d2a3 100644
--- a/deps/v8/test/mjsunit/messages.js
+++ b/deps/v8/test/mjsunit/messages.js
@@ -496,7 +496,7 @@ test(function() {
// kMalformedRegExp
test(function() {
- /(/.test("a");
+ new Function('/(/.test("a");');
}, "Invalid regular expression: /(/: Unterminated group", SyntaxError);
// kParenthesisInArgString
diff --git a/deps/v8/test/mjsunit/mjsunit.js b/deps/v8/test/mjsunit/mjsunit.js
index 061fc291ab..42455baa61 100644
--- a/deps/v8/test/mjsunit/mjsunit.js
+++ b/deps/v8/test/mjsunit/mjsunit.js
@@ -132,9 +132,7 @@ var assertInstanceof;
// Assert that this code is never executed (i.e., always fails if executed).
var assertUnreachable;
-// Assert that the function code is (not) optimized. If "no sync" is passed
-// as second argument, we do not wait for the concurrent optimization thread to
-// finish when polling for optimization status.
+// Assert that the function code is (not) optimized.
// Only works with --allow-natives-syntax.
var assertOptimized;
var assertUnoptimized;
@@ -657,22 +655,21 @@ var prettyPrinted;
var OptimizationStatusImpl = undefined;
- var OptimizationStatus = function(fun, sync_opt) {
+ var OptimizationStatus = function(fun) {
if (OptimizationStatusImpl === undefined) {
try {
OptimizationStatusImpl = new Function(
- "fun", "sync", "return %GetOptimizationStatus(fun, sync);");
+ "fun", "return %GetOptimizationStatus(fun);");
} catch (e) {
throw new Error("natives syntax not allowed");
}
}
- return OptimizationStatusImpl(fun, sync_opt);
+ return OptimizationStatusImpl(fun);
}
assertUnoptimized = function assertUnoptimized(
- fun, sync_opt, name_opt, skip_if_maybe_deopted = true) {
- if (sync_opt === undefined) sync_opt = "";
- var opt_status = OptimizationStatus(fun, sync_opt);
+ fun, name_opt, skip_if_maybe_deopted = true) {
+ var opt_status = OptimizationStatus(fun);
// Tests that use assertUnoptimized() do not make sense if --always-opt
// option is provided. Such tests must add --no-always-opt to flags comment.
assertFalse((opt_status & V8OptimizationStatus.kAlwaysOptimize) !== 0,
@@ -690,9 +687,8 @@ var prettyPrinted;
}
assertOptimized = function assertOptimized(
- fun, sync_opt, name_opt, skip_if_maybe_deopted = true) {
- if (sync_opt === undefined) sync_opt = "";
- var opt_status = OptimizationStatus(fun, sync_opt);
+ fun, name_opt, skip_if_maybe_deopted = true) {
+ var opt_status = OptimizationStatus(fun);
// Tests that use assertOptimized() do not make sense for Lite mode where
// optimization is always disabled, explicitly exit the test with a warning.
if (opt_status & V8OptimizationStatus.kLiteMode) {
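// With the sync_opt parameter gone, the harness assertions take the function
// and an optional name, and tests that need determinism synchronize
// explicitly through the intrinsics instead (sketch, mjsunit + natives syntax):
function f() { return 1; }
%PrepareFunctionForOptimization(f);
f();
assertUnoptimized(f);  // was: assertUnoptimized(f, "no sync")
%OptimizeFunctionOnNextCall(f);
f();
assertOptimized(f);    // waiting is now done with %FinalizeOptimization()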
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 4e4e5ec61b..0a19067cc6 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -43,7 +43,11 @@
##############################################################################
# Temporal tests to be implemented
# https://crbug.com/v8/11544
- 'temporal/*': [FAIL],
+ 'temporal/plain*': [FAIL],
+ 'temporal/duration*': [FAIL],
+ 'temporal/calendar*': [FAIL],
+ 'temporal/zoned*': [FAIL],
+ 'temporal/instant*': [FAIL],
##############################################################################
# Open bugs.
@@ -160,12 +164,12 @@
'wasm/compare-exchange-stress': [PASS, SLOW, NO_VARIANTS],
'wasm/compare-exchange64-stress': [PASS, SLOW, NO_VARIANTS],
- # Very slow on ARM and MIPS, contains no architecture dependent code.
- 'unicode-case-overoptimization0': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips64, mips, riscv64)', SKIP]],
- 'unicode-case-overoptimization1': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips64, mips, riscv64)', SKIP]],
- 'regress/regress-3976': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips64, mips, riscv64)', SKIP]],
- 'regress/regress-crbug-482998': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips, riscv64)', SKIP]],
- 'regress/regress-740784': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips, riscv64)', SKIP]],
+  # Very slow on ARM, MIPS, RISC-V and LoongArch; contains no architecture-dependent code.
+ 'unicode-case-overoptimization0': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips64, mips, riscv64, loong64)', SKIP]],
+ 'unicode-case-overoptimization1': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips64, mips, riscv64, loong64)', SKIP]],
+ 'regress/regress-3976': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips64, mips, riscv64, loong64)', SKIP]],
+ 'regress/regress-crbug-482998': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips, riscv64, loong64)', SKIP]],
+ 'regress/regress-740784': [PASS, NO_VARIANTS, ['arch in (arm, arm64, mipsel, mips64el, mips, riscv64, loong64)', SKIP]],
# TODO(bmeurer): Flaky timeouts (sometimes <1s, sometimes >3m).
'unicodelctest': [PASS, NO_VARIANTS],
@@ -207,6 +211,11 @@
'regress/regress-crbug-941743': [PASS, HEAVY],
'regress/regress-crbug-1191886': [PASS, HEAVY],
'wasm/externref-globals': [PASS, HEAVY],
+
+ # BUG(v8:12173).
+ 'compiler/call-with-arraylike-or-spread-7': [PASS, FAIL],
+ 'ic-migrated-map-add-when-monomorphic': [PASS, FAIL],
+ 'es6/map-constructor-entry-side-effect2': [PASS, FAIL]
}], # ALWAYS
##############################################################################
@@ -811,6 +820,15 @@
}], # 'arch == mips64el or arch == mips64'
##############################################################################
+['arch == loong64', {
+
+  # This test fails because, when converting sNaN to qNaN, loong64 uses a
+  # different qNaN encoding than x86 architectures.
+ 'wasm/float-constant-folding': [SKIP],
+
+}], # 'arch == loong64'
+
+##############################################################################
['arch == riscv64', {
# Slow tests which times out in debug mode.
@@ -878,6 +896,7 @@
'regress/wasm/regress-1161954': [SKIP],
'regress/wasm/regress-1187831': [SKIP],
'regress/wasm/regress-1199662': [SKIP],
+ 'regress/wasm/regress-1231950': [SKIP],
'regress/regress-1172797': [SKIP],
'regress/wasm/regress-1179025': [SKIP],
'wasm/multi-value-simd': [SKIP],
@@ -1013,6 +1032,10 @@
# BUG(v8:11656) Skipped until we make progress on NumFuzz.
'baseline/test-osr': [SKIP],
+
+ # BUG(v8:12013) Skipped until we remove flakes on NumFuzz.
+ 'compiler/concurrent-inlining-1': [SKIP],
+ 'compiler/inlined-call-polymorphic': [SKIP],
}], # 'deopt_fuzzer'
##############################################################################
@@ -1051,6 +1074,15 @@
# BUG(v8:11656) Skipped until we make progress on NumFuzz.
'baseline/test-osr': [SKIP],
+
+ # BUG(v8:11826) Skipped until we remove flakes on NumFuzz.
+ 'baseline/flush-only-baseline-code': [SKIP],
+ 'baseline/flush-baseline-code': [SKIP],
+ 'regress/wasm/regress-1231950': [SKIP],
+
+ # BUG(v8:12013) Skipped until we remove flakes on NumFuzz.
+ 'compiler/concurrent-inlining-1': [SKIP],
+ 'compiler/inlined-call-polymorphic': [SKIP],
}], # 'gc_fuzzer'
##############################################################################
@@ -1084,18 +1116,6 @@
# BUG(v8:7166).
'd8/enable-tracing': [SKIP],
- # Rely on (blocking) concurrent compilation.
- 'compiler/concurrent-invalidate-transition-map': [SKIP],
- 'compiler/concurrent-proto-change': [SKIP],
- 'compiler/manual-concurrent-recompile': [SKIP],
- 'compiler/regress-905555-2': [SKIP],
- 'compiler/regress-905555': [SKIP],
- 'compiler/regress-9945-1': [SKIP],
- 'concurrent-initial-prototype-change-1': [SKIP],
- 'concurrent-initial-prototype-change-2': [SKIP],
- 'regress/regress-356053': [SKIP],
- 'regress/regress-embedded-cons-string': [SKIP],
-
# Intentionally non-deterministic using shared arraybuffers between workers.
'wasm/atomics-stress': [SKIP],
'wasm/atomics64-stress': [SKIP],
@@ -1103,6 +1123,8 @@
'regress/regress-1205290': [SKIP],
'regress/regress-1212404': [SKIP],
'regress/regress-1221035': [SKIP],
+ 'regress/regress-1232620': [SKIP],
+ 'regress/regress-crbug-1237153': [SKIP],
'regress/wasm/regress-1067621': [SKIP],
# BUG(v8:9975).
@@ -1275,8 +1297,9 @@
}], # arch not in (x64, ia32, arm64, arm)
##############################################################################
-['system != linux', {
- # Multi-mapped mock allocator is only available on Linux.
+['system != linux or virtual_memory_cage == True', {
+ # Multi-mapped mock allocator is only available on Linux, and only if the
+ # virtual memory cage is not enabled.
'regress/regress-crbug-1041232': [SKIP],
'regress/regress-crbug-1104608': [SKIP],
}],
@@ -1432,13 +1455,15 @@
'regress/wasm/regress-1187831': [SKIP],
'regress/wasm/regress-1199662': [SKIP],
'regress/wasm/regress-1231950': [SKIP],
+ 'regress/wasm/regress-1242300': [SKIP],
'regress/wasm/regress-1242689': [SKIP],
}], # no_simd_hardware == True
##############################################################################
# TODO(v8:11421): Port baseline compiler to other architectures.
-['arch not in (x64, arm64, ia32, arm, mips64el, mipsel)', {
+['arch not in (x64, arm64, ia32, arm, mips64el, mipsel, riscv64, loong64)', {
'baseline/*': [SKIP],
+ 'regress/regress-1242306': [SKIP],
}],
##############################################################################
@@ -1453,14 +1478,10 @@
}], # variant == experimental_regexp
##############################################################################
-['variant == concurrent_inlining', {
- 'concurrent-initial-prototype-change-1': [SKIP],
-}], # variant == concurrent_inlining
-
-##############################################################################
['variant == instruction_scheduling or variant == stress_instruction_scheduling', {
- # BUG(12018): This test currently fails with --turbo-instruction-scheduling.
+ # BUG(12018): These tests currently fail with --turbo-instruction-scheduling.
'regress/wasm/regress-1231950': [SKIP],
+ 'regress/wasm/regress-1242300': [SKIP],
}], # variant == instruction_scheduling or variant == stress_instruction_scheduling
################################################################################
@@ -1510,7 +1531,6 @@
'compiler/regress-9945-1': [SKIP],
'concurrent-initial-prototype-change-1': [SKIP],
'concurrent-initial-prototype-change-2': [SKIP],
- 'regress/regress-356053': [SKIP],
'regress/regress-embedded-cons-string': [SKIP],
# Requires a second isolate
'regress/regress-1212404': [SKIP],
diff --git a/deps/v8/test/mjsunit/regexp.js b/deps/v8/test/mjsunit/regexp.js
index e652525914..84d8464ee5 100644
--- a/deps/v8/test/mjsunit/regexp.js
+++ b/deps/v8/test/mjsunit/regexp.js
@@ -280,14 +280,8 @@ re.compile(void 0);
assertEquals('/(?:)/', re.toString());
-// Check for lazy RegExp literal creation
-function lazyLiteral(doit) {
- if (doit) return "".replace(/foo(/gi, "");
- return true;
-}
-
-assertTrue(lazyLiteral(false));
-assertThrows("lazyLiteral(true)");
+// Check for early syntax errors.
+assertThrows("/foo(/gi");
// Check $01 and $10
re = new RegExp("(.)(.)(.)(.)(.)(.)(.)(.)(.)(.)");
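// The rewrite above reflects that malformed regexp literals are early syntax
// errors: the enclosing script no longer parses at all, so the throw has to
// be observed from deferred code, e.g. (sketch):
assertThrows(() => eval("/foo(/gi"), SyntaxError);
assertThrows(() => new Function('/(/.test("a");'), SyntaxError);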
diff --git a/deps/v8/test/mjsunit/regress/regress-1193903.js b/deps/v8/test/mjsunit/regress/regress-1193903.js
index 491ba1150d..8afbf584b5 100644
--- a/deps/v8/test/mjsunit/regress/regress-1193903.js
+++ b/deps/v8/test/mjsunit/regress/regress-1193903.js
@@ -4,9 +4,12 @@
//
// Flags: --allow-natives-syntax
-var no_sync_uninternalized = "no " + "sync";
-%InternalizeString(no_sync_uninternalized);
+var s_uninternalized = "concurrent" + "-skip-finalization";
+%InternalizeString(s_uninternalized);
-// Make sure %GetOptimizationStatus works with a non-internalized string
-// parameter.
-%GetOptimizationStatus(function() {}, no_sync_uninternalized)
+function foo() {}
+
+// Make sure %OptimizeFunctionOnNextCall works with a non-internalized
+// string parameter.
+%PrepareFunctionForOptimization(foo);
+%OptimizeFunctionOnNextCall(foo, s_uninternalized)
diff --git a/deps/v8/test/mjsunit/regress/regress-1209444.js b/deps/v8/test/mjsunit/regress/regress-1209444.js
new file mode 100644
index 0000000000..3706bb3d53
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1209444.js
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var __v_1 = Array();
+Object.freeze(__v_1);
+for (__v_0 = -21; __v_0 < 12800; ++__v_0) {
+ __v_1[0] += __v_1[-32];
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-1242306.js b/deps/v8/test/mjsunit/regress/regress-1242306.js
new file mode 100644
index 0000000000..9c1c4bb310
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1242306.js
@@ -0,0 +1,20 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax --sparkplug
+
+function foo(){
+ // __proto__ is a setter that is defined to return undefined.
+ return __proto__ = 5;
+}
+assertEquals(foo(), 5);
+assertEquals(foo(), 5);
+
+%EnsureFeedbackVectorForFunction(foo);
+assertEquals(foo(), 5);
+assertEquals(foo(), 5);
+
+%CompileBaseline(foo);
+assertEquals(foo(), 5);
+assertEquals(foo(), 5);
diff --git a/deps/v8/test/mjsunit/regress/regress-1243989.js b/deps/v8/test/mjsunit/regress/regress-1243989.js
new file mode 100644
index 0000000000..37c7ea75ff
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-1243989.js
@@ -0,0 +1,18 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --stack-size=100
+
+function f(x) {
+ new x.Uint16Array();
+ function h(y) { /[\cA]/; }
+}
+let i = 0;
+function g() {
+ try { g(); } catch (e) {}
+ if (i++ > 200) return; // The original error was at i == 116.
+ f();
+}
+f(this);
+g();
diff --git a/deps/v8/test/mjsunit/regress/regress-343609.js b/deps/v8/test/mjsunit/regress/regress-343609.js
index d7bfe63543..bad116e306 100644
--- a/deps/v8/test/mjsunit/regress/regress-343609.js
+++ b/deps/v8/test/mjsunit/regress/regress-343609.js
@@ -2,8 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --block-concurrent-recompilation
-// Flags: --expose-gc
+// Flags: --allow-natives-syntax --expose-gc
function Ctor() {
this.a = 1;
diff --git a/deps/v8/test/mjsunit/regress/regress-347914.js b/deps/v8/test/mjsunit/regress/regress-347914.js
index 6ec0ea5bf5..4e5c1cdf2b 100644
--- a/deps/v8/test/mjsunit/regress/regress-347914.js
+++ b/deps/v8/test/mjsunit/regress/regress-347914.js
@@ -46,7 +46,7 @@ assertInstanceof = function assertInstanceof(obj, type) { if (!(obj instanceof t
assertDoesNotThrow = function assertDoesNotThrow(code, name_opt) { try { if (typeof code == 'function') { code(); } else { eval(code); } } catch (e) { fail("threw an exception: ", e.message || e, name_opt); } };
assertUnreachable = function assertUnreachable(name_opt) { var message = "Fail" + "ure: unreachable"; if (name_opt) { message += " - " + name_opt; } };
var OptimizationStatus;
-try { OptimizationStatus = new Function("fun", "sync", "return %GetOptimizationStatus(fun, sync);"); } catch (e) { OptimizationStatus = function() { } }
+try { OptimizationStatus = new Function("fun", "sync", "if (sync) %WaitForBackgroundOptimization(); return %GetOptimizationStatus(fun);"); } catch (e) { OptimizationStatus = function() { } }
assertUnoptimized = function assertUnoptimized(fun, sync_opt, name_opt) { if (sync_opt === undefined) sync_opt = ""; assertTrue(OptimizationStatus(fun, sync_opt) != 1, name_opt); }
assertOptimized = function assertOptimized(fun, sync_opt, name_opt) { if (sync_opt === undefined) sync_opt = ""; assertTrue(OptimizationStatus(fun, sync_opt) != 2, name_opt); }
triggerAssertFalse = function() { }
diff --git a/deps/v8/test/mjsunit/regress/regress-356053.js b/deps/v8/test/mjsunit/regress/regress-356053.js
deleted file mode 100644
index ea84973d8e..0000000000
--- a/deps/v8/test/mjsunit/regress/regress-356053.js
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-gc --allow-natives-syntax
-// Flags: --concurrent-recompilation --block-concurrent-recompilation
-
-gc();
-try { %UnblockConcurrentRecompilation(); } catch (e) { }
diff --git a/deps/v8/test/mjsunit/regress/regress-4023.js b/deps/v8/test/mjsunit/regress/regress-4023.js
index 16bbeccbe8..5f79e67389 100644
--- a/deps/v8/test/mjsunit/regress/regress-4023.js
+++ b/deps/v8/test/mjsunit/regress/regress-4023.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --expose-gc --block-concurrent-recompilation
+// Flags: --allow-natives-syntax --expose-gc
function Inner() {
this.property = "OK";
@@ -36,8 +36,10 @@ SetInner(outer, inner);
// on the compiler thread :-)
KeepMapAlive(outer);
KeepMapAlive(outer);
+%DisableOptimizationFinalization();
%OptimizeFunctionOnNextCall(KeepMapAlive, "concurrent");
KeepMapAlive(outer);
+%WaitForBackgroundOptimization();
// So far, all is well. Collect type feedback and optimize.
print(Crash(outer));
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1031479.js b/deps/v8/test/mjsunit/regress/regress-crbug-1031479.js
index 2c3fded9e0..2b26148cd0 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-1031479.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1031479.js
@@ -4,7 +4,7 @@
// Flags: --interrupt-budget=200 --stack-size=200
// Flags: --budget-for-feedback-vector-allocation=100 --expose-gc
-// Flags: --stress-flush-code --flus-bytecode
+// Flags: --stress-flush-code --flush-bytecode
var i = 0;
function main() {
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1203122-1.js b/deps/v8/test/mjsunit/regress/regress-crbug-1203122-1.js
new file mode 100644
index 0000000000..480bf83751
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1203122-1.js
@@ -0,0 +1,23 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function main() {
+ class C {
+ m() {
+ super.prototype;
+ }
+ }
+ // Home object's __proto__ is a function.
+ function f() {}
+ C.prototype.__proto__ = f;
+
+ let c = new C();
+
+ f.prototype;
+ c.m();
+}
+
+for (let i = 0; i < 100; ++i) {
+ main();
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1203122-2.js b/deps/v8/test/mjsunit/regress/regress-crbug-1203122-2.js
new file mode 100644
index 0000000000..cf1fe0dfce
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1203122-2.js
@@ -0,0 +1,23 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function main() {
+ class A {}
+ A.prototype.prototype = 'a string';
+ class C extends A {
+ m() {
+ super.prototype;
+ }
+ }
+ function f() {}
+
+ // Create handler; receiver is a function.
+ C.prototype.m.call(f);
+ // Use handler; receiver not a function.
+ C.prototype.m.call('not a function');
+}
+
+for (let i = 0; i < 100; ++i) {
+ main();
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1216261.js b/deps/v8/test/mjsunit/regress/regress-crbug-1216261.js
new file mode 100644
index 0000000000..4448d5d0e3
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1216261.js
@@ -0,0 +1,5 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+assertThrows("function f() {class T { a = {arguments}}}", SyntaxError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1236286.js b/deps/v8/test/mjsunit/regress/regress-crbug-1236286.js
new file mode 100644
index 0000000000..f8f3c66ab8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1236286.js
@@ -0,0 +1,20 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --fuzzing
+// Without --fuzzing, %PrepareFunctionForOptimization would fail.
+
+for (let i = 0; i < 2; i++) {
+ try { new this.invalid(); } catch {}
+
+ function invalid(x) {
+ "use asm";
+ var y = x.Math.fround;
+ function foo() {}
+ return {foo: foo};
+ }
+
+ %PrepareFunctionForOptimization(invalid);
+ %OptimizeFunctionOnNextCall(invalid);
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1238467.js b/deps/v8/test/mjsunit/regress/regress-crbug-1238467.js
new file mode 100644
index 0000000000..9ef3820a7b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1238467.js
@@ -0,0 +1,17 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --expose-async-hooks --ignore-unhandled-promises
+
+const ah = async_hooks.createHook({});
+ah.enable();
+
+import("./does_not_exist.js").then();
+
+function target() {
+ isFinite.__proto__.__proto__ = new Proxy(target, {
+ get() {
+ return Promise.resolve();
+ }})
+}
+target();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1239907.js b/deps/v8/test/mjsunit/regress/regress-crbug-1239907.js
new file mode 100644
index 0000000000..8563428c04
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1239907.js
@@ -0,0 +1,36 @@
+
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-async-hooks --ignore-unhandled-promises --stack-size=100
+
+async_hooks.createHook({ promiseResolve() { throw new Error(); } }).enable()
+
+import("./does_not_exist.js").then();
+function target() {
+ isFinite.__proto__.__proto__ = new Proxy(target, {
+ get() { return Promise.resolve(); }
+ })
+}
+target();
+
+function runNearStackLimit(f) {
+ function t() {
+ try {
+ return t();
+ } catch (e) {
+ return f();
+ }
+ }
+ return t();
+}
+
+function __f_2() {
+ return runNearStackLimit(() => {
+ return new Promise(function () {
+ });
+ });
+}
+__f_2().then();
+__f_2().then();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1240661.js b/deps/v8/test/mjsunit/regress/regress-crbug-1240661.js
new file mode 100644
index 0000000000..bc2a8cd78b
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1240661.js
@@ -0,0 +1,11 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --gdbjit --allow-natives-syntax
+
+let f = new Function("boom");
+
+%PrepareFunctionForOptimization(f);
+%OptimizeFunctionOnNextCall(f);
+assertThrows(f, ReferenceError);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-1245870.js b/deps/v8/test/mjsunit/regress/regress-crbug-1245870.js
new file mode 100644
index 0000000000..2ef3f753d5
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-1245870.js
@@ -0,0 +1,14 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+class Outer {
+ test() {
+ return class {
+ static #a() { }
+ b = eval();
+ };
+ }
+}
+const obj = new Outer();
+obj.test();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-435825.js b/deps/v8/test/mjsunit/regress/regress-crbug-435825.js
index 959535bcb5..f338a0b1e0 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-435825.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-435825.js
@@ -5,7 +5,8 @@
Error.prepareStackTrace = function (a,b) { return b; };
try {
- /(invalid regexp/;
+ eval("/(invalid regexp/;");
+ assertUnreachable();
} catch (e) {
assertEquals("[object global]", e.stack[0].getThis().toString());
}
diff --git a/deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js b/deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js
index f371c994aa..6d3496da54 100644
--- a/deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js
+++ b/deps/v8/test/mjsunit/regress/regress-embedded-cons-string.js
@@ -25,31 +25,26 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --expose-gc --allow-natives-syntax
-// Flags: --concurrent-recompilation --block-concurrent-recompilation
+// Flags: --expose-gc --allow-natives-syntax --concurrent-recompilation
// Flags: --opt --no-always-opt
-if (!%IsConcurrentRecompilationSupported()) {
- print("Concurrent recompilation is disabled. Skipping this test.");
- quit();
-}
-
function test(fun) {
%PrepareFunctionForOptimization(fun);
fun();
fun();
- // Mark for concurrent optimization.
+ %DisableOptimizationFinalization();
%OptimizeFunctionOnNextCall(fun, "concurrent");
// Kick off recompilation.
fun();
// Tenure cons string after compile graph has been created.
+ %WaitForBackgroundOptimization();
gc();
  // In the meantime, optimization has not yet been finalized.
- assertUnoptimized(fun, "no sync");
- // Let concurrent recompilation proceed.
- %UnblockConcurrentRecompilation();
+ assertUnoptimized(fun);
+ // Let concurrent recompilation finish.
+ %FinalizeOptimization();
// Concurrent recompilation eventually finishes, embeds tenured cons string.
- assertOptimized(fun, "sync");
+ assertOptimized(fun);
// Visit embedded cons string during mark compact.
gc();
}
diff --git a/deps/v8/test/mjsunit/regress/regress-sync-optimized-lists.js b/deps/v8/test/mjsunit/regress/regress-sync-optimized-lists.js
index 2e2cfd465c..e990eaab0b 100644
--- a/deps/v8/test/mjsunit/regress/regress-sync-optimized-lists.js
+++ b/deps/v8/test/mjsunit/regress/regress-sync-optimized-lists.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --allow-natives-syntax --block-concurrent-recompilation
+// Flags: --allow-natives-syntax
function Ctor() {
this.a = 1;
diff --git a/deps/v8/test/mjsunit/regress/regress-v8-12060.mjs b/deps/v8/test/mjsunit/regress/regress-v8-12060.mjs
new file mode 100644
index 0000000000..8ad25bb1d8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-v8-12060.mjs
@@ -0,0 +1,24 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --no-fuzzy-module-file-extensions
+
+const paths = [
+ ".",
+ "..",
+ "...",
+ "/",
+ "/..",
+ "/../..",
+ "../..",
+ "./..",
+ "./../",
+ "./../..",
+];
+
+const results = await Promise.allSettled(
+ paths.map(path => import(path + "/___._._")));
+for (let result of results) {
+ assertEquals(result.status, 'rejected');
+}
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1237024.js b/deps/v8/test/mjsunit/regress/wasm/regress-1237024.js
new file mode 100644
index 0000000000..04dd8018bf
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1237024.js
@@ -0,0 +1,26 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+
+let array_index = builder.addArray(kWasmS128, true);
+
+builder.addFunction("main", kSig_i_i)
+ .addBody([
+ kExprLocalGet, 0,
+ kGCPrefix, kExprRttCanon, array_index,
+ kGCPrefix, kExprArrayNewDefault, array_index,
+ kGCPrefix, kExprArrayLen, array_index,
+ ])
+ .exportFunc();
+
+var instance = builder.instantiate();
+
+assertThrows(
+ () => instance.exports.main(1 << 26), WebAssembly.RuntimeError,
+ 'requested new array is too large');
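+
+// 2**26 s128 elements amount to 2**30 bytes of payload (editorial note):
+assertEquals(1 << 30, (1 << 26) * 16);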
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1239954.js b/deps/v8/test/mjsunit/regress/wasm/regress-1239954.js
new file mode 100644
index 0000000000..e7d411040a
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1239954.js
@@ -0,0 +1,37 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --experimental-wasm-gc
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+var builder = new WasmModuleBuilder();
+
+let array_index = builder.addArray(kWasmI64, true);
+let sig_index = builder.addType(kSig_v_v);
+
+let main = builder.addFunction("main", kSig_v_i);
+let other = builder.addFunction("other", sig_index).addBody([]);
+
+let table = builder.addTable(kWasmAnyFunc, 1, 1);
+builder.addActiveElementSegment(
+ 0, // table
+ WasmInitExpr.I32Const(0), // offset
+ [1]); // values
+
+main.addBody([
+ kExprI64Const, 0x33,
+ kExprLocalGet, 0,
+ kGCPrefix, kExprRttCanon, array_index,
+ kGCPrefix, kExprArrayNewWithRtt, array_index,
+ kExprDrop,
+ kExprI32Const, 0,
+ kExprCallIndirect, sig_index, table.index,
+]).exportFunc();
+
+var instance = builder.instantiate();
+
+assertThrows(
+ () => instance.exports.main(1<<29), WebAssembly.RuntimeError,
+ 'requested new array is too large');
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1242300.js b/deps/v8/test/mjsunit/regress/wasm/regress-1242300.js
new file mode 100644
index 0000000000..21dcf99ab7
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1242300.js
@@ -0,0 +1,24 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+load('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(16, 32);
+builder.addFunction(undefined, kSig_i_iii)
+ .addBody([
+ kExprI32Const, 0x7f, // i32.const
+ kExprI32Const, 0x1e, // i32.const
+ kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+ kExprI32Const, 0, // i32.const
+ kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+ kExprI32Const, 0, // i32.const
+ kSimdPrefix, kExprI8x16Splat, // i8x16.splat
+ kSimdPrefix, kExprS128Select, // s128.select
+ kSimdPrefix, kExprS128Load32Lane, 0x00, 0x89, 0xfe, 0x03, 0x00, // s128.load32_lane
+ kExprUnreachable,
+]);
+builder.addExport('main', 0);
+const instance = builder.instantiate();
+assertTraps(kTrapMemOutOfBounds, () => instance.exports.main(1, 2, 3));
diff --git a/deps/v8/test/mjsunit/regress/wasm/regress-1247659.js b/deps/v8/test/mjsunit/regress/wasm/regress-1247659.js
new file mode 100644
index 0000000000..753128c8a9
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/wasm/regress-1247659.js
@@ -0,0 +1,87 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+const builder = new WasmModuleBuilder();
+builder.addMemory(16, 17);
+builder.addGlobal(kWasmI32, 1, WasmInitExpr.I32Const(10));
+// Generate function 1 (out of 3).
+builder.addFunction('load', kSig_i_v)
+ .addBody([
+ kExprI32Const, 0, // i32.const
+ kExprI32LoadMem8U, 0, 5, // i32.load8_u
+ ])
+ .exportFunc();
+// Generate function 2 (out of 3).
+builder.addFunction(undefined, makeSig([kWasmI64, kWasmI32], []))
+ .addLocals(kWasmI64, 3)
+ .addLocals(kWasmI32, 5)
+ .addBody([
+ // locals: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]; stack: []
+ kExprGlobalGet, 0, // global.get
+ // locals: [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]; stack: [10]
+ kExprLocalSet, 5, // local.set
+ // locals: [1, 0, 0, 0, 0, 10, 0, 0, 0, 0]; stack: []
+ kExprI32Const, 0, // i32.const
+ // locals: [1, 0, 0, 0, 0, 10, 0, 0, 0, 0]; stack: [0]
+ kExprI32Eqz, // i32.eqz
+ // locals: [1, 0, 0, 0, 0, 10, 0, 0, 0, 0]; stack: [1]
+ kExprLocalSet, 6, // local.set
+ // locals: [1, 0, 0, 0, 0, 10, 1, 0, 0, 0]; stack: []
+ kExprGlobalGet, 0, // global.get
+ // locals: [1, 0, 0, 0, 0, 10, 1, 0, 0, 0]; stack: [10]
+ kExprLocalSet, 7, // local.set
+ // locals: [1, 0, 0, 0, 0, 10, 1, 10, 0, 0]; stack: []
+ kExprI32Const, 0, // i32.const
+ // locals: [1, 0, 0, 0, 0, 10, 1, 10, 0, 0]; stack: [0]
+ kExprI32Const, 1, // i32.const
+ // locals: [1, 0, 0, 0, 0, 10, 1, 10, 0, 0]; stack: [0, 1]
+ kExprI32Sub, // i32.sub
+ // locals: [1, 0, 0, 0, 0, 10, 1, 10, 0, 0]; stack: [-1]
+ kExprLocalSet, 8, // local.set
+ // locals: [1, 0, 0, 0, 0, 10, 1, 10, -1, 0]; stack: []
+ kExprI32Const, 1, // i32.const
+ // locals: [1, 0, 0, 0, 0, 10, 1, 10, -1, 0]; stack: [1]
+ kExprI32Const, 15, // i32.const
+ // locals: [1, 0, 0, 0, 0, 10, 1, 10, -1, 0]; stack: [1, 15]
+ kExprI32And, // i32.and
+ // locals: [1, 0, 0, 0, 0, 10, 1, 10, -1, 0]; stack: [1]
+ kExprLocalSet, 9, // local.set
+ // locals: [1, 0, 0, 0, 0, 10, 1, 10, -1, 1]; stack: []
+ kExprLocalGet, 0, // local.get
+ // locals: [1, 0, 0, 0, 0, 10, 1, 10, -1, 1]; stack: [1]
+ kExprLocalSet, 2, // local.set
+ // locals: [1, 0, 1, 0, 0, 10, 1, 10, -1, 1]; stack: []
+ kExprLocalGet, 0, // local.get
+ // locals: [1, 0, 1, 0, 0, 10, 1, 10, -1, 1]; stack: [1]
+ kExprLocalSet, 3, // local.set
+ // locals: [1, 0, 1, 1, 0, 10, 1, 10, -1, 1]; stack: []
+ kExprLocalGet, 2, // local.get
+ // locals: [1, 0, 1, 1, 0, 10, 1, 10, -1, 1]; stack: [1]
+ kExprLocalGet, 3, // local.get
+ // locals: [1, 0, 1, 1, 0, 10, 1, 10, -1, 1]; stack: [1, 1]
+ kExprI64Sub, // i64.sub
+ // locals: [1, 0, 1, 1, 0, 10, 1, 10, -1, 1]; stack: [0]
+ kExprLocalSet, 4, // local.set
+ // locals: [1, 0, 1, 1, 0, 10, 1, 10, -1, 1]; stack: []
+ kExprI32Const, 1, // i32.const
+ // locals: [1, 0, 1, 1, 0, 10, 1, 10, -1, 1]; stack: [1]
+ kExprLocalGet, 4, // local.get
+ // locals: [1, 0, 1, 1, 0, 10, 1, 10, -1, 1]; stack: [1, 0]
+ kExprI64StoreMem16, 1, 0x03, // i64.store16
+ ]);
+// Generate function 3 (out of 3).
+builder.addFunction('invoker', kSig_v_v)
+ .addBody([
+ ...wasmI64Const(1), // i64.const
+ ...wasmI32Const(0), // i32.const
+ kExprCallFunction, 1, // call function #1
+ ])
+ .exportFunc();
+const instance = builder.instantiate();
+
+var exports = instance.exports;
+exports.invoker();
+assertEquals(0, exports.load());
diff --git a/deps/v8/test/mjsunit/runtime-call-stats.js b/deps/v8/test/mjsunit/runtime-call-stats.js
index e962cde5ef..42231fb5e5 100644
--- a/deps/v8/test/mjsunit/runtime-call-stats.js
+++ b/deps/v8/test/mjsunit/runtime-call-stats.js
@@ -26,11 +26,13 @@ for (var i = 0; i < 100; ++i) {
%PrepareFunctionForOptimization(testConcurrent);
testConcurrent(0.5);
testConcurrent(0.6);
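+// Keep the optimization result pending (instead of blocking the background
+// thread) so the loop below runs while the job is still in flight;
+// %FinalizeOptimization() applies the result afterwards.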
+%DisableOptimizationFinalization();
%OptimizeFunctionOnNextCall(testConcurrent, 'concurrent');
for (var i = 0; i < 100; ++i) {
testConcurrent(0.7);
}
-%GetOptimizationStatus(testConcurrent, 'sync');
+%FinalizeOptimization();
+%GetOptimizationStatus(testConcurrent);
gc();
diff --git a/deps/v8/test/mjsunit/temporal/duration-abs.js b/deps/v8/test/mjsunit/temporal/duration-abs.js
new file mode 100644
index 0000000000..181122058f
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/duration-abs.js
@@ -0,0 +1,20 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+d8.file.execute('test/mjsunit/temporal/temporal-helpers.js');
+
+let d1 = new Temporal.Duration();
+assertDuration(d1.abs(), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true);
+
+let d2 = new Temporal.Duration(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
+assertDuration(d2.abs(), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, false);
+
+// Test large number
+let d3 = new Temporal.Duration(1e5, 2e5, 3e5, 4e5, 5e5, 6e5, 7e5, 8e5, 9e5, 10e5);
+assertDuration(d3.abs(), 1e5, 2e5, 3e5, 4e5, 5e5, 6e5, 7e5, 8e5, 9e5, 10e5, 1, false);
+
+// Test negative values
+let d4 = new Temporal.Duration(-1, -2, -3, -4, -5, -6, -7, -8, -9, -10);
+assertDuration(d4.abs(), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, false);
diff --git a/deps/v8/test/mjsunit/temporal/duration-add.js b/deps/v8/test/mjsunit/temporal/duration-add.js
new file mode 100644
index 0000000000..38d63cfd42
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/duration-add.js
@@ -0,0 +1,40 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+d8.file.execute('test/mjsunit/temporal/temporal-helpers.js');
+
+let d1 = new Temporal.Duration();
+let badDur = {add: d1.add};
+assertThrows(() => badDur.add(d1), TypeError,
+ "Method Temporal.Duration.prototype.add called on incompatible receiver #<Object>");
+
+let relativeToOptions = {relativeTo: "2021-08-01"};
+
+let d2 = new Temporal.Duration(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
+assertThrows(() => d2.add(d1), RangeError, "Invalid time value");
+assertThrows(() => d1.add(d2), RangeError, "Invalid time value");
+assertThrows(() => d2.add(d2), RangeError, "Invalid time value");
+assertDuration(d2.add(d1, relativeToOptions), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, false);
+assertDuration(d1.add(d2, relativeToOptions), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, false);
+assertDuration(d1.add(d1, relativeToOptions), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true);
+assertDuration(d2.add(d2, relativeToOptions), 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 1, false);
+
+// Test large number
+let d3 = new Temporal.Duration(1e5, 2e5, 3e5, 4e5, 5e5, 6e5, 7e5, 8e5, 9e5, 10e5);
+assertThrows(() => d3.add(d3), RangeError, "Invalid time value");
+assertDuration(d3.add(d3, relativeToOptions), 2e5, 4e5, 6e5, 8e5, 1e6, 12e5, 14e5, 16e5, 18e5, 2e6, 1, false);
+
+// Test negative values
+let d4 = new Temporal.Duration(-1, -2, -3, -4, -5, -6, -7, -8, -9, -10);
+assertThrows(() => d4.add(d1), RangeError, "Invalid time value");
+assertThrows(() => d1.add(d4), RangeError, "Invalid time value");
+assertThrows(() => d4.add(d4), RangeError, "Invalid time value");
+assertThrows(() => d2.add(d4), RangeError, "Invalid time value");
+assertThrows(() => d4.add(d2), RangeError, "Invalid time value");
+assertDuration(d4.add(d1, relativeToOptions), -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -1, false);
+assertDuration(d1.add(d4, relativeToOptions), -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -1, false);
+assertDuration(d4.add(d4, relativeToOptions), -2, -4, -6, -8, -10, -12, -14, -16, -18, -20, -1, false);
+assertDuration(d2.add(d4, relativeToOptions), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true);
+assertDuration(d4.add(d2, relativeToOptions), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true);
diff --git a/deps/v8/test/mjsunit/temporal/duration-constructor.js b/deps/v8/test/mjsunit/temporal/duration-constructor.js
new file mode 100644
index 0000000000..3f02034c38
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/duration-constructor.js
@@ -0,0 +1,93 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+d8.file.execute('test/mjsunit/temporal/temporal-helpers.js');
+
+let d1 = new Temporal.Duration();
+assertDuration(d1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true);
+
+let d2 = new Temporal.Duration(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
+assertDuration(d2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, false);
+
+// Test large number
+let d3 = new Temporal.Duration(1e5, 2e5, 3e5, 4e5, 5e5, 6e5, 7e5, 8e5, 9e5, 10e5);
+assertDuration(d3, 1e5, 2e5, 3e5, 4e5, 5e5, 6e5, 7e5, 8e5, 9e5, 10e5, 1, false);
+
+// Test negative values
+let d4 = new Temporal.Duration(-1, -2, -3, -4, -5, -6, -7, -8, -9, -10);
+assertDuration(d4, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -1, false);
+
+// Test NaN
+let d5 = new Temporal.Duration(NaN, NaN, NaN);
+assertDuration(d5, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true);
+// 1. If NewTarget is undefined, then
+// a. Throw a TypeError exception.
+assertThrows(() => Temporal.Duration(), TypeError,
+ "Method invoked on an object that is not Temporal.Duration.");
+
+// 1. Let number be ? ToNumber(argument).
+assertDuration(new Temporal.Duration(undefined, 234, true, false, "567"),
+ 0, 234, 1, 0, 567, 0, 0, 0, 0, 0, 1, false);
+assertThrows(() => new Temporal.Duration(Symbol(123)), TypeError,
+ "Cannot convert a Symbol value to a number");
+assertThrows(() => new Temporal.Duration(123n), TypeError,
+ "Cannot convert a BigInt value to a number");
+
+// Test Infinity
+// 7.5.4 IsValidDuration ( years, months, weeks, days, hours, minutes, seconds,
+// milliseconds, microseconds, nanoseconds )
+// a. If v is not finite, return false.
+assertThrows(() => new Temporal.Duration(1, 2, 3, 4, 5, 6, 7, 8, 9, Infinity),
+ RangeError, "Invalid time value");
+assertThrows(() => new Temporal.Duration(1, 2, 3, 4, 5, 6, 7, 8, Infinity),
+ RangeError, "Invalid time value");
+assertThrows(() => new Temporal.Duration(1, 2, 3, 4, 5, 6, 7, Infinity),
+ RangeError, "Invalid time value");
+assertThrows(() => new Temporal.Duration(1, 2, 3, 4, 5, 6, Infinity),
+ RangeError, "Invalid time value");
+assertThrows(() => new Temporal.Duration(1, 2, 3, 4, 5, Infinity),
+ RangeError, "Invalid time value");
+assertThrows(() => new Temporal.Duration(1, 2, 3, 4, Infinity),
+ RangeError, "Invalid time value");
+assertThrows(() => new Temporal.Duration(1, 2, 3, Infinity),
+ RangeError, "Invalid time value");
+assertThrows(() => new Temporal.Duration(1, 2, Infinity),
+ RangeError, "Invalid time value");
+assertThrows(() => new Temporal.Duration(1, Infinity),
+ RangeError, "Invalid time value");
+assertThrows(() => new Temporal.Duration(Infinity),
+ RangeError, "Invalid time value");
+assertThrows(() => new Temporal.Duration(-1, -2, -3, -4, -5, -6, -7, -8, -9,
+ -Infinity), RangeError, "Invalid time value");
+assertThrows(() => new Temporal.Duration(-1, -2, -3, -4, -5, -6, -7, -8,
+ -Infinity), RangeError, "Invalid time value");
+assertThrows(() => new Temporal.Duration(-1, -2, -3, -4, -5, -6, -7,
+ -Infinity), RangeError, "Invalid time value");
+assertThrows(() => new Temporal.Duration(-1, -2, -3, -4, -5, -6, -Infinity),
+ RangeError, "Invalid time value");
+assertThrows(() => new Temporal.Duration(-1, -2, -3, -4, -5, -Infinity),
+ RangeError, "Invalid time value");
+assertThrows(() => new Temporal.Duration(-1, -2, -3, -4, -Infinity),
+ RangeError, "Invalid time value");
+assertThrows(() => new Temporal.Duration(-1, -2, -3, -Infinity),
+ RangeError, "Invalid time value");
+assertThrows(() => new Temporal.Duration(-1, -2, -Infinity),
+ RangeError, "Invalid time value");
+assertThrows(() => new Temporal.Duration(-1, -Infinity),
+ RangeError, "Invalid time value");
+assertThrows(() => new Temporal.Duration(-Infinity),
+ RangeError, "Invalid time value");
+
+// Mixed-sign fields throw.
+assertThrows(() => new Temporal.Duration(1, -2),
+ RangeError, "Invalid time value");
+assertThrows(() => new Temporal.Duration(1, 0, -2),
+ RangeError, "Invalid time value");
+assertThrows(() => new Temporal.Duration(-1, 0, 0, 3),
+ RangeError, "Invalid time value");
+assertThrows(() => new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, 1, -1),
+ RangeError, "Invalid time value");
+assertThrows(() => new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, -1, 1),
+ RangeError, "Invalid time value");
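+
+// Zero-valued fields are sign-neutral, so a single nonzero negative field is
+// still valid (editorial sketch):
+assertDuration(new Temporal.Duration(0, -2),
+  0, -2, 0, 0, 0, 0, 0, 0, 0, 0, -1, false);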
diff --git a/deps/v8/test/mjsunit/temporal/duration-from.js b/deps/v8/test/mjsunit/temporal/duration-from.js
new file mode 100644
index 0000000000..8e19063c8a
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/duration-from.js
@@ -0,0 +1,227 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+d8.file.execute('test/mjsunit/temporal/temporal-helpers.js');
+
+let d1 = new Temporal.Duration(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
+assertDuration(d1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, false);
+let d2 = Temporal.Duration.from(d1);
+assertDuration(d2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, false);
+assertNotSame(d1, d2);
+
+assertDuration(Temporal.Duration.from("PT0S"),
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true);
+assertDuration(Temporal.Duration.from("P1Y"),
+ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("P2M"),
+ 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("P3W"),
+ 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("P4D"),
+ 0, 0, 0, 4, 0, 0, 0, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT5H"),
+ 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT6M"),
+ 0, 0, 0, 0, 0, 6, 0, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT7S"),
+ 0, 0, 0, 0, 0, 0, 7, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT0.008S"),
+ 0, 0, 0, 0, 0, 0, 0, 8, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT0.000009S"),
+ 0, 0, 0, 0, 0, 0, 0, 0, 9, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT0.000000001S"),
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, false);
+assertDuration(Temporal.Duration.from("P1Y2M3W4DT5H6M7.008009001S"),
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 1, false);
+assertDuration(Temporal.Duration.from(
+ "P111111111Y222222222M333333333W444444444D" +
+ "T555555555H666666666M777777777.987654321S"),
+ 111111111, 222222222, 333333333, 444444444,
+ 555555555, 666666666, 777777777, 987, 654, 321, 1, false);
+
+assertDuration(Temporal.Duration.from("P1Y3WT5H7.000009001S"),
+ 1, 0, 3, 0, 5, 0, 7, 0, 9, 1, 1, false);
+assertDuration(Temporal.Duration.from("P2M4DT6M0.008000001S"),
+ 0, 2, 0, 4, 0, 6, 0, 8, 0, 1, 1, false);
+assertDuration(Temporal.Duration.from("P1Y4DT7.000000001S"),
+ 1, 0, 0, 4, 0, 0, 7, 0, 0, 1, 1, false);
+assertDuration(Temporal.Duration.from("P2MT5H0.008S"),
+ 0, 2, 0, 0, 5, 0, 0, 8, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("P3WT6M0.000009S"),
+ 0, 0, 3, 0, 0, 6, 0, 0, 9, 0, 1, false);
+assertDuration(Temporal.Duration.from("P1YT5H0.000009000S"),
+ 1, 0, 0, 0, 5, 0, 0, 0, 9, 0, 1, false);
+assertDuration(Temporal.Duration.from("P2MT6M0.000000001S"),
+ 0, 2, 0, 0, 0, 6, 0, 0, 0, 1, 1, false);
+assertDuration(Temporal.Duration.from("P3WT7S"),
+ 0, 0, 3, 0, 0, 0, 7, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("P4DT0.008S"),
+ 0, 0, 0, 4, 0, 0, 0, 8, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("P1YT5H0.000000001S"),
+ 1, 0, 0, 0, 5, 0, 0, 0, 0, 1, 1, false);
+assertDuration(Temporal.Duration.from("P2MT6M"),
+ 0, 2, 0, 0, 0, 6, 0, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("P3WT7S"),
+ 0, 0, 3, 0, 0, 0, 7, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("P4DT0.008S"),
+ 0, 0, 0, 4, 0, 0, 0, 8, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT5H0.000009S"),
+ 0, 0, 0, 0, 5, 0, 0, 0, 9, 0, 1, false);
+assertDuration(Temporal.Duration.from("P1YT6M"),
+ 1, 0, 0, 0, 0, 6, 0, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("P2MT7S"),
+ 0, 2, 0, 0, 0, 0, 7, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("P3WT0.008S"),
+ 0, 0, 3, 0, 0, 0, 0, 8, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("P4DT0.000009S"),
+ 0, 0, 0, 4, 0, 0, 0, 0, 9, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT5H0.000000001S"),
+ 0, 0, 0, 0, 5, 0, 0, 0, 0, 1, 1, false);
+assertDuration(Temporal.Duration.from("P1YT7S"),
+ 1, 0, 0, 0, 0, 0, 7, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("P2MT0.008S"),
+ 0, 2, 0, 0, 0, 0, 0, 8, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("P3WT0.000009S"),
+ 0, 0, 3, 0, 0, 0, 0, 0, 9, 0, 1, false);
+assertDuration(Temporal.Duration.from("P4DT0.000000001S"),
+ 0, 0, 0, 4, 0, 0, 0, 0, 0, 1, 1, false);
+assertDuration(Temporal.Duration.from("PT5H"),
+ 0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 1, false);
+
+assertDuration(Temporal.Duration.from("-P1Y2M3W4DT5H6M7.008009001S"),
+ -1, -2, -3, -4, -5, -6, -7, -8, -9, -1, -1, false);
+assertDuration(Temporal.Duration.from(
+ "-P111111111Y222222222M333333333W444444444D" +
+ "T555555555H666666666M777777777.987654321S"),
+ -111111111, -222222222, -333333333, -444444444,
+ -555555555, -666666666, -777777777, -987, -654, -321, -1, false);
+// Sign given as U+2212 MINUS SIGN
+assertDuration(Temporal.Duration.from("\u2212P1Y2M3W4DT5H6M7.008009001S"),
+ -1, -2, -3, -4, -5, -6, -7, -8, -9, -1, -1, false);
+// positive sign
+assertDuration(Temporal.Duration.from("+P1Y2M3W4DT5H6M7.008009001S"),
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 1, false);
+
+assertDuration(Temporal.Duration.from("PT2.5H"),
+ 0, 0, 0, 0, 2, 30, 0, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT2.25H"),
+ 0, 0, 0, 0, 2, 15, 0, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT2.05H"),
+  0, 0, 0, 0, 2, 3, 0, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT2.005H"),
+ 0, 0, 0, 0, 2, 0, 18, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT2.505H"),
+ 0, 0, 0, 0, 2, 30, 18, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT2.0025H"),
+ 0, 0, 0, 0, 2, 0, 9, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT3.5M"),
+ 0, 0, 0, 0, 0, 3, 30, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT3.25M"),
+ 0, 0, 0, 0, 0, 3, 15, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT3.125M"),
+ 0, 0, 0, 0, 0, 3, 7, 500, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT3.025M"),
+ 0, 0, 0, 0, 0, 3, 1, 500, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT3.01M"),
+ 0, 0, 0, 0, 0, 3, 0, 600, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT3.005M"),
+ 0, 0, 0, 0, 0, 3, 0, 300, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT3.001M"),
+ 0, 0, 0, 0, 0, 3, 0, 60, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT3.006M"),
+ 0, 0, 0, 0, 0, 3, 0, 360, 0, 0, 1, false);
+
+// Use , instead of .
+assertDuration(Temporal.Duration.from("PT2,5H"),
+ 0, 0, 0, 0, 2, 30, 0, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT2,25H"),
+ 0, 0, 0, 0, 2, 15, 0, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT2,05H"),
+  0, 0, 0, 0, 2, 3, 0, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT2,005H"),
+ 0, 0, 0, 0, 2, 0, 18, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT2,505H"),
+ 0, 0, 0, 0, 2, 30, 18, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT2,0025H"),
+ 0, 0, 0, 0, 2, 0, 9, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT3,5M"),
+ 0, 0, 0, 0, 0, 3, 30, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT3,25M"),
+ 0, 0, 0, 0, 0, 3, 15, 0, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT3,125M"),
+ 0, 0, 0, 0, 0, 3, 7, 500, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT3,025M"),
+ 0, 0, 0, 0, 0, 3, 1, 500, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT3,01M"),
+ 0, 0, 0, 0, 0, 3, 0, 600, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT3,005M"),
+ 0, 0, 0, 0, 0, 3, 0, 300, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT3,001M"),
+ 0, 0, 0, 0, 0, 3, 0, 60, 0, 0, 1, false);
+assertDuration(Temporal.Duration.from("PT3,006M"),
+ 0, 0, 0, 0, 0, 3, 0, 360, 0, 0, 1, false);
+
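+// Invalid strings below: time units without the "T" designator ("P2H",
+// "P2S"), a fraction anywhere but on the smallest time unit present, and
+// fractions with no digits on one side of the separator.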
+assertThrows(() => Temporal.Duration.from("P2H"), RangeError,
+ "Invalid time value");
+assertThrows(() => Temporal.Duration.from("P2.5M"), RangeError,
+ "Invalid time value");
+assertThrows(() => Temporal.Duration.from("P2,5M"), RangeError,
+ "Invalid time value");
+assertThrows(() => Temporal.Duration.from("P2S"), RangeError,
+ "Invalid time value");
+assertThrows(() => Temporal.Duration.from("PT2.H3M"), RangeError,
+ "Invalid time value");
+assertThrows(() => Temporal.Duration.from("PT2,H3M"), RangeError,
+ "Invalid time value");
+assertThrows(() => Temporal.Duration.from("PT2.H3S"), RangeError,
+ "Invalid time value");
+assertThrows(() => Temporal.Duration.from("PT2,H3S"), RangeError,
+ "Invalid time value");
+assertThrows(() => Temporal.Duration.from("PT2.H0.5M"), RangeError,
+ "Invalid time value");
+assertThrows(() => Temporal.Duration.from("PT2,H0,5M"), RangeError,
+ "Invalid time value");
+assertThrows(() => Temporal.Duration.from("PT2.H0.5S"), RangeError,
+ "Invalid time value");
+assertThrows(() => Temporal.Duration.from("PT2,H0,5S"), RangeError,
+ "Invalid time value");
+assertThrows(() => Temporal.Duration.from("PT2H3.2M3S"), RangeError,
+ "Invalid time value");
+assertThrows(() => Temporal.Duration.from("PT2H3,2M3S"), RangeError,
+ "Invalid time value");
+assertThrows(() => Temporal.Duration.from("PT2H3.2M0.3S"), RangeError,
+ "Invalid time value");
+assertThrows(() => Temporal.Duration.from("PT2H3,2M0,3S"), RangeError,
+ "Invalid time value");
+assertThrows(() => Temporal.Duration.from("PT.1H"), RangeError,
+ "Invalid time value");
+assertThrows(() => Temporal.Duration.from("PT,1H"), RangeError,
+ "Invalid time value");
+assertThrows(() => Temporal.Duration.from("PT.1M"), RangeError,
+ "Invalid time value");
+assertThrows(() => Temporal.Duration.from("PT,1M"), RangeError,
+ "Invalid time value");
+assertThrows(() => Temporal.Duration.from("PT.1S"), RangeError,
+ "Invalid time value");
+assertThrows(() => Temporal.Duration.from("PT,1S"), RangeError,
+ "Invalid time value");
+
+assertDuration(Temporal.Duration.from(
+ {years: 0, months: 0, weeks: 0, days: 0,
+ hours: 0, minutes: 0, seconds: 0,
+ milliseconds: 0, microseconds: 0, nanoseconds:0}),
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true);
+
+assertDuration(Temporal.Duration.from(
+ {years: 1, months: 2, weeks: 3, days: 4,
+ hours: 5, minutes: 6, seconds: 7,
+ milliseconds: 8, microseconds: 9, nanoseconds:10}),
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, false);
+
+assertDuration(Temporal.Duration.from(
+ {years: -1, months: -2, weeks: -3, days: -4,
+ hours: -5, minutes: -6, seconds: -7,
+ milliseconds: -8, microseconds: -9, nanoseconds:-10}),
+ -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -1, false);
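+
+// A partial property bag is also accepted; absent fields default to 0
+// (editorial sketch):
+assertDuration(Temporal.Duration.from({hours: 5}),
+  0, 0, 0, 0, 5, 0, 0, 0, 0, 0, 1, false);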
diff --git a/deps/v8/test/mjsunit/temporal/duration-negated.js b/deps/v8/test/mjsunit/temporal/duration-negated.js
new file mode 100644
index 0000000000..a161a819d3
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/duration-negated.js
@@ -0,0 +1,24 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+d8.file.execute('test/mjsunit/temporal/temporal-helpers.js');
+
+let d1 = new Temporal.Duration();
+assertDuration(d1.negated(), -0, -0, -0, -0, -0, -0, -0, -0, -0, -0, 0, true);
+
+let d2 = new Temporal.Duration(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
+assertDuration(d2.negated(), -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -1, false);
+
+// Test large number
+let d3 = new Temporal.Duration(1e5, 2e5, 3e5, 4e5, 5e5, 6e5, 7e5, 8e5, 9e5, 10e5);
+assertDuration(d3.negated(), -1e5, -2e5, -3e5, -4e5, -5e5, -6e5, -7e5, -8e5, -9e5, -10e5, -1, false);
+
+// Test negative values
+let d4 = new Temporal.Duration(-1, -2, -3, -4, -5, -6, -7, -8, -9, -10);
+assertDuration(d4.negated(), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, false);
+
+let d5 = new Temporal.Duration(-1e5, -2e5, -3e5, -4e5, -5e5, -6e5, -7e5, -8e5, -9e5, -10e5);
+assertDuration(d5.negated(), 1e5, 2e5, 3e5, 4e5, 5e5, 6e5, 7e5, 8e5, 9e5, 10e5, 1, false);
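+
+// Negating twice gives back the original values (editorial sketch):
+assertDuration(d2.negated().negated(), 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, false);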
diff --git a/deps/v8/test/mjsunit/temporal/duration-to-json.js b/deps/v8/test/mjsunit/temporal/duration-to-json.js
new file mode 100644
index 0000000000..fbd2fc320b
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/duration-to-json.js
@@ -0,0 +1,184 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+assertEquals("PT0S", (new Temporal.Duration()).toJSON());
+
+assertEquals("P1Y", (new Temporal.Duration(1)).toJSON());
+assertEquals("-P1Y", (new Temporal.Duration(-1)).toJSON());
+assertEquals("P1234567890Y", (new Temporal.Duration(1234567890)).toJSON());
+assertEquals("-P1234567890Y", (new Temporal.Duration(-1234567890)).toJSON());
+
+assertEquals("P1Y2M", (new Temporal.Duration(1, 2)).toJSON());
+assertEquals("-P1Y2M", (new Temporal.Duration(-1, -2)).toJSON());
+assertEquals("P2M", (new Temporal.Duration(0, 2)).toJSON());
+assertEquals("-P2M", (new Temporal.Duration(0,-2)).toJSON());
+assertEquals("P1234567890M", (new Temporal.Duration(0, 1234567890)).toJSON());
+assertEquals("-P1234567890M", (new Temporal.Duration(0,-1234567890)).toJSON());
+
+assertEquals("P1Y2M3W", (new Temporal.Duration(1, 2, 3)).toJSON());
+assertEquals("-P1Y2M3W", (new Temporal.Duration(-1, -2, -3)).toJSON());
+assertEquals("P3W", (new Temporal.Duration(0, 0, 3)).toJSON());
+assertEquals("-P3W", (new Temporal.Duration(0, 0, -3)).toJSON());
+assertEquals("P1Y3W", (new Temporal.Duration(1, 0, 3)).toJSON());
+assertEquals("-P1Y3W", (new Temporal.Duration(-1, 0, -3)).toJSON());
+assertEquals("P2M3W", (new Temporal.Duration(0, 2, 3)).toJSON());
+assertEquals("-P2M3W", (new Temporal.Duration(0, -2, -3)).toJSON());
+assertEquals("P1234567890W",
+ (new Temporal.Duration(0, 0, 1234567890)).toJSON());
+assertEquals("-P1234567890W",
+ (new Temporal.Duration(0, 0, -1234567890)).toJSON());
+
+assertEquals("P1Y2M3W4D", (new Temporal.Duration(1, 2, 3, 4)).toJSON());
+assertEquals("-P1Y2M3W4D", (new Temporal.Duration(-1, -2, -3, -4)).toJSON());
+assertEquals("P1234567890D",
+ (new Temporal.Duration(0, 0, 0, 1234567890)).toJSON());
+assertEquals("-P1234567890D",
+ (new Temporal.Duration(0, 0, 0, -1234567890)).toJSON());
+assertEquals("P4D", (new Temporal.Duration(0, 0, 0, 4)).toJSON());
+assertEquals("-P4D", (new Temporal.Duration(0, 0, 0, -4)).toJSON());
+assertEquals("P1Y4D", (new Temporal.Duration(1, 0, 0, 4)).toJSON());
+assertEquals("-P1Y4D", (new Temporal.Duration(-1, 0, 0, -4)).toJSON());
+assertEquals("P2M4D", (new Temporal.Duration(0, 2, 0, 4)).toJSON());
+assertEquals("-P2M4D", (new Temporal.Duration(0, -2, 0, -4)).toJSON());
+assertEquals("P3W4D", (new Temporal.Duration(0, 0, 3, 4)).toJSON());
+assertEquals("-P3W4D", (new Temporal.Duration(0, 0, -3, -4)).toJSON());
+
+assertEquals("PT5H", (new Temporal.Duration(0, 0, 0, 0, 5)).toJSON());
+assertEquals("-PT5H", (new Temporal.Duration(0, 0, 0, 0, -5)).toJSON());
+assertEquals("P1YT5H", (new Temporal.Duration(1, 0, 0, 0, 5)).toJSON());
+assertEquals("-P1YT5H", (new Temporal.Duration(-1, 0, 0, 0, -5)).toJSON());
+assertEquals("P2MT5H", (new Temporal.Duration(0, 2, 0, 0, 5)).toJSON());
+assertEquals("-P2MT5H", (new Temporal.Duration(0, -2, 0, 0, -5)).toJSON());
+
+assertEquals("PT6M", (new Temporal.Duration(0, 0, 0, 0, 0, 6)).toJSON());
+assertEquals("-PT6M", (new Temporal.Duration(0, 0, 0, 0, 0, -6)).toJSON());
+assertEquals("PT5H6M", (new Temporal.Duration(0, 0, 0, 0, 5, 6)).toJSON());
+assertEquals("-PT5H6M", (new Temporal.Duration(0, 0, 0, 0, -5, -6)).toJSON());
+assertEquals("P3WT6M", (new Temporal.Duration(0, 0, 3, 0, 0, 6)).toJSON());
+assertEquals("-P3WT6M", (new Temporal.Duration(0, 0, -3, 0, 0, -6)).toJSON());
+assertEquals("P4DT6M", (new Temporal.Duration(0, 0, 0, 4, 0, 6)).toJSON());
+assertEquals("-P4DT6M", (new Temporal.Duration(0, 0, 0, -4, 0, -6)).toJSON());
+
+assertEquals("PT7S", (new Temporal.Duration(0, 0, 0, 0, 0, 0, 7)).toJSON());
+assertEquals("-PT7S", (new Temporal.Duration(0, 0, 0, 0, 0, 0, -7)).toJSON());
+assertEquals("PT5H7S", (new Temporal.Duration(0, 0, 0, 0, 5, 0, 7)).toJSON());
+assertEquals("-PT5H7S",
+ (new Temporal.Duration(0, 0, 0, 0, -5, 0, -7)).toJSON());
+assertEquals("PT6M7S", (new Temporal.Duration(0, 0, 0, 0, 0, 6, 7)).toJSON());
+assertEquals("-PT6M7S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, -6, -7)).toJSON());
+assertEquals("PT5H6M7S", (new Temporal.Duration(0, 0, 0, 0, 5, 6, 7)).toJSON());
+assertEquals("-PT5H6M7S",
+ (new Temporal.Duration(0, 0, 0, 0, -5, -6, -7)).toJSON());
+assertEquals("P1YT5H6M7S",
+ (new Temporal.Duration(1, 0, 0, 0, 5, 6, 7)).toJSON());
+assertEquals("-P1YT5H6M7S",
+ (new Temporal.Duration(-1, 0, 0, 0, -5, -6, -7)).toJSON());
+
+assertEquals("PT0.008S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 8)).toJSON());
+assertEquals("-PT0.008S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, -8)).toJSON());
+assertEquals("PT0.08S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 80)).toJSON());
+assertEquals("-PT0.08S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, -80)).toJSON());
+assertEquals("PT0.087S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 87)).toJSON());
+assertEquals("-PT0.087S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, -87)).toJSON());
+assertEquals("PT0.876S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 876)).toJSON());
+assertEquals("-PT0.876S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, -876)).toJSON());
+assertEquals("PT876.543S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 876543)).toJSON());
+assertEquals("-PT876.543S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, -876543)).toJSON());
+
+assertEquals("PT0.000009S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, 9)).toJSON());
+assertEquals("-PT0.000009S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, -9)).toJSON());
+assertEquals("PT0.00009S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, 90)).toJSON());
+assertEquals("-PT0.00009S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, -90)).toJSON());
+assertEquals("PT0.000098S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, 98)).toJSON());
+assertEquals("-PT0.000098S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, -98)).toJSON());
+assertEquals("PT0.0009S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, 900)).toJSON());
+assertEquals("-PT0.0009S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, -900)).toJSON());
+assertEquals("PT0.000987S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, 987)).toJSON());
+assertEquals("-PT0.000987S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, -987)).toJSON());
+assertEquals("PT0.987654S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, 987654)).toJSON());
+assertEquals("-PT0.987654S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, -987654)).toJSON());
+assertEquals("PT987.654321S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, 987654321)).toJSON());
+assertEquals("-PT987.654321S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, -987654321)).toJSON());
+
+assertEquals("PT0.000000001S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, 0, 1)).toJSON());
+assertEquals("-PT0.000000001S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, 0, -1)).toJSON());
+assertEquals("PT0.00000001S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, 0, 10)).toJSON());
+assertEquals("-PT0.00000001S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, 0, -10)).toJSON());
+assertEquals("PT0.000000012S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, 0, 12)).toJSON());
+assertEquals("-PT0.000000012S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, 0, -12)).toJSON());
+assertEquals("PT0.0000001S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, 0, 100)).toJSON());
+assertEquals("-PT0.0000001S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, 0, -100)).toJSON());
+assertEquals("PT0.000000123S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, 0, 123)).toJSON());
+assertEquals("-PT0.000000123S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, 0, -123)).toJSON());
+assertEquals("PT0.000123456S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, 0, 123456)).toJSON());
+assertEquals("-PT0.000123456S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, 0, -123456)).toJSON());
+assertEquals("PT0.123456789S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, 0, 123456789)).toJSON());
+assertEquals("-PT0.123456789S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, 0, -123456789)).toJSON());
+assertEquals("PT1.234567891S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, 0, 1234567891)).toJSON());
+assertEquals("-PT1.234567891S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 0, 0, 0, -1234567891)).toJSON());
+assertEquals("PT4.003002001S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 4, 3, 2, 1)).toJSON());
+assertEquals("-PT4.003002001S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, -4, -3, -2, -1)).toJSON());
+assertEquals("PT4.003092001S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 4, 3, 2, 90001)).toJSON());
+assertEquals("-PT4.003092001S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, -4, -3, -2, -90001)).toJSON());
+assertEquals("PT4.093082001S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, 4, 3, 2, 90080001)).toJSON());
+assertEquals("-PT4.093082001S",
+ (new Temporal.Duration(0, 0, 0, 0, 0, 0, -4, -3, -2, -90080001)).toJSON());
+
+assertEquals("P1Y2M3W4DT5H6M7.008009001S",
+ (new Temporal.Duration(1, 2, 3, 4, 5, 6, 7, 8, 9, 1)).toJSON());
+assertEquals("-P1Y2M3W4DT5H6M7.008009001S",
+ (new Temporal.Duration(-1, -2, -3, -4, -5, -6, -7, -8, -9, -1)).toJSON());
+assertEquals("P1234Y2345M3456W4567DT5678H6789M7890.890901123S",
+ (new Temporal.Duration(1234, 2345, 3456, 4567, 5678, 6789, 7890,
+ 890, 901, 123)).toJSON());
+assertEquals("-P1234Y2345M3456W4567DT5678H6789M7890.890901123S",
+ (new Temporal.Duration(-1234, -2345, -3456, -4567, -5678, -6789, -7890,
+ -890, -901, -123)).toJSON());
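+
+// toJSON should agree with the default toString serialization (editorial
+// sketch; assumes toString with no options):
+let d = new Temporal.Duration(1, 2, 3, 4, 5, 6, 7, 8, 9, 1);
+assertEquals(d.toString(), d.toJSON());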
diff --git a/deps/v8/test/mjsunit/temporal/duration-valueOf.js b/deps/v8/test/mjsunit/temporal/duration-valueOf.js
new file mode 100644
index 0000000000..1a948868f3
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/duration-valueOf.js
@@ -0,0 +1,8 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let d1 = new Temporal.Duration();
+assertThrows(() => d1.valueOf(), TypeError,
+ "Method Temporal.Duration called on a non-object or on a wrong type of object.");
diff --git a/deps/v8/test/mjsunit/temporal/duration-with.js b/deps/v8/test/mjsunit/temporal/duration-with.js
new file mode 100644
index 0000000000..ec9d775057
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/duration-with.js
@@ -0,0 +1,112 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+d8.file.execute('test/mjsunit/temporal/temporal-helpers.js');
+
+let like1 = {years:9, months:8, weeks:7, days:6, hours: 5, minutes: 4,
+ seconds: 3, milliseconds: 2, microseconds: 1, nanoseconds: 10};
+let like2 = {years: 9, hours:5};
+let like3 = {months: 8, minutes:4};
+let like4 = {weeks: 7, seconds:3};
+let like5 = {days: 6, milliseconds:2};
+let like6 = {microseconds: 987, nanoseconds: 123};
+let like7 = {years:-9, months:-8, weeks:-7, days:-6, hours: -5, minutes: -4,
+ seconds: -3, milliseconds: -2, microseconds: -1, nanoseconds: -10};
+
+let d1 = new Temporal.Duration();
+assertDuration(d1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, true);
+assertDuration(d1.with(like1), 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, 1, false);
+assertDuration(d1.with(like2), 9, 0, 0, 0, 5, 0, 0, 0, 0, 0, 1, false);
+assertDuration(d1.with(like3), 0, 8, 0, 0, 0, 4, 0, 0, 0, 0, 1, false);
+assertDuration(d1.with(like4), 0, 0, 7, 0, 0, 0, 3, 0, 0, 0, 1, false);
+assertDuration(d1.with(like5), 0, 0, 0, 6, 0, 0, 0, 2, 0, 0, 1, false);
+assertDuration(d1.with(like6), 0, 0, 0, 0, 0, 0, 0, 0, 987, 123, 1, false);
+assertDuration(d1.with(like7), -9, -8, -7, -6, -5, -4, -3, -2, -1, -10, -1,
+ false);
+
+let d2 = new Temporal.Duration(1, 2, 3, 4, 5, 6, 7, 8, 9, 10);
+assertDuration(d2, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 1, false);
+assertDuration(d2.with(like1), 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, 1, false);
+assertDuration(d2.with(like7), -9, -8, -7, -6, -5, -4, -3, -2, -1, -10, -1,
+ false);
+// Different sign
+assertThrows(() => d2.with({years: -1}), RangeError,
+ "Invalid time value");
+assertThrows(() => d2.with({months: -2}), RangeError,
+ "Invalid time value");
+assertThrows(() => d2.with({weeks: -3}), RangeError,
+ "Invalid time value");
+assertThrows(() => d2.with({days: -4}), RangeError,
+ "Invalid time value");
+assertThrows(() => d2.with({hours: -5}), RangeError,
+ "Invalid time value");
+assertThrows(() => d2.with({minutes: -6}), RangeError,
+ "Invalid time value");
+assertThrows(() => d2.with({seconds: -7}), RangeError,
+ "Invalid time value");
+assertThrows(() => d2.with({milliseconds: -8}), RangeError,
+ "Invalid time value");
+assertThrows(() => d2.with({microseconds: -9}), RangeError,
+ "Invalid time value");
+assertThrows(() => d2.with({nanoseconds: -10}), RangeError,
+ "Invalid time value");
+
+// Test large number
+let d3 = new Temporal.Duration(1e5, 2e5, 3e5, 4e5, 5e5, 6e5, 7e5, 8e5, 9e5,
+ 10e5);
+assertDuration(d3, 1e5, 2e5, 3e5, 4e5, 5e5, 6e5, 7e5, 8e5, 9e5, 10e5, 1,
+ false);
+assertDuration(d3.with(like1), 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, 1, false);
+assertDuration(d3.with(like7), -9, -8, -7, -6, -5, -4, -3, -2, -1, -10, -1,
+ false);
+
+// Test negative values
+let d4 = new Temporal.Duration(-1, -2, -3, -4, -5, -6, -7, -8, -9, -10);
+assertDuration(d4, -1, -2, -3, -4, -5, -6, -7, -8, -9, -10, -1, false);
+assertDuration(d4.with(like1), 9, 8, 7, 6, 5, 4, 3, 2, 1, 10, 1, false);
+// Throw when the sign would flip.
+assertThrows(() => d4.with({years: 1}), RangeError,
+ "Invalid time value");
+assertThrows(() => d4.with({months: 2}), RangeError,
+ "Invalid time value");
+assertThrows(() => d4.with({weeks: 3}), RangeError,
+ "Invalid time value");
+assertThrows(() => d4.with({days: 4}), RangeError,
+ "Invalid time value");
+assertThrows(() => d4.with({hours: 5}), RangeError,
+ "Invalid time value");
+assertThrows(() => d4.with({minutes: 6}), RangeError,
+ "Invalid time value");
+assertThrows(() => d4.with({seconds: 7}), RangeError,
+ "Invalid time value");
+assertThrows(() => d4.with({milliseconds: 8}), RangeError,
+ "Invalid time value");
+assertThrows(() => d4.with({microseconds: 9}), RangeError,
+ "Invalid time value");
+assertThrows(() => d4.with({nanoseconds: 10}), RangeError,
+ "Invalid time value");
+
+// Singular field names (year, month, ...) throw.
+assertThrows(() => d1.with({year:1}), TypeError,
+ "invalid_argument");
+assertThrows(() => d1.with({month:1}), TypeError,
+ "invalid_argument");
+assertThrows(() => d1.with({week:1}), TypeError,
+ "invalid_argument");
+assertThrows(() => d1.with({day:1}), TypeError,
+ "invalid_argument");
+assertThrows(() => d1.with({hour:1}), TypeError,
+ "invalid_argument");
+assertThrows(() => d1.with({minute:1}), TypeError,
+ "invalid_argument");
+assertThrows(() => d1.with({second:1}), TypeError,
+ "invalid_argument");
+assertThrows(() => d1.with({millisecond:1}), TypeError,
+ "invalid_argument");
+assertThrows(() => d1.with({microsecond:1}), TypeError,
+ "invalid_argument");
+assertThrows(() => d1.with({nanosecond:1}), TypeError,
+ "invalid_argument");
diff --git a/deps/v8/test/mjsunit/temporal/instant-add.js b/deps/v8/test/mjsunit/temporal/instant-add.js
new file mode 100644
index 0000000000..bd664870c8
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/instant-add.js
@@ -0,0 +1,48 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let i1 = new Temporal.Instant(50000n);
+assertEquals(3052001n,
+ i1.add(new Temporal.Duration(0,0,0,0,0,0,0,3,2,1)).epochNanoseconds);
+
+assertEquals(BigInt(4 * 1e9) + 3052001n,
+ i1.add(new Temporal.Duration(0,0,0,0,0,0,4,3,2,1)).epochNanoseconds);
+
+assertEquals(BigInt(5 * 60 + 4) * 1000000000n + 3052001n,
+ i1.add(new Temporal.Duration(0,0,0,0,0,5,4,3,2,1)).epochNanoseconds);
+
+assertEquals(BigInt(6 * 3600 + 5 * 60 + 4) * 1000000000n + 3052001n,
+ i1.add(new Temporal.Duration(0,0,0,0,6,5,4,3,2,1)).epochNanoseconds);
+
+assertEquals(-2952001n,
+ i1.add(new Temporal.Duration(0,0,0,0,0,0,0,-3,-2,-1)).epochNanoseconds);
+
+assertEquals(BigInt(-4 * 1e9) - 2952001n,
+ i1.add(new Temporal.Duration(0,0,0,0,0,0,-4,-3,-2,-1)).epochNanoseconds);
+
+assertEquals(BigInt(5 * 60 + 4) * -1000000000n - 2952001n,
+ i1.add(new Temporal.Duration(0,0,0,0,0,-5,-4,-3,-2,-1)).epochNanoseconds);
+
+assertEquals(BigInt(6 * 3600 + 5 * 60 + 4) * -1000000000n - 2952001n,
+ i1.add(new Temporal.Duration(0,0,0,0,-6,-5,-4,-3,-2,-1)).epochNanoseconds);
+
+// Test that RequireInternalSlot throws a TypeError.
+let badInstant = { add: i1.add };
+assertThrows(() => badInstant.add(new Temporal.Duration(0, 0, 0, 0, 5)), TypeError);
+
+// Test that ToLimitedTemporalDuration throws a RangeError.
+assertThrows(() => i1.add(new Temporal.Duration(1)), RangeError);
+assertThrows(() => i1.add(new Temporal.Duration(0, 2)), RangeError);
+assertThrows(() => i1.add(new Temporal.Duration(0, 0, 3)), RangeError);
+assertThrows(() => i1.add(new Temporal.Duration(0, 0, 0, 4)), RangeError);
+assertThrows(() => i1.add(new Temporal.Duration(-1)), RangeError);
+assertThrows(() => i1.add(new Temporal.Duration(0, -2)), RangeError);
+assertThrows(() => i1.add(new Temporal.Duration(0, 0, -3)), RangeError);
+assertThrows(() => i1.add(new Temporal.Duration(0, 0, 0, -4)), RangeError);
+
+// Test that AddInstant throws a RangeError.
+let i2 = new Temporal.Instant(86400n * 99999999999999999n);
+assertThrows(() => i2.add(new Temporal.Duration(0, 0, 0, 0, 999999999)), RangeError);
diff --git a/deps/v8/test/mjsunit/temporal/instant-compare.js b/deps/v8/test/mjsunit/temporal/instant-compare.js
new file mode 100644
index 0000000000..1f27d75cef
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/instant-compare.js
@@ -0,0 +1,21 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let inst1 = new Temporal.Instant(1234567890123456789n);
+let inst2 = new Temporal.Instant(1234567890123456000n);
+let inst3 = new Temporal.Instant(1234567890123456000n);
+
+assertEquals(Temporal.Instant.compare(inst2, inst3), 0);
+assertEquals(Temporal.Instant.compare(inst1, inst2), 1);
+assertEquals(Temporal.Instant.compare(inst3, inst1), -1);
+
+assertThrows(() => Temporal.Instant.compare(inst1, "invalid iso8601 string"),
+ RangeError);
+assertThrows(() => Temporal.Instant.compare("invalid iso8601 string", inst1),
+ RangeError);
+
+// TODO Test Temporal.Instant.compare with Temporal.ZonedDateTime object
+// TODO Test Temporal.Instant.compare with TemporalInstantString
diff --git a/deps/v8/test/mjsunit/temporal/instant-constructor.js b/deps/v8/test/mjsunit/temporal/instant-constructor.js
new file mode 100644
index 0000000000..74b96583fb
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/instant-constructor.js
@@ -0,0 +1,43 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let inst1 = new Temporal.Instant(1234567890123456789n);
+assertEquals(1234567890123456789n, inst1.epochNanoseconds);
+assertEquals(1234567890123456n, inst1.epochMicroseconds);
+assertEquals(1234567890123n, inst1.epochMilliseconds);
+assertEquals(1234567890n, inst1.epochSeconds);
+
+let inst2 = new Temporal.Instant(-1234567890123456789n);
+assertEquals(-1234567890123456789n, inst2.epochNanoseconds);
+assertEquals(-1234567890123456n, inst2.epochMicroseconds);
+assertEquals(-1234567890123n, inst2.epochMilliseconds);
+assertEquals(-1234567890n, inst2.epochSeconds);
+
+// 1. If NewTarget is undefined, then
+// a. Throw a TypeError exception.
+assertThrows(() => Temporal.Instant(1234567890123456789n), TypeError);
+
+// 2. Let epochNanoseconds be ? ToBigInt(epochNanoseconds).
+assertThrows(() => {let inst = new Temporal.Instant(undefined)},
+ TypeError);
+assertThrows(() => {let inst = new Temporal.Instant(null)}, TypeError);
+assertEquals(1n, (new Temporal.Instant(true)).epochNanoseconds);
+assertEquals(0n, (new Temporal.Instant(false)).epochNanoseconds);
+assertThrows(() => {let inst = new Temporal.Instant(12345)}, TypeError);
+assertEquals(1234567890123456789n,
+ (new Temporal.Instant("1234567890123456789")).epochNanoseconds);
+assertThrows(() => {let inst = new Temporal.Instant(Symbol(12345n))},
+ TypeError);
+
+// 3. If ! IsValidEpochNanoseconds(epochNanoseconds) is false,
+// throw a RangeError exception.
+assertThrows(() => {let inst = new Temporal.Instant(8640000000000000000001n)},
+ RangeError);
+assertThrows(() => {let inst = new Temporal.Instant(-8640000000000000000001n)},
+ RangeError);
+assertEquals(8640000000000000000000n,
+ (new Temporal.Instant(8640000000000000000000n)).epochNanoseconds);
+assertEquals(-8640000000000000000000n,
+ (new Temporal.Instant(-8640000000000000000000n)).epochNanoseconds);
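+
+// The representable range is +/-8.64e21 ns, i.e. 10**8 days on either side of
+// the epoch (editorial note):
+assertEquals(8640000000000000000000n, 100000000n * 86400n * 1000000000n);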
diff --git a/deps/v8/test/mjsunit/temporal/instant-equals.js b/deps/v8/test/mjsunit/temporal/instant-equals.js
new file mode 100644
index 0000000000..0a9eb3005a
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/instant-equals.js
@@ -0,0 +1,17 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let inst1 = new Temporal.Instant(1234567890123456789n);
+let inst2 = new Temporal.Instant(1234567890123456000n);
+let inst3 = new Temporal.Instant(1234567890123456000n);
+
+assertEquals(inst1.equals(inst2), false);
+assertEquals(inst2.equals(inst3), true);
+
+let badInst = {equals: inst1.equals};
+assertThrows(() => badInst.equals(inst1), TypeError);
+
+// TODO Test equals with Temporal.ZonedDateTime object
+// TODO Test equals with TemporalInstantString
diff --git a/deps/v8/test/mjsunit/temporal/instant-from-epoch-microseconds.js b/deps/v8/test/mjsunit/temporal/instant-from-epoch-microseconds.js
new file mode 100644
index 0000000000..dbb7608437
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/instant-from-epoch-microseconds.js
@@ -0,0 +1,28 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let bigint_nano = 1234567890123456789000n;
+let bigint_micro = 1234567890123456789n;
+let inst1 = new Temporal.Instant(bigint_nano);
+let inst2 = Temporal.Instant.fromEpochMicroseconds(bigint_micro);
+assertEquals(inst1, inst2);
+
+let just_fit_neg_bigint = -8640000000000000000n;
+let just_fit_pos_bigint = 8640000000000000000n;
+let too_big_bigint = 8640000000000000001n;
+let too_small_bigint = -8640000000000000001n;
+
+assertThrows(() =>
+ {let inst = Temporal.Instant.fromEpochMicroseconds(too_small_bigint)},
+ RangeError);
+assertThrows(() =>
+ {let inst = Temporal.Instant.fromEpochMicroseconds(too_big_bigint)},
+ RangeError);
+assertEquals(just_fit_neg_bigint,
+ (Temporal.Instant.fromEpochMicroseconds(
+ just_fit_neg_bigint)).epochMicroseconds);
+assertEquals(just_fit_pos_bigint,
+ (Temporal.Instant.fromEpochMicroseconds(
+ just_fit_pos_bigint)).epochMicroseconds);
diff --git a/deps/v8/test/mjsunit/temporal/instant-from-epoch-milliseconds.js b/deps/v8/test/mjsunit/temporal/instant-from-epoch-milliseconds.js
new file mode 100644
index 0000000000..50eec50da7
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/instant-from-epoch-milliseconds.js
@@ -0,0 +1,28 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let bigint_nano = 567890123456789000000n;
+let bigint_milli = 567890123456789n;
+let inst1 = new Temporal.Instant(bigint_nano);
+let inst2 = Temporal.Instant.fromEpochMilliseconds(bigint_milli);
+assertEquals(inst1, inst2);
+
+let just_fit_neg_bigint = -8640000000000000n;
+let just_fit_pos_bigint = 8640000000000000n;
+let too_big_bigint = 8640000000000001n;
+let too_small_bigint = -8640000000000001n;
+
+assertThrows(() =>
+ {let inst = Temporal.Instant.fromEpochMilliseconds(too_small_bigint)},
+ RangeError);
+assertThrows(() =>
+ {let inst = Temporal.Instant.fromEpochMilliseconds(too_big_bigint)},
+ RangeError);
+assertEquals(just_fit_neg_bigint,
+ (Temporal.Instant.fromEpochMilliseconds(
+ just_fit_neg_bigint)).epochMilliseconds);
+assertEquals(just_fit_pos_bigint,
+ (Temporal.Instant.fromEpochMilliseconds(
+ just_fit_pos_bigint)).epochMilliseconds);
diff --git a/deps/v8/test/mjsunit/temporal/instant-from-epoch-nanoseconds.js b/deps/v8/test/mjsunit/temporal/instant-from-epoch-nanoseconds.js
new file mode 100644
index 0000000000..533a708b03
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/instant-from-epoch-nanoseconds.js
@@ -0,0 +1,27 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let bigint1 = 1234567890123456789n;
+let inst1 = new Temporal.Instant(bigint1);
+let inst2 = Temporal.Instant.fromEpochNanoseconds(bigint1);
+assertEquals(inst1, inst2);
+
+let just_fit_neg_bigint = -8640000000000000000000n;
+let just_fit_pos_bigint = 8640000000000000000000n;
+let too_big_bigint = 8640000000000000000001n;
+let too_small_bigint = -8640000000000000000001n;
+
+assertThrows(() =>
+ {let inst = Temporal.Instant.fromEpochNanoseconds(too_small_bigint)},
+ RangeError);
+assertThrows(() =>
+ {let inst = Temporal.Instant.fromEpochNanoseconds(too_big_bigint)},
+ RangeError);
+assertEquals(just_fit_neg_bigint,
+ (Temporal.Instant.fromEpochNanoseconds(
+ just_fit_neg_bigint)).epochNanoseconds);
+assertEquals(just_fit_pos_bigint,
+ (Temporal.Instant.fromEpochNanoseconds(
+ just_fit_pos_bigint)).epochNanoseconds);
diff --git a/deps/v8/test/mjsunit/temporal/instant-from-epoch-seconds.js b/deps/v8/test/mjsunit/temporal/instant-from-epoch-seconds.js
new file mode 100644
index 0000000000..8fdbd55580
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/instant-from-epoch-seconds.js
@@ -0,0 +1,26 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let bigint_nano = 7890123456789000000000n;
+let bigint_sec = 7890123456789n;
+let inst1 = new Temporal.Instant(bigint_nano);
+let inst2 = Temporal.Instant.fromEpochSeconds(bigint_sec);
+assertEquals(inst1, inst2);
+
+let just_fit_neg_bigint = -8640000000000n;
+let just_fit_pos_bigint = 8640000000000n;
+let too_big_bigint = 8640000000001n;
+let too_small_bigint = -8640000000001n;
+
+assertThrows(() =>
+ {let inst = Temporal.Instant.fromEpochSeconds(too_small_bigint)},
+ RangeError)
+assertThrows(() =>
+ {let inst = Temporal.Instant.fromEpochSeconds(too_big_bigint)},
+ RangeError)
+assertEquals(just_fit_neg_bigint,
+ (Temporal.Instant.fromEpochSeconds(just_fit_neg_bigint)).epochSeconds);
+assertEquals(just_fit_pos_bigint,
+ (Temporal.Instant.fromEpochSeconds(just_fit_pos_bigint)).epochSeconds);
diff --git a/deps/v8/test/mjsunit/temporal/instant-subtract.js b/deps/v8/test/mjsunit/temporal/instant-subtract.js
new file mode 100644
index 0000000000..5e33ca6a32
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/instant-subtract.js
@@ -0,0 +1,54 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let i1 = new Temporal.Instant(50000n);
+assertEquals(3052001n,
+ i1.subtract(new Temporal.Duration(
+ 0,0,0,0,0,0,0,-3,-2,-1)).epochNanoseconds);
+
+assertEquals(BigInt(4 * 1e9) + 3052001n,
+ i1.subtract(new Temporal.Duration(
+ 0,0,0,0,0,0,-4,-3,-2,-1)).epochNanoseconds);
+
+assertEquals(BigInt(5 * 60 + 4) * 1000000000n + 3052001n,
+ i1.subtract(new Temporal.Duration(
+ 0,0,0,0,0,-5,-4,-3,-2,-1)).epochNanoseconds);
+
+assertEquals(BigInt(6 * 3600 + 5 * 60 + 4) * 1000000000n + 3052001n,
+ i1.subtract(new Temporal.Duration(
+ 0,0,0,0,-6,-5,-4,-3,-2,-1)).epochNanoseconds);
+
+assertEquals(-2952001n,
+ i1.subtract(new Temporal.Duration(0,0,0,0,0,0,0,3,2,1)).epochNanoseconds);
+
+assertEquals(BigInt(-4 * 1e9) - 2952001n,
+ i1.subtract(new Temporal.Duration(0,0,0,0,0,0,4,3,2,1)).epochNanoseconds);
+
+assertEquals(BigInt(5 * 60 + 4) * -1000000000n - 2952001n,
+ i1.subtract(new Temporal.Duration(0,0,0,0,0,5,4,3,2,1)).epochNanoseconds);
+
+assertEquals(BigInt(6 * 3600 + 5 * 60 + 4) * -1000000000n - 2952001n,
+ i1.subtract(new Temporal.Duration(0,0,0,0,6,5,4,3,2,1)).epochNanoseconds);
+
+// Test that RequireInternalSlot throws a TypeError
+let badInstant = { subtract: i1.subtract };
+assertThrows(() => badInstant.subtract(
+ new Temporal.Duration(0, 0, 0, 0, 5)), TypeError);
+
+// Test that ToLimitedTemporalDuration throws a RangeError
+assertThrows(() => i1.subtract(new Temporal.Duration(1)), RangeError);
+assertThrows(() => i1.subtract(new Temporal.Duration(0, 2)), RangeError);
+assertThrows(() => i1.subtract(new Temporal.Duration(0, 0, 3)), RangeError);
+assertThrows(() => i1.subtract(new Temporal.Duration(0, 0, 0, 4)), RangeError);
+assertThrows(() => i1.subtract(new Temporal.Duration(-1)), RangeError);
+assertThrows(() => i1.subtract(new Temporal.Duration(0, -2)), RangeError);
+assertThrows(() => i1.subtract(new Temporal.Duration(0, 0, -3)), RangeError);
+assertThrows(() => i1.subtract(new Temporal.Duration(0, 0, 0, -4)), RangeError);
+
+// Test that AddInstant throws a RangeError
+let i2 = new Temporal.Instant(-86400n * 99999999999999999n);
+assertThrows(() => i2.subtract(new Temporal.Duration(0, 0, 0, 0, 999999999)),
+ RangeError);
diff --git a/deps/v8/test/mjsunit/temporal/instant-to-json.js b/deps/v8/test/mjsunit/temporal/instant-to-json.js
new file mode 100644
index 0000000000..dbd5205883
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/instant-to-json.js
@@ -0,0 +1,46 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+assertEquals("1970-01-01T00:00:00Z",
+ (Temporal.Instant.fromEpochSeconds(0n)).toJSON());
+
+let days_in_sec = 24n * 60n * 60n;
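+// Neither 1970 nor 1971 is a leap year, so each contributes exactly 365 days.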
+assertEquals("1970-12-31T23:59:59Z",
+ Temporal.Instant.fromEpochSeconds((365n * days_in_sec) - 1n).toJSON());
+assertEquals("1971-01-01T00:00:00Z",
+ Temporal.Instant.fromEpochSeconds((365n * days_in_sec)).toJSON());
+
+assertEquals("1971-12-31T23:59:59Z",
+  Temporal.Instant.fromEpochSeconds((2n * 365n * days_in_sec - 1n)).toJSON());
+assertEquals("1972-01-01T00:00:00Z",
+  Temporal.Instant.fromEpochSeconds((2n * 365n * days_in_sec)).toJSON());
+
+// 1972 is a leap year
+assertEquals("1972-02-28T00:00:00Z",
+  Temporal.Instant.fromEpochSeconds(((2n * 365n + 58n) * days_in_sec)).toJSON());
+assertEquals("1972-02-29T00:00:00Z",
+  Temporal.Instant.fromEpochSeconds(((2n * 365n + 59n) * days_in_sec)).toJSON());
+
+assertEquals("1985-01-01T00:00:00Z",
+  Temporal.Instant.fromEpochSeconds(((15n * 365n + 4n) * days_in_sec)).toJSON());
+
+// Test with Date
+
+const year_in_sec = 24*60*60*365;
+const number_of_random_test = 500;
+for (let i = 0; i < number_of_random_test; i++) {
+  // between -5000 years and +5000 years
+  let ms = Math.floor(Math.random() * year_in_sec * 1000 * 10000) - year_in_sec * 1000 * 5000;
+  // Temporal's auto precision drops trailing zeros from the subsecond digits,
+  // so we only compare the first 19 characters, i.e. down to whole seconds.
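+  // e.g. Temporal prints "...T00:00:01.5Z" where Date prints "...T00:00:01.500Z".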
+  let d = new Date(ms);
+  let bigd = BigInt(d);
+  let dateout = d.toJSON().substr(0, 19);
+  let temporalout = Temporal.Instant.fromEpochMilliseconds(bigd).toJSON().substr(0, 19);
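+  // Date pads years 0-999 to four digits ("0999-..."), while Temporal uses
+  // the expanded "+000999-..." form, so skip those years.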
+ if (dateout[0] != '0') {
+ assertEquals(dateout, temporalout, ms);
+ }
+}
diff --git a/deps/v8/test/mjsunit/temporal/instant-toJSON.js b/deps/v8/test/mjsunit/temporal/instant-toJSON.js
new file mode 100644
index 0000000000..131b4ed8f7
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/instant-toJSON.js
@@ -0,0 +1,52 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let d = new Temporal.Instant(0n);
+assertEquals("1970-01-01T00:00:00Z", d.toJSON());
+d = new Temporal.Instant(1n);
+assertEquals("1970-01-01T00:00:00.000000001Z", d.toJSON());
+d = new Temporal.Instant(12n);
+assertEquals("1970-01-01T00:00:00.000000012Z", d.toJSON());
+d = new Temporal.Instant(123n);
+assertEquals("1970-01-01T00:00:00.000000123Z", d.toJSON());
+d = new Temporal.Instant(1234n);
+assertEquals("1970-01-01T00:00:00.000001234Z", d.toJSON());
+d = new Temporal.Instant(12345n);
+assertEquals("1970-01-01T00:00:00.000012345Z", d.toJSON());
+d = new Temporal.Instant(123456n);
+assertEquals("1970-01-01T00:00:00.000123456Z", d.toJSON());
+d = new Temporal.Instant(1234567n);
+assertEquals("1970-01-01T00:00:00.001234567Z", d.toJSON());
+d = new Temporal.Instant(12345678n);
+assertEquals("1970-01-01T00:00:00.012345678Z", d.toJSON());
+d = new Temporal.Instant(123456789n);
+assertEquals("1970-01-01T00:00:00.123456789Z", d.toJSON());
+d = new Temporal.Instant(1234567891n);
+assertEquals("1970-01-01T00:00:01.234567891Z", d.toJSON());
+d = new Temporal.Instant(12345678912n);
+assertEquals("1970-01-01T00:00:12.345678912Z", d.toJSON());
+
+d = new Temporal.Instant(-1n);
+assertEquals("1969-12-31T23:59:59.999999999Z", d.toJSON());
+d = new Temporal.Instant(-12n);
+assertEquals("1969-12-31T23:59:59.999999988Z", d.toJSON());
+d = new Temporal.Instant(-123n);
+assertEquals("1969-12-31T23:59:59.999999877Z", d.toJSON());
+d = new Temporal.Instant(-1234n);
+assertEquals("1969-12-31T23:59:59.999998766Z", d.toJSON());
+d = new Temporal.Instant(-12345n);
+assertEquals("1969-12-31T23:59:59.999987655Z", d.toJSON());
+d = new Temporal.Instant(-123456n);
+assertEquals("1969-12-31T23:59:59.999876544Z", d.toJSON());
+d = new Temporal.Instant(-1234567n);
+assertEquals("1969-12-31T23:59:59.998765433Z", d.toJSON());
+d = new Temporal.Instant(-12345678n);
+assertEquals("1969-12-31T23:59:59.987654322Z", d.toJSON());
+d = new Temporal.Instant(-123456789n);
+assertEquals("1969-12-31T23:59:59.876543211Z", d.toJSON());
+d = new Temporal.Instant(-1234567891n);
+assertEquals("1969-12-31T23:59:58.765432109Z", d.toJSON());
+d = new Temporal.Instant(-12345678912n);
+assertEquals("1969-12-31T23:59:47.654321088Z", d.toJSON());
diff --git a/deps/v8/test/mjsunit/temporal/instant-valueOf.js b/deps/v8/test/mjsunit/temporal/instant-valueOf.js
new file mode 100644
index 0000000000..18acba5285
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/instant-valueOf.js
@@ -0,0 +1,7 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let d1 = Temporal.Now.instant();
+assertThrows(() => d1.valueOf(), TypeError);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-add.js b/deps/v8/test/mjsunit/temporal/plain-date-add.js
new file mode 100644
index 0000000000..4734dede1d
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-add.js
@@ -0,0 +1,25 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+d8.file.execute('test/mjsunit/temporal/temporal-helpers.js');
+
+// Simple add
+let d = new Temporal.PlainDate(2021, 7, 20);
+assertPlainDate(d.add("P1D"), 2021, 7, 21);
+assertPlainDate(d.subtract("-P1D"), 2021, 7, 21);
+assertPlainDate(d.add("-P1D"), 2021, 7, 19);
+assertPlainDate(d.subtract("P1D"), 2021, 7, 19);
+assertPlainDate(d.add("P11D"), 2021, 7, 31);
+assertPlainDate(d.subtract("-P11D"), 2021, 7, 31);
+assertPlainDate(d.add("P12D"), 2021, 8, 1);
+assertPlainDate(d.subtract("-P12D"), 2021, 8, 1);
+
+let goodDate = new Temporal.PlainDate(2021, 7, 20);
+let badDate = {add: goodDate.add};
+assertThrows(() => badDate.add("P1D"), TypeError);
+
+// Throw in ToLimitedTemporalDuration
+assertThrows(() => (new Temporal.PlainDate(2021, 7, 20)).add("bad duration"),
+ RangeError);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-compare.js b/deps/v8/test/mjsunit/temporal/plain-date-compare.js
new file mode 100644
index 0000000000..f13a96bd66
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-compare.js
@@ -0,0 +1,30 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let t1 = new Temporal.PlainDate(2021, 3, 14);
+let t2 = new Temporal.PlainDate(2021, 3, 14);
+let t3 = t1;
+let t4 = new Temporal.PlainDate(2021, 3, 15);
+let t5 = new Temporal.PlainDate(2021, 4, 14);
+let t6 = new Temporal.PlainDate(2022, 3, 14);
+// years in the four-digit range
+assertEquals(0, Temporal.PlainDate.compare(t1, t1));
+assertEquals(0, Temporal.PlainDate.compare(t1, t2));
+assertEquals(0, Temporal.PlainDate.compare(t1, t3));
+assertEquals(0, Temporal.PlainDate.compare(t1, "2021-03-14"));
+assertEquals(0, Temporal.PlainDate.compare(t1, "2021-03-14T23:59:59"));
+assertEquals(1, Temporal.PlainDate.compare(t4, t1));
+assertEquals(1, Temporal.PlainDate.compare(t5, t1));
+assertEquals(1, Temporal.PlainDate.compare(t6, t1));
+assertEquals(-1, Temporal.PlainDate.compare(t1, t4));
+assertEquals(-1, Temporal.PlainDate.compare(t1, t5));
+assertEquals(-1, Temporal.PlainDate.compare(t1, t6));
+assertEquals(1, Temporal.PlainDate.compare("2021-07-21", t1));
+
+// Test Throw
+assertThrows(() => Temporal.PlainDate.compare(t1, "invalid iso8601 string"),
+ RangeError);
+assertThrows(() => Temporal.PlainDate.compare("invalid iso8601 string", t1),
+ RangeError);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-constructor.js b/deps/v8/test/mjsunit/temporal/plain-date-constructor.js
new file mode 100644
index 0000000000..8d51e85ae8
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-constructor.js
@@ -0,0 +1,82 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+d8.file.execute('test/mjsunit/temporal/temporal-helpers.js');
+
+let d1 = new Temporal.PlainDate(1911, 10, 10);
+assertPlainDate(d1, 1911, 10, 10);
+let d2 = new Temporal.PlainDate(2020, 3, 12);
+assertPlainDate(d2, 2020, 3, 12);
+let d3 = new Temporal.PlainDate(1, 12, 25);
+assertPlainDate(d3, 1, 12, 25);
+let d4 = new Temporal.PlainDate(1970, 1, 1);
+assertPlainDate(d4, 1970, 1, 1);
+let d5 = new Temporal.PlainDate(-10, 12, 1);
+assertPlainDate(d5, -10, 12, 1);
+let d6 = new Temporal.PlainDate(-25406, 1, 1);
+assertPlainDate(d6, -25406, 1, 1);
+let d7 = new Temporal.PlainDate(26890, 12, 31);
+assertPlainDate(d7, 26890, 12, 31);
+
+assertThrows(() => Temporal.PlainDate(2021, 7, 1), TypeError);
+assertThrows(() => new Temporal.PlainDate(), RangeError);
+assertThrows(() => new Temporal.PlainDate(2021), RangeError);
+assertThrows(() => new Temporal.PlainDate(2021, 0), RangeError);
+assertThrows(() => new Temporal.PlainDate(2021, 7), RangeError);
+assertThrows(() => new Temporal.PlainDate(2021, 13), RangeError);
+assertThrows(() => new Temporal.PlainDate(2021, 7, 0), RangeError);
+assertThrows(() => new Temporal.PlainDate(2021, 7, 32), RangeError);
+assertThrows(() => new Temporal.PlainDate(2021, -7, 1), RangeError);
+assertThrows(() => new Temporal.PlainDate(2021, -7, -1), RangeError);
+// Wrong month
+assertThrows(() => new Temporal.PlainDate(2021, 0, 1), RangeError);
+assertThrows(() => new Temporal.PlainDate(2021, 13, 1), RangeError);
+// Right day for month
+assertPlainDate((new Temporal.PlainDate(2021, 1, 31)), 2021, 1, 31);
+assertPlainDate((new Temporal.PlainDate(2021, 2, 28)), 2021, 2, 28);
+assertPlainDate((new Temporal.PlainDate(2021, 3, 31)), 2021, 3, 31);
+assertPlainDate((new Temporal.PlainDate(2021, 4, 30)), 2021, 4, 30);
+assertPlainDate((new Temporal.PlainDate(2021, 5, 31)), 2021, 5, 31);
+assertPlainDate((new Temporal.PlainDate(2021, 6, 30)), 2021, 6, 30);
+assertPlainDate((new Temporal.PlainDate(2021, 7, 31)), 2021, 7, 31);
+assertPlainDate((new Temporal.PlainDate(2021, 8, 31)), 2021, 8, 31);
+assertPlainDate((new Temporal.PlainDate(2021, 9, 30)), 2021, 9, 30);
+assertPlainDate((new Temporal.PlainDate(2021, 10, 31)), 2021, 10, 31);
+assertPlainDate((new Temporal.PlainDate(2021, 11, 30)), 2021, 11, 30);
+assertPlainDate((new Temporal.PlainDate(2021, 12, 31)), 2021, 12, 31);
+
+// Check Feb 29 in leap and non-leap years
+assertThrows(() => new Temporal.PlainDate(1900, 2, 29), RangeError);
+assertPlainDate((new Temporal.PlainDate(2000, 2, 29)), 2000, 2, 29);
+assertThrows(() => new Temporal.PlainDate(2001, 2, 29), RangeError);
+assertThrows(() => new Temporal.PlainDate(2002, 2, 29), RangeError);
+assertThrows(() => new Temporal.PlainDate(2003, 2, 29), RangeError);
+assertPlainDate((new Temporal.PlainDate(2004, 2, 29)), 2004, 2, 29);
+assertThrows(() => new Temporal.PlainDate(2100, 2, 29), RangeError);
+
+// Wrong day for month
+assertThrows(() => new Temporal.PlainDate(2021, 1, 32), RangeError);
+assertThrows(() => new Temporal.PlainDate(2021, 2, 29), RangeError);
+assertThrows(() => new Temporal.PlainDate(2021, 3, 32), RangeError);
+assertThrows(() => new Temporal.PlainDate(2021, 4, 31), RangeError);
+assertThrows(() => new Temporal.PlainDate(2021, 5, 32), RangeError);
+assertThrows(() => new Temporal.PlainDate(2021, 6, 31), RangeError);
+assertThrows(() => new Temporal.PlainDate(2021, 7, 32), RangeError);
+assertThrows(() => new Temporal.PlainDate(2021, 8, 32), RangeError);
+assertThrows(() => new Temporal.PlainDate(2021, 9, 31), RangeError);
+assertThrows(() => new Temporal.PlainDate(2021, 10, 32), RangeError);
+assertThrows(() => new Temporal.PlainDate(2021, 11, 31), RangeError);
+assertThrows(() => new Temporal.PlainDate(2021, 12, 32), RangeError);
+
+// Infinity
+assertThrows(() => new Temporal.PlainDate(Infinity, 12, 1), RangeError);
+assertThrows(() => new Temporal.PlainDate(-Infinity, 12, 1), RangeError);
+assertThrows(() => new Temporal.PlainDate(2021, 12, Infinity), RangeError);
+assertThrows(() => new Temporal.PlainDate(2021, 12, -Infinity), RangeError);
+assertThrows(() => new Temporal.PlainDate(2021, -Infinity, 1), RangeError);
+assertThrows(() => new Temporal.PlainDate(2021, Infinity, 1), RangeError);
+
+// TODO: Test calendar
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-equals.js b/deps/v8/test/mjsunit/temporal/plain-date-equals.js
new file mode 100644
index 0000000000..db4107b626
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-equals.js
@@ -0,0 +1,15 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let d1 = new Temporal.PlainDate(2021, 2, 28);
+let d2 = Temporal.PlainDate.from("2021-02-28");
+let d3 = Temporal.PlainDate.from("2021-01-28");
+
+assertEquals(d1.equals(d2), true);
+assertEquals(d1.equals(d3), false);
+assertEquals(d2.equals(d3), false);
+
+let badDate = {equals: d1.equals};
+assertThrows(() => badDate.equals(d1), TypeError);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-from.js b/deps/v8/test/mjsunit/temporal/plain-date-from.js
new file mode 100644
index 0000000000..55311e51ee
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-from.js
@@ -0,0 +1,51 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let d1 = Temporal.Now.plainDateISO();
+// 1. Set options to ? GetOptionsObject(options).
+[true, false, "string is invalid", Symbol(),
+ 123, 456n, Infinity, NaN, null].forEach(function(invalidOptions) {
+
+  assertThrows(() => Temporal.PlainDate.from(d1, invalidOptions), TypeError);
+ });
+
+// a. Perform ? ToTemporalOverflow(options).
+assertThrows(() => Temporal.PlainDate.from(
+ d1, {overflow: "invalid overflow"}), RangeError);
+
+[undefined, {}, {overflow: "constrain"}, {overflow: "reject"}].forEach(
+ function(validOptions) {
+ let d = new Temporal.PlainDate(1, 2, 3);
+ let d2 = Temporal.PlainDate.from(d, validOptions);
+ assertEquals(1, d2.year);
+ assertEquals(2, d2.month);
+ assertEquals(3, d2.day);
+ assertEquals("iso8601", d2.calendar.id);
+});
+
+[undefined, {}, {overflow: "constrain"}, {overflow: "reject"}].forEach(
+ function(validOptions) {
+    let d3 = Temporal.PlainDate.from({year: 9, month: 8, day: 7}, validOptions);
+ assertEquals(9, d3.year);
+ assertEquals(8, d3.month);
+ assertEquals("M08", d3.monthCode);
+ assertEquals(7, d3.day);
+ assertEquals("iso8601", d3.calendar.id);
+});
+
+[undefined, {}, {overflow: "constrain"}].forEach(
+ function(validOptions) {
+    let d4 = Temporal.PlainDate.from({year: 9, month: 14, day: 32}, validOptions);
+ assertEquals(9, d4.year);
+ assertEquals(12, d4.month);
+ assertEquals("M12", d4.monthCode);
+ assertEquals(31, d4.day);
+ assertEquals("iso8601", d4.calendar.id);
+});
+
+assertThrows(() => Temporal.PlainDate.from(
+  {year: 9, month: 14, day: 30}, {overflow: "reject"}), RangeError);
+assertThrows(() => Temporal.PlainDate.from(
+  {year: 9, month: 12, day: 32}, {overflow: "reject"}), RangeError);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-get-calendar.js b/deps/v8/test/mjsunit/temporal/plain-date-get-calendar.js
new file mode 100644
index 0000000000..f9f4b901d1
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-get-calendar.js
@@ -0,0 +1,8 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let d1 = new Temporal.PlainDate(2021, 12, 11, {get id() {return "custom";}});
+
+assertEquals("custom", d1.calendar.id);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-get-day.js b/deps/v8/test/mjsunit/temporal/plain-date-get-day.js
new file mode 100644
index 0000000000..099b2b2bda
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-get-day.js
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let d1 = new Temporal.PlainDate(2021, 12, 11,
+ {day: function(like) {return 2;}});
+
+assertEquals(2, d1.day);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-get-dayOfWeek.js b/deps/v8/test/mjsunit/temporal/plain-date-get-dayOfWeek.js
new file mode 100644
index 0000000000..016efc5480
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-get-dayOfWeek.js
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let d1 = new Temporal.PlainDate(2021, 12, 11,
+ {dayOfWeek: function(like) {return 2;}});
+
+assertEquals(2, d1.dayOfWeek);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-get-dayOfYear.js b/deps/v8/test/mjsunit/temporal/plain-date-get-dayOfYear.js
new file mode 100644
index 0000000000..93b5875e62
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-get-dayOfYear.js
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let d1 = new Temporal.PlainDate(2021, 12, 11,
+ {dayOfYear: function(like) {return 2;}});
+
+assertEquals(2, d1.dayOfYear);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-get-daysInMonth.js b/deps/v8/test/mjsunit/temporal/plain-date-get-daysInMonth.js
new file mode 100644
index 0000000000..34e04d1431
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-get-daysInMonth.js
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let d1 = new Temporal.PlainDate(2021, 12, 11,
+ {daysInMonth: function(like) {return 2;}});
+
+assertEquals(2, d1.daysInMonth);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-get-daysInWeek.js b/deps/v8/test/mjsunit/temporal/plain-date-get-daysInWeek.js
new file mode 100644
index 0000000000..b04bbb2026
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-get-daysInWeek.js
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let d1 = new Temporal.PlainDate(2021, 12, 11,
+ {daysInWeek: function(like) {return 2;}});
+
+assertEquals(2, d1.daysInWeek);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-get-daysInYear.js b/deps/v8/test/mjsunit/temporal/plain-date-get-daysInYear.js
new file mode 100644
index 0000000000..c7d4c8efe5
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-get-daysInYear.js
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let d1 = new Temporal.PlainDate(2021, 12, 11,
+ {daysInYear: function(like) {return 350;}});
+
+assertEquals(350, d1.daysInYear);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-get-era.js b/deps/v8/test/mjsunit/temporal/plain-date-get-era.js
new file mode 100644
index 0000000000..70048812fd
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-get-era.js
@@ -0,0 +1,10 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let ret = "foo bar";
+let d1 = new Temporal.PlainDate(2021, 12, 11,
+ {era: function(like) {return ret;}});
+
+assertEquals(ret, d1.era);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-get-eraYear.js b/deps/v8/test/mjsunit/temporal/plain-date-get-eraYear.js
new file mode 100644
index 0000000000..7b4c522d72
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-get-eraYear.js
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let d1 = new Temporal.PlainDate(2021, 12, 11,
+ {eraYear: function(like) {return 3838;}});
+
+assertEquals(3838, d1.eraYear);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-get-inLeapYear.js b/deps/v8/test/mjsunit/temporal/plain-date-get-inLeapYear.js
new file mode 100644
index 0000000000..95e062d895
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-get-inLeapYear.js
@@ -0,0 +1,10 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let ret = "foo bar";
+let d1 = new Temporal.PlainDate(2021, 12, 11,
+ {inLeapYear: function(like) {return ret;}});
+
+assertEquals(ret, d1.inLeapYear);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-get-iso-fields.js b/deps/v8/test/mjsunit/temporal/plain-date-get-iso-fields.js
new file mode 100644
index 0000000000..c1172ce528
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-get-iso-fields.js
@@ -0,0 +1,21 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+d8.file.execute('test/mjsunit/temporal/temporal-helpers.js');
+
+let d1 = new Temporal.PlainDate(1911, 10, 10);
+assertPlainDate(d1, 1911, 10, 10);
+let d2 = new Temporal.PlainDate(2020, 3, 12);
+assertPlainDate(d2, 2020, 3, 12);
+let d3 = new Temporal.PlainDate(1, 12, 25);
+assertPlainDate(d3, 1, 12, 25);
+let d4 = new Temporal.PlainDate(1970, 1, 1);
+assertPlainDate(d4, 1970, 1, 1);
+let d5 = new Temporal.PlainDate(-10, 12, 1);
+assertPlainDate(d5, -10, 12, 1);
+let d6 = new Temporal.PlainDate(-25406, 1, 1);
+assertPlainDate(d6, -25406, 1, 1);
+let d7 = new Temporal.PlainDate(26890, 12, 31);
+assertPlainDate(d7, 26890, 12, 31);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-get-month.js b/deps/v8/test/mjsunit/temporal/plain-date-get-month.js
new file mode 100644
index 0000000000..2c0b60cefc
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-get-month.js
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let d1 = new Temporal.PlainDate(2021, 12, 11,
+ {month: function(like) {return 2;}});
+
+assertEquals(2, d1.month);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-get-monthCode.js b/deps/v8/test/mjsunit/temporal/plain-date-get-monthCode.js
new file mode 100644
index 0000000000..b81a682e28
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-get-monthCode.js
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let d1 = new Temporal.PlainDate(2021, 12, 11,
+ {monthCode: function(like) {return "M02";}});
+
+assertEquals("M02", d1.monthCode);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-get-monthsInYear.js b/deps/v8/test/mjsunit/temporal/plain-date-get-monthsInYear.js
new file mode 100644
index 0000000000..d1b19a3c29
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-get-monthsInYear.js
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let d1 = new Temporal.PlainDate(2021, 12, 11,
+ {monthsInYear: function(like) {return 14;}});
+
+assertEquals(14, d1.monthsInYear);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-get-weekOfYear.js b/deps/v8/test/mjsunit/temporal/plain-date-get-weekOfYear.js
new file mode 100644
index 0000000000..570d293bc1
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-get-weekOfYear.js
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let d1 = new Temporal.PlainDate(2021, 12, 11,
+ {weekOfYear: function(like) {return 20;}});
+
+assertEquals(20, d1.weekOfYear);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-get-year.js b/deps/v8/test/mjsunit/temporal/plain-date-get-year.js
new file mode 100644
index 0000000000..f16f9d6bbd
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-get-year.js
@@ -0,0 +1,9 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let d1 = new Temporal.PlainDate(2021, 12, 11,
+ {year: function(like) {return 2;}});
+
+assertEquals(2, d1.year);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-to-json.js b/deps/v8/test/mjsunit/temporal/plain-date-to-json.js
new file mode 100644
index 0000000000..2d7d9890a3
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-to-json.js
@@ -0,0 +1,18 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+// years in the four-digit range
+assertEquals("2021-07-01", (new Temporal.PlainDate(2021, 7, 1)).toJSON());
+assertEquals("9999-12-31", (new Temporal.PlainDate(9999, 12, 31)).toJSON());
+assertEquals("1000-01-01", (new Temporal.PlainDate(1000, 1, 1)).toJSON());
+
+// years outside the four-digit range
+assertEquals("+010000-01-01", (new Temporal.PlainDate(10000, 1, 1)).toJSON());
+assertEquals("+025021-07-01", (new Temporal.PlainDate(25021, 7, 1)).toJSON());
+assertEquals("+000999-12-31", (new Temporal.PlainDate(999, 12, 31)).toJSON());
+assertEquals("+000099-08-01", (new Temporal.PlainDate(99, 8, 1)).toJSON());
+assertEquals("-000020-09-30", (new Temporal.PlainDate(-20, 9, 30)).toJSON());
+assertEquals("-002021-07-01", (new Temporal.PlainDate(-2021, 7, 1)).toJSON());
+assertEquals("-022021-07-01", (new Temporal.PlainDate(-22021, 7, 1)).toJSON());
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-to-plain-date-time.js b/deps/v8/test/mjsunit/temporal/plain-date-to-plain-date-time.js
new file mode 100644
index 0000000000..8dab292485
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-to-plain-date-time.js
@@ -0,0 +1,33 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+d8.file.execute('test/mjsunit/temporal/temporal-helpers.js');
+
+let d1 = new Temporal.PlainDate(2021, 12, 11);
+let badDate = { toPlainDateTime: d1.toPlainDateTime };
+assertThrows(() => badDate.toPlainDateTime(), TypeError);
+
+assertThrows(() => d1.toPlainDateTime(null), RangeError);
+assertThrows(() => d1.toPlainDateTime("string is invalid"), RangeError);
+assertThrows(() => d1.toPlainDateTime(true), RangeError);
+assertThrows(() => d1.toPlainDateTime(false), RangeError);
+assertThrows(() => d1.toPlainDateTime(NaN), RangeError);
+assertThrows(() => d1.toPlainDateTime(Infinity), RangeError);
+// assertThrows(() => d1.toPlainDateTime(123), RangeError);
+// assertThrows(() => d1.toPlainDateTime(456n), RangeError);
+assertThrows(() => d1.toPlainDateTime(Symbol()), TypeError);
+assertThrows(() => d1.toPlainDateTime({}), TypeError);
+assertThrows(() => d1.toPlainDateTime({hour: 23}), TypeError);
+assertThrows(() => d1.toPlainDateTime({minute: 23}), TypeError);
+assertThrows(() => d1.toPlainDateTime({second: 23}), TypeError);
+assertThrows(() => d1.toPlainDateTime({millisecond: 23}), TypeError);
+assertThrows(() => d1.toPlainDateTime({microsecond: 23}), TypeError);
+assertThrows(() => d1.toPlainDateTime({nanosecond: 23}), TypeError);
+
+assertPlainDateTime(d1.toPlainDateTime(),
+ 2021, 12, 11, 0, 0, 0, 0, 0, 0);
+assertPlainDateTime(d1.toPlainDateTime(
+ {hour: 9, minute: 8, second: 7, millisecond: 6, microsecond: 5, nanosecond: 4}),
+ 2021, 12, 11, 9, 8, 7, 6, 5, 4);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-to-plain-month-day.js b/deps/v8/test/mjsunit/temporal/plain-date-to-plain-month-day.js
new file mode 100644
index 0000000000..c3a46c20f5
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-to-plain-month-day.js
@@ -0,0 +1,12 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+d8.file.execute('test/mjsunit/temporal/temporal-helpers.js');
+
+let d1 = new Temporal.PlainDate(2021, 12, 11);
+let badDateTime = { toPlainMonthDay: d1.toPlainMonthDay };
+assertThrows(() => badDateTime.toPlainMonthDay(), TypeError);
+
+assertPlainMonthDay(d1.toPlainMonthDay(), "M12", 11);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-to-plain-year-month.js b/deps/v8/test/mjsunit/temporal/plain-date-to-plain-year-month.js
new file mode 100644
index 0000000000..ed0124cc1c
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-to-plain-year-month.js
@@ -0,0 +1,12 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+d8.file.execute('test/mjsunit/temporal/temporal-helpers.js');
+
+let d1 = new Temporal.PlainDate(2021, 12, 11);
+let badDate = { toPlainYearMonth: d1.toPlainYearMonth };
+assertThrows(() => badDate.toPlainYearMonth(), TypeError);
+
+assertPlainYearMonth(d1.toPlainYearMonth(), 2021, 12);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-valueOf.js b/deps/v8/test/mjsunit/temporal/plain-date-valueOf.js
new file mode 100644
index 0000000000..aa3181caf0
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-valueOf.js
@@ -0,0 +1,7 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+let d1 = Temporal.Now.plainDateISO();
+assertThrows(() => d1.valueOf(), TypeError);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-with-calendar.js b/deps/v8/test/mjsunit/temporal/plain-date-with-calendar.js
new file mode 100644
index 0000000000..d68594f05d
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-with-calendar.js
@@ -0,0 +1,32 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+d8.file.execute('test/mjsunit/temporal/temporal-helpers.js');
+
+let d1 = new Temporal.PlainDate(1911, 10, 10);
+let badDate = { withCalendar: d1.withCalendar };
+assertThrows(() => badDate.withCalendar("iso8601"), TypeError);
+
+// A simplified version of Republic of China calendar
+let rocCal = {
+ iso8601: new Temporal.Calendar("iso8601"),
+ get id() {return "roc";},
+ dateFromFields: function(fields, options) {
+ fields.year -= 1911;
+ return this.iso8601.dateFromFields(fields, options);
+ },
+ year: function(date) { return this.iso8601.year(date) - 1911; },
+ month: function(date) { return this.iso8601.month(date); },
+ monthCode: function(date) { return this.iso8601.monthCode(date); },
+ day: function(date) { return this.iso8601.day(date); },
+};
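+// With this mapping, year() reports the ISO year minus 1911; the ROC epoch is
+// 1912 = ROC year 1, so the ISO year 1911 below reads back as ROC year 0.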
+
+let d2 = d1.withCalendar(rocCal);
+assertEquals(d2.calendar.id, "roc");
+assertPlainDate(d2, 0, 10, 10);
+
+let d3 = d2.withCalendar("iso8601");
+assertEquals(d3.calendar.id, "iso8601");
+assertPlainDate(d3, 1911, 10, 10);
diff --git a/deps/v8/test/mjsunit/temporal/plain-date-with.js b/deps/v8/test/mjsunit/temporal/plain-date-with.js
new file mode 100644
index 0000000000..a64041643c
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/plain-date-with.js
@@ -0,0 +1,59 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+// Flags: --harmony-temporal
+
+d8.file.execute('test/mjsunit/temporal/temporal-helpers.js');
+
+let d1 = new Temporal.PlainDate(1911, 10, 10);
+
+assertPlainDate(d1.with({year:2021}), 2021, 10, 10);
+assertPlainDate(d1.with({month:11}), 1911, 11, 10);
+assertPlainDate(d1.with({monthCode:"M05"}), 1911, 5, 10);
+assertPlainDate(d1.with({day:30}), 1911, 10, 30);
+assertPlainDate(d1.with({year:2021, hour: 30}), 2021, 10, 10);
+assertPlainDate(d1.with({month:11, minute: 71}), 1911, 11, 10);
+assertPlainDate(d1.with({monthCode:"M05", second: 90}), 1911, 5, 10);
+assertPlainDate(d1.with({day:30, era:"BC" }), 1911, 10, 30);
+
+// A simplified version of Republic of China calendar
+let rocCal = {
+ iso8601: new Temporal.Calendar("iso8601"),
+ get id() {return "roc";},
+ dateFromFields: function(fields, options) {
+ fields.year -= 1911;
+ return this.iso8601.dateFromFields(fields, options);
+ },
+ year: function(date) { return this.iso8601.year(date) - 1911; },
+ month: function(date) { return this.iso8601.month(date); },
+ monthCode: function(date) { return this.iso8601.monthCode(date); },
+ day: function(date) { return this.iso8601.day(date); },
+};
+
+let d2 = new Temporal.PlainDate(2021, 7, 20, rocCal);
+
+assertPlainDate(d2, 110, 7, 20);
+assertPlainDate(d2.with({year: 1912}), 1, 7, 20);
+assertPlainDate(d2.with({year: 1987}), 76, 7, 20);
+
+assertThrows(() => d1.with(new Temporal.PlainDate(2021, 7, 1)), TypeError);
+assertThrows(() => d1.with(new Temporal.PlainDateTime(2021, 7, 1, 12, 13)), TypeError);
+assertThrows(() => d1.with(new Temporal.PlainTime(1, 12, 13)), TypeError);
+assertThrows(() => d1.with(new Temporal.PlainYearMonth(1991, 12)), TypeError);
+assertThrows(() => d1.with(new Temporal.PlainMonthDay(5, 12)), TypeError);
+assertThrows(() => d1.with("2012-05-13"), TypeError);
+assertThrows(() => d1.with({calendar: "iso8601"}), TypeError);
+assertThrows(() => d1.with({timeZone: "UTC"}), TypeError);
+assertThrows(() => d1.with(true), TypeError);
+assertThrows(() => d1.with(false), TypeError);
+assertThrows(() => d1.with(NaN), TypeError);
+assertThrows(() => d1.with(Infinity), TypeError);
+assertThrows(() => d1.with(1234), TypeError);
+assertThrows(() => d1.with(567n), TypeError);
+assertThrows(() => d1.with(Symbol()), TypeError);
+assertThrows(() => d1.with("string"), TypeError);
+assertThrows(() => d1.with({}), TypeError);
+assertThrows(() => d1.with([]), TypeError);
+
+let badDate = { with: d1.with };
+assertThrows(() => badDate.with({day: 3}), TypeError);
diff --git a/deps/v8/test/mjsunit/temporal/temporal-helpers.js b/deps/v8/test/mjsunit/temporal/temporal-helpers.js
new file mode 100644
index 0000000000..129f918608
--- /dev/null
+++ b/deps/v8/test/mjsunit/temporal/temporal-helpers.js
@@ -0,0 +1,107 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Helper functions for testing Temporal.
+
+function assertDuration(duration, years, months, weeks, days, hours,
+ minutes, seconds, milliseconds, microseconds, nanoseconds, sign, blank) {
+ assertEquals(years, duration.years, duration);
+ assertEquals(months, duration.months, duration);
+ assertEquals(weeks, duration.weeks, duration);
+ assertEquals(days, duration.days, duration);
+ assertEquals(hours, duration.hours, duration);
+ assertEquals(minutes, duration.minutes, duration);
+ assertEquals(seconds, duration.seconds, duration);
+ assertEquals(milliseconds, duration.milliseconds, duration);
+ assertEquals(microseconds, duration.microseconds, duration);
+ assertEquals(nanoseconds, duration.nanoseconds, duration);
+ assertEquals(sign, duration.sign, duration);
+ assertEquals(blank, duration.blank, duration);
+}
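+// For example, a one-day duration would satisfy:
+// assertDuration(new Temporal.Duration(0, 0, 0, 1),
+//     0, 0, 0, 1, 0, 0, 0, 0, 0, 0, /*sign*/ 1, /*blank*/ false);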
+
+function assertPlainDate(time, year, month, day) {
+ let fields = time.getISOFields();
+ let keys = Object.keys(fields);
+ assertEquals(4, keys.length);
+ assertEquals("calendar", keys[0]);
+ assertEquals("isoDay", keys[1]);
+ assertEquals("isoMonth", keys[2]);
+ assertEquals("isoYear", keys[3]);
+ if (fields.calendar == "iso8601") {
+ assertEquals(year, fields.isoYear, "isoYear");
+ assertEquals(month, fields.isoMonth, "isoMonth");
+ assertEquals(day, fields.isoDay, "isoDay");
+ }
+}
+
+function assertPlainDateTime(datetime, year, month, day, hour, minute, second,
+ millisecond, microsecond, nanosecond) {
+ let fields = datetime.getISOFields();
+ let keys = Object.keys(fields);
+ assertEquals(10, keys.length);
+ assertEquals("calendar", keys[0]);
+ assertEquals("isoDay", keys[1]);
+ assertEquals("isoHour", keys[2]);
+ assertEquals("isoMicrosecond", keys[3]);
+ assertEquals("isoMillisecond", keys[4]);
+ assertEquals("isoMinute", keys[5]);
+ assertEquals("isoMonth", keys[6]);
+ assertEquals("isoNanosecond", keys[7]);
+ assertEquals("isoSecond", keys[8]);
+ assertEquals("isoYear", keys[9]);
+ if (fields.calendar == "iso8601") {
+ assertEquals(year, fields.isoYear, "isoYear");
+ assertEquals(month, fields.isoMonth, "isoMonth");
+ assertEquals(day, fields.isoDay, "isoDay");
+ assertEquals(hour, fields.isoHour, "isoHour");
+ assertEquals(minute, fields.isoMinute, "isoMinute");
+ assertEquals(second, fields.isoSecond, "isoSecond");
+ assertEquals(millisecond, fields.isoMillisecond, "isoMillisecond");
+    assertEquals(microsecond, fields.isoMicrosecond, "isoMicrosecond");
+ assertEquals(nanosecond, fields.isoNanosecond, "isoNanosecond");
+ assertEquals(datetime.calendar, fields.calendar, "calendar");
+ }
+}
+
+function assertPlainTime(time, hour, minute, second, millisecond, microsecond, nanosecond) {
+ assertEquals(hour, time.hour, "hour");
+ assertEquals(minute, time.minute, "minute");
+ assertEquals(second, time.second, "second");
+ assertEquals(millisecond, time.millisecond, "millisecond");
+ assertEquals(microsecond, time.microsecond, "microsecond");
+ assertEquals(nanosecond, time.nanosecond, "nanosecond");
+}
+
+function assertPlainMonthDay(md, monthCode, day) {
+ let fields = md.getISOFields();
+ let keys = Object.keys(fields);
+ assertEquals(4, keys.length);
+ assertEquals("calendar", keys[0]);
+ assertEquals("isoDay", keys[1]);
+ assertEquals("isoMonth", keys[2]);
+ assertEquals("isoYear", keys[3]);
+ assertEquals(monthCode, md.monthCode, "monthCode");
+ assertEquals(day, md.day, "day");
+
+ if (fields.calendar == "iso8601") {
+ assertEquals(monthCode, md.monthCode, "monthCode");
+ assertEquals(day, md.day, "day");
+ assertEquals(md.calendar, fields.calendar, "calendar");
+ }
+}
+
+function assertPlainYearMonth(ym, year, month) {
+ let fields = ym.getISOFields();
+ let keys = Object.keys(fields);
+ assertEquals(4, keys.length);
+ assertEquals("calendar", keys[0]);
+ assertEquals("isoDay", keys[1]);
+ assertEquals("isoMonth", keys[2]);
+ assertEquals("isoYear", keys[3]);
+ if (fields.calendar == "iso8601") {
+ assertEquals(year, fields.isoYear, "isoYear");
+ assertEquals(month, fields.isoMonth, "isoMonth");
+ assertEquals(ym.calendar, fields.calendar, "calendar");
+ }
+}
diff --git a/deps/v8/test/mjsunit/tools/log_two_byte.js b/deps/v8/test/mjsunit/tools/log_two_byte.js
index e181b88cd2..1b09a680c8 100644
--- a/deps/v8/test/mjsunit/tools/log_two_byte.js
+++ b/deps/v8/test/mjsunit/tools/log_two_byte.js
@@ -3,6 +3,7 @@
// found in the LICENSE file.
// Flags: --logfile='+' --log --log-code --log-function-events --no-stress-opt
+// Flags: --no-stress-background-compile
let twoByteName = "twoByteName_🍕"
let o = {
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/cross_over_mutator_input.js b/deps/v8/test/mjsunit/verify-no-fail.js
index 3d7ed65c78..030940304f 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/cross_over_mutator_input.js
+++ b/deps/v8/test/mjsunit/verify-no-fail.js
@@ -1,7 +1,7 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
+// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-let x = 2;
-let y = 2;
-Math.pow(x, y);
+// Flags: --no-fail
+
+assertTrue(false);
diff --git a/deps/v8/test/mjsunit/wasm/array-copy-benchmark.js b/deps/v8/test/mjsunit/wasm/array-copy-benchmark.js
index 942be1957e..0018f15659 100644
--- a/deps/v8/test/mjsunit/wasm/array-copy-benchmark.js
+++ b/deps/v8/test/mjsunit/wasm/array-copy-benchmark.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-gc-experiments --no-liftoff
+// Flags: --experimental-wasm-gc --no-liftoff
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
@@ -14,11 +14,10 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
// - Change the value of {length} to find point at which the builtin becomes
// faster.
// - Change {array_type} if you want to test different types.
-// Right now, the limit is found to be in the 25-30 range.
-// TODO(7748): Measure again if we implement array.copy with a fast C call.
+// Right now, the limit is found to be around 10.
(function ArrayCopyBenchmark() {
- let array_length = 27;
+ let array_length = 10;
let iterations = 1;
var builder = new WasmModuleBuilder();
diff --git a/deps/v8/test/mjsunit/wasm/exceptions.js b/deps/v8/test/mjsunit/wasm/exceptions.js
index db4d6ed9bb..26c813dd0a 100644
--- a/deps/v8/test/mjsunit/wasm/exceptions.js
+++ b/deps/v8/test/mjsunit/wasm/exceptions.js
@@ -1073,3 +1073,41 @@ d8.file.execute("test/mjsunit/wasm/exceptions-utils.js");
assertDoesNotThrow(() => instance.exports.catchless_try(0));
assertWasmThrows(instance, except, [], () => instance.exports.catchless_try(1));
})();
+
+// Delegate to a regular block inside a try block.
+(function TestDelegateToBlock() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addTag(kSig_v_v);
+ builder.addFunction('test', kSig_i_v)
+ .addBody([
+ kExprTry, kWasmI32,
+ kExprBlock, kWasmI32,
+ kExprTry, kWasmI32,
+ kExprThrow, except,
+ kExprDelegate, 0,
+ kExprEnd,
+ kExprCatch, except,
+ kExprI32Const, 2,
+ kExprEnd,
+ ]).exportFunc();
+  let instance = builder.instantiate();
+ assertEquals(2, instance.exports.test());
+})();
+
+// Delegate to a regular block with no outer try (delegate to caller).
+(function TestDelegateToCallerWithBlock() {
+ print(arguments.callee.name);
+ let builder = new WasmModuleBuilder();
+ let except = builder.addTag(kSig_v_v);
+ builder.addFunction('test', kSig_v_v)
+ .addBody([
+ kExprBlock, kWasmVoid,
+ kExprTry, kWasmVoid,
+ kExprThrow, except,
+ kExprDelegate, 0,
+ kExprEnd
+ ]).exportFunc();
+  let instance = builder.instantiate();
+ assertThrows(() => instance.exports.test(), WebAssembly.Exception);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/externref-table.js b/deps/v8/test/mjsunit/wasm/externref-table.js
index 307f916b27..0cfb656cb1 100644
--- a/deps/v8/test/mjsunit/wasm/externref-table.js
+++ b/deps/v8/test/mjsunit/wasm/externref-table.js
@@ -79,3 +79,36 @@ d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
assertTraps(kTrapTableOutOfBounds, () => instance.exports.init());
})();
+
+
+(function TestExternRefTableConstructorWithDefaultValue() {
+ print(arguments.callee.name);
+ const testObject = {};
+ const argument = { "element": "externref", "initial": 3 };
+ const table = new WebAssembly.Table(argument, testObject);
+ assertEquals(table.length, 3);
+ assertEquals(table.get(0), testObject);
+ assertEquals(table.get(1), testObject);
+ assertEquals(table.get(2), testObject);
+})();
+
+(function TestFuncRefTableConstructorWithDefaultValue() {
+ print(arguments.callee.name);
+
+ const expected = 6;
+ let dummy =
+ (() => {
+ let builder = new WasmModuleBuilder();
+ builder.addFunction('dummy', kSig_i_v)
+ .addBody([kExprI32Const, expected])
+ .exportAs('dummy');
+ return builder.instantiate().exports.dummy;
+ })();
+
+ const argument = { "element": "anyfunc", "initial": 3 };
+ const table = new WebAssembly.Table(argument, dummy);
+ assertEquals(table.length, 3);
+ assertEquals(table.get(0)(), expected);
+ assertEquals(table.get(1)(), expected);
+ assertEquals(table.get(2)(), expected);
+})();
diff --git a/deps/v8/test/mjsunit/wasm/gc-nominal.js b/deps/v8/test/mjsunit/wasm/gc-nominal.js
index 0483b4a78f..a58a51d732 100644
--- a/deps/v8/test/mjsunit/wasm/gc-nominal.js
+++ b/deps/v8/test/mjsunit/wasm/gc-nominal.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-gc-experiments
+// Flags: --experimental-wasm-gc
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mjsunit/wasm/gdbjit.js b/deps/v8/test/mjsunit/wasm/gdbjit.js
new file mode 100644
index 0000000000..cc002b96fa
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/gdbjit.js
@@ -0,0 +1,20 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --gdbjit
+
+d8.file.execute('test/mjsunit/wasm/wasm-module-builder.js');
+
+// A simple test to ensure that passing the --gdbjit flag doesn't crash.
+(function testGdbJitFlag() {
+ const builder = new WasmModuleBuilder();
+ builder.addFunction('i32_add', kSig_i_ii)
+ .addBody([kExprLocalGet, 0, kExprLocalGet, 1, kExprI32Add])
+ .exportFunc();
+
+ const module = new WebAssembly.Module(builder.toBuffer());
+ const instance = new WebAssembly.Instance(module);
+
+ assertEquals(instance.exports.i32_add(1, 2), 3);
+}());
diff --git a/deps/v8/test/mjsunit/wasm/inlining.js b/deps/v8/test/mjsunit/wasm/inlining.js
new file mode 100644
index 0000000000..3fd5179b32
--- /dev/null
+++ b/deps/v8/test/mjsunit/wasm/inlining.js
@@ -0,0 +1,77 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --wasm-inlining --no-liftoff
+
+d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
+
+// TODO(12166): Consider running tests with --trace-wasm and inspecting their
+// output.
+
+(function SimpleInliningTest() {
+ let builder = new WasmModuleBuilder();
+
+ // f(x) = x - 1
+ let callee = builder.addFunction("callee", kSig_i_i)
+ .addBody([kExprLocalGet, 0, kExprI32Const, 1, kExprI32Sub]);
+ // g(x) = f(5) + x
+ builder.addFunction("main", kSig_i_i)
+ .addBody([kExprI32Const, 5, kExprCallFunction, callee.index,
+ kExprLocalGet, 0, kExprI32Add])
+ .exportAs("main");
+
+ let instance = builder.instantiate();
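+  // main(10) = f(5) + 10 = (5 - 1) + 10 = 14.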
+ assertEquals(instance.exports.main(10), 14);
+})();
+
+(function MultiReturnTest() {
+ let builder = new WasmModuleBuilder();
+
+ // f(x) = (x - 1, x + 1)
+ let callee = builder.addFunction("callee", kSig_ii_i)
+ .addBody([kExprLocalGet, 0, kExprI32Const, 1, kExprI32Sub,
+ kExprLocalGet, 0, kExprI32Const, 1, kExprI32Add]);
+ // g(x) = { let (a, b) = f(x); a * b}
+ builder.addFunction("main", kSig_i_i)
+ .addBody([kExprLocalGet, 0, kExprCallFunction, callee.index, kExprI32Mul])
+ .exportAs("main");
+
+ let instance = builder.instantiate();
+ assertEquals(instance.exports.main(10), 9 * 11);
+})();
+
+(function NoReturnTest() {
+ let builder = new WasmModuleBuilder();
+
+ let global = builder.addGlobal(kWasmI32, true);
+
+ let callee = builder.addFunction("callee", kSig_v_i)
+ .addBody([kExprLocalGet, 0, kExprGlobalSet, global.index]);
+
+ builder.addFunction("main", kSig_i_i)
+ .addBody([kExprLocalGet, 0, kExprCallFunction, callee.index,
+ kExprGlobalGet, global.index])
+ .exportAs("main");
+
+ let instance = builder.instantiate();
+ assertEquals(instance.exports.main(10), 10);
+})();
+
+(function InfiniteLoopTest() {
+ let builder = new WasmModuleBuilder();
+
+ let callee = builder.addFunction("callee", kSig_i_i)
+ .addBody([kExprLoop, kWasmVoid,
+ kExprLocalGet, 0, kExprI32Const, 1, kExprI32Add,
+ kExprLocalSet, 0, kExprBr, 0,
+ kExprEnd,
+ kExprLocalGet, 0]);
+
+ builder.addFunction("main", kSig_i_i)
+ .addBody([kExprI32Const, 5, kExprCallFunction, callee.index,
+ kExprLocalGet, 0, kExprI32Add])
+ .exportAs("main");
+
+ builder.instantiate();
+})();
diff --git a/deps/v8/test/mjsunit/wasm/reference-globals.js b/deps/v8/test/mjsunit/wasm/reference-globals.js
index 0dd3282ecc..76d41f8f97 100644
--- a/deps/v8/test/mjsunit/wasm/reference-globals.js
+++ b/deps/v8/test/mjsunit/wasm/reference-globals.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-// Flags: --experimental-wasm-gc-experiments
+// Flags: --experimental-wasm-gc
d8.file.execute("test/mjsunit/wasm/wasm-module-builder.js");
diff --git a/deps/v8/test/mjsunit/wasm/test-serialization-with-lazy-compilation.js b/deps/v8/test/mjsunit/wasm/test-serialization-with-lazy-compilation.js
index ad1d54a594..95884c32f6 100644
--- a/deps/v8/test/mjsunit/wasm/test-serialization-with-lazy-compilation.js
+++ b/deps/v8/test/mjsunit/wasm/test-serialization-with-lazy-compilation.js
@@ -10,6 +10,7 @@ const num_functions = 2;
function create_builder() {
const builder = new WasmModuleBuilder();
+ builder.addImport("foo", "bar", kSig_i_v);
for (let i = 0; i < num_functions; ++i) {
builder.addFunction('f' + i, kSig_i_v)
.addBody(wasmI32Const(i))
@@ -37,7 +38,7 @@ gc();
print(arguments.callee.name);
const module = %DeserializeWasmModule(serialized_module, wire_bytes);
- const instance = new WebAssembly.Instance(module);
+ const instance = new WebAssembly.Instance(module, {foo: {bar: () => 1}});
assertEquals(0, instance.exports.f0());
assertEquals(1, instance.exports.f1());
})();
diff --git a/deps/v8/test/mkgrokdump/mkgrokdump.cc b/deps/v8/test/mkgrokdump/mkgrokdump.cc
index a9121d4767..6e50031daf 100644
--- a/deps/v8/test/mkgrokdump/mkgrokdump.cc
+++ b/deps/v8/test/mkgrokdump/mkgrokdump.cc
@@ -5,7 +5,8 @@
#include <stdio.h>
#include "include/libplatform/libplatform.h"
-#include "include/v8.h"
+#include "include/v8-array-buffer.h"
+#include "include/v8-initialization.h"
#include "src/execution/frames.h"
#include "src/execution/isolate.h"
#include "src/heap/heap-inl.h"
@@ -113,6 +114,11 @@ static int DumpHeapConstants(FILE* out, const char* argv0) {
// Start up V8.
std::unique_ptr<v8::Platform> platform = v8::platform::NewDefaultPlatform();
v8::V8::InitializePlatform(platform.get());
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ if (!v8::V8::InitializeVirtualMemoryCage()) {
+ FATAL("Could not initialize the virtual memory cage");
+ }
+#endif
v8::V8::Initialize();
v8::V8::InitializeExternalStartupData(argv0);
Isolate::CreateParams create_params;
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index 61c6339330..66c2b7539d 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -69,28 +69,6 @@
# https://code.google.com/p/v8/issues/detail?id=10958
'language/module-code/eval-gtbndng-indirect-faux-assertion': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=4895
- # Some TypedArray methods throw due to the same bug, from Get
- 'built-ins/TypedArray/prototype/every/callbackfn-detachbuffer': [FAIL],
- 'built-ins/TypedArray/prototype/every/BigInt/callbackfn-detachbuffer': [FAIL],
- 'built-ins/TypedArray/prototype/filter/callbackfn-detachbuffer': [FAIL],
- 'built-ins/TypedArray/prototype/filter/BigInt/callbackfn-detachbuffer': [FAIL],
- 'built-ins/TypedArray/prototype/find/predicate-may-detach-buffer': [FAIL],
- 'built-ins/TypedArray/prototype/find/BigInt/predicate-may-detach-buffer': [FAIL],
- 'built-ins/TypedArray/prototype/findIndex/predicate-may-detach-buffer': [FAIL],
- 'built-ins/TypedArray/prototype/findIndex/BigInt/predicate-may-detach-buffer': [FAIL],
- 'built-ins/TypedArray/prototype/forEach/callbackfn-detachbuffer': [FAIL],
- 'built-ins/TypedArray/prototype/forEach/BigInt/callbackfn-detachbuffer': [FAIL],
- 'built-ins/TypedArray/prototype/map/callbackfn-detachbuffer': [FAIL],
- 'built-ins/TypedArray/prototype/map/BigInt/callbackfn-detachbuffer': [FAIL],
- 'built-ins/TypedArray/prototype/reduce/callbackfn-detachbuffer': [FAIL],
- 'built-ins/TypedArray/prototype/reduce/BigInt/callbackfn-detachbuffer': [FAIL],
- 'built-ins/TypedArray/prototype/reduceRight/callbackfn-detachbuffer': [FAIL],
- 'built-ins/TypedArray/prototype/reduceRight/BigInt/callbackfn-detachbuffer': [FAIL],
- 'built-ins/TypedArray/prototype/set/array-arg-primitive-toobject': [FAIL],
- 'built-ins/TypedArray/prototype/set/BigInt/array-arg-primitive-toobject': [FAIL],
- 'built-ins/TypedArray/prototype/some/callbackfn-detachbuffer': [FAIL],
- 'built-ins/TypedArray/prototype/some/BigInt/callbackfn-detachbuffer': [FAIL],
# DataView functions should also throw on detached buffers
'built-ins/DataView/detached-buffer': [FAIL],
'built-ins/DataView/prototype/byteLength/detached-buffer': [FAIL],
@@ -112,232 +90,6 @@
'language/expressions/assignment/destructuring/iterator-destructuring-property-reference-target-evaluation-order': [FAIL],
'language/expressions/assignment/destructuring/keyed-destructuring-property-reference-target-evaluation-order': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=896
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_F': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_F-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_Invalid': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_Invalid-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_N': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_N-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_No': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_No-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_T': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_T-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_Y': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_Y-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_Yes': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/binary-property-with-value-ASCII_-_Yes-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/character-class-range-end': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/character-class-range-no-dash-end': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/character-class-range-no-dash-start': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/character-class-range-start': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-In-prefix-Block-implicit': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-In-prefix-Block-implicit-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-In-prefix-Script': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-In-prefix-Script-implicit': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-In-prefix-Script-implicit-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-In-prefix-Script-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-Is-prefix-Script': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-Is-prefix-Script-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-circumflex-negation': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-circumflex-negation-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-empty': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-empty-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-invalid': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-invalid-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-no-braces': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-no-braces-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-no-braces-value': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-no-braces-value-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-separator': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-separator-and-value-only': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-separator-and-value-only-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-separator-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-separator-only': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-separator-only-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-unclosed': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-unclosed-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-unopened': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/grammar-extension-unopened-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-01': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-01-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-02': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-02-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-03': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-03-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-04': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-04-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-05': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-05-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-06': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-06-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-07': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-07-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-08': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-08-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-09': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-09-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-10': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-10-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-11': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-11-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-12': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-12-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-13': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-13-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-14': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/loose-matching-14-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/non-binary-property-without-value-General_Category': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/non-binary-property-without-value-General_Category-equals': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/non-binary-property-without-value-General_Category-equals-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/non-binary-property-without-value-General_Category-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script-equals': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script-equals-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script_Extensions': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script_Extensions-equals': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script_Extensions-equals-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/non-binary-property-without-value-Script_Extensions-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/non-existent-binary-property': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/non-existent-binary-property-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/non-existent-property-and-value': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/non-existent-property-and-value-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/non-existent-property-existing-value': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/non-existent-property-existing-value-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/non-existent-property-value-General_Category-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/non-existent-property-value-Script': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/non-existent-property-value-Script-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/non-existent-property-value-Script_Extensions': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/non-existent-property-value-Script_Extensions-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/non-existent-property-value-general-category': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Composition_Exclusion': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Composition_Exclusion-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFC': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFC-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFD': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFD-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFKC': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFKC-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFKD': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Expands_On_NFKD-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-FC_NFKC_Closure': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-FC_NFKC_Closure-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Full_Composition_Exclusion': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Full_Composition_Exclusion-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Grapheme_Link': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Grapheme_Link-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Hyphen': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Hyphen-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Alphabetic': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Alphabetic-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Default_Ignorable_Code_Point': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Default_Ignorable_Code_Point-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Grapheme_Extend': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Grapheme_Extend-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_ID_Continue': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_ID_Continue-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_ID_Start': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_ID_Start-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Lowercase': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Lowercase-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Math': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Math-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Uppercase': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Other_Uppercase-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Prepended_Concatenation_Mark': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-binary-property-Prepended_Concatenation_Mark-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-property-Block-with-value': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-property-Block-with-value-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-property-FC_NFKC_Closure': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-property-FC_NFKC_Closure-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-property-Line_Break': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-property-Line_Break-negated': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-property-Line_Break-with-value': [FAIL_PHASE_ONLY],
- 'built-ins/RegExp/property-escapes/unsupported-property-Line_Break-with-value-negated': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/early-err-pattern': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/invalid-braced-quantifier-exact': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/invalid-braced-quantifier-lower': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/invalid-braced-quantifier-range': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/invalid-optional-lookbehind': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/invalid-optional-negative-lookbehind': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/invalid-range-lookbehind': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/invalid-range-negative-lookbehind': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-dangling-groupname': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-dangling-groupname-2': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-dangling-groupname-2-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-dangling-groupname-3': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-dangling-groupname-3-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-dangling-groupname-4': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-dangling-groupname-4-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-dangling-groupname-5': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-dangling-groupname-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-dangling-groupname-without-group-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-duplicate-groupspecifier': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-duplicate-groupspecifier-2': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-duplicate-groupspecifier-2-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-duplicate-groupspecifier-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-empty-groupspecifier': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-empty-groupspecifier-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-identity-escape-in-capture-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-incomplete-groupname': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-incomplete-groupname-2': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-incomplete-groupname-2-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-incomplete-groupname-3': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-incomplete-groupname-3-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-incomplete-groupname-4': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-incomplete-groupname-5': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-incomplete-groupname-6': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-incomplete-groupname-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-incomplete-groupname-without-group-2-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-incomplete-groupname-without-group-3-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-incomplete-groupname-without-group-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-non-id-continue-groupspecifier': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-non-id-continue-groupspecifier-4': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-non-id-continue-groupspecifier-4-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-2': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-2-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-3': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-4': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-4-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-5': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-5-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-6': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-7': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-8': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-8-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-9-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-non-id-start-groupspecifier-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-numeric-groupspecifier': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-numeric-groupspecifier-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-punctuator-starting-groupspecifier': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-punctuator-starting-groupspecifier-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-punctuator-within-groupspecifier': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-punctuator-within-groupspecifier-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-unterminated-groupspecifier': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/named-groups/invalid-unterminated-groupspecifier-u': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/u-invalid-class-escape': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/u-invalid-extended-pattern-char': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/u-invalid-identity-escape': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/u-invalid-legacy-octal-escape': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/u-invalid-non-empty-class-ranges': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/u-invalid-non-empty-class-ranges-no-dash-a': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/u-invalid-non-empty-class-ranges-no-dash-ab': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/u-invalid-non-empty-class-ranges-no-dash-b': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/u-invalid-oob-decimal-escape': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/u-invalid-optional-lookahead': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/u-invalid-optional-lookbehind': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/u-invalid-optional-negative-lookahead': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/u-invalid-optional-negative-lookbehind': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/u-invalid-range-lookahead': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/u-invalid-range-lookbehind': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/u-invalid-range-negative-lookahead': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/u-invalid-range-negative-lookbehind': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/u-unicode-esc-bounds': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/u-unicode-esc-non-hex': [FAIL_PHASE_ONLY],
- 'language/literals/regexp/unicode-escape-nls-err': [FAIL_PHASE_ONLY],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=10379
'built-ins/RegExp/named-groups/non-unicode-property-names-valid': [FAIL],
@@ -488,6 +240,9 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=7472
'intl402/NumberFormat/currency-digits': [FAIL],
+ # https://bugs.chromium.org/p/v8/issues/detail?id=12167
+ 'intl402/DisplayNames/prototype/of/type-calendar-invalid': [FAIL],
+
# https://bugs.chromium.org/p/v8/issues/detail?id=7831
'language/statements/generators/generator-created-after-decl-inst': [FAIL],
'language/expressions/generators/generator-created-after-decl-inst': [FAIL],
@@ -549,9 +304,6 @@
 # http://crbug.com/v8/11533
'language/statements/class/subclass/default-constructor-spread-override': [FAIL],
- # https://bugs.chromium.org/p/v8/issues/detail?id=11689
- 'built-ins/BigInt/wrapper-object-ordinary-toprimitive': [FAIL],
-
# https://bugs.chromium.org/p/v8/issues/detail?id=11690
'language/module-code/export-expname-binding-index': [FAIL],
@@ -594,50 +346,66 @@
# https://bugs.chromium.org/p/v8/issues/detail?id=11544
'built-ins/Temporal/*': [FAIL],
-
- # https://bugs.chromium.org/p/v8/issues/detail?id=12044
- 'built-ins/Array/prototype/Symbol.unscopables/value': [FAIL],
+ 'intl402/Temporal/*': [FAIL],
# https://bugs.chromium.org/p/v8/issues/detail?id=11989
- 'built-ins/Realm/constructor': [FAIL],
- 'built-ins/Realm/descriptor': [FAIL],
- 'built-ins/Realm/instance': [FAIL],
- 'built-ins/Realm/instance-extensibility': [FAIL],
- 'built-ins/Realm/length': [FAIL],
- 'built-ins/Realm/name': [FAIL],
- 'built-ins/Realm/proto': [FAIL],
- 'built-ins/Realm/prototype/evaluate/descriptor': [FAIL],
- 'built-ins/Realm/prototype/evaluate/errors-from-the-other-realm-is-wrapped-into-a-typeerror': [FAIL],
- 'built-ins/Realm/prototype/evaluate/length': [FAIL],
- 'built-ins/Realm/prototype/evaluate/name': [FAIL],
- 'built-ins/Realm/prototype/evaluate/not-constructor': [FAIL],
- 'built-ins/Realm/prototype/evaluate/proto': [FAIL],
- 'built-ins/Realm/prototype/evaluate/returns-primitive-values': [FAIL],
- 'built-ins/Realm/prototype/evaluate/returns-symbol-values': [FAIL],
- 'built-ins/Realm/prototype/evaluate/throws-typeerror-if-evaluation-resolves-to-non-primitive': [FAIL],
- 'built-ins/Realm/prototype/evaluate/throws-when-argument-is-not-a-string': [FAIL],
- 'built-ins/Realm/prototype/evaluate/validates-realm-object': [FAIL],
- 'built-ins/Realm/prototype/evaluate/wrapped-function-arguments-are-wrapped-into-the-inner-realm': [FAIL],
- 'built-ins/Realm/prototype/evaluate/wrapped-function-arguments-are-wrapped-into-the-inner-realm-extended': [FAIL],
- 'built-ins/Realm/prototype/evaluate/wrapped-function-from-return-values-share-no-identity': [FAIL],
- 'built-ins/Realm/prototype/evaluate/wrapped-function-observing-their-scopes': [FAIL],
- 'built-ins/Realm/prototype/evaluate/wrapped-functions-accepts-callable-objects': [FAIL],
- 'built-ins/Realm/prototype/evaluate/wrapped-functions-can-resolve-callable-returns': [FAIL],
- 'built-ins/Realm/prototype/evaluate/wrapped-functions-new-wrapping-on-each-evaluation': [FAIL],
- 'built-ins/Realm/prototype/evaluate/wrapped-functions-share-no-properties': [FAIL],
- 'built-ins/Realm/prototype/evaluate/wrapped-functions-share-no-properties-extended': [FAIL],
- 'built-ins/Realm/prototype/importValue/descriptor': [FAIL],
- 'built-ins/Realm/prototype/importValue/exportName-tostring': [FAIL],
- 'built-ins/Realm/prototype/importValue/import-value': [FAIL],
- 'built-ins/Realm/prototype/importValue/length': [FAIL],
- 'built-ins/Realm/prototype/importValue/name': [FAIL],
- 'built-ins/Realm/prototype/importValue/not-constructor': [FAIL],
- 'built-ins/Realm/prototype/importValue/proto': [FAIL],
- 'built-ins/Realm/prototype/importValue/specifier-tostring': [FAIL],
- 'built-ins/Realm/prototype/importValue/throws-if-import-value-does-not-exist': [FAIL],
- 'built-ins/Realm/prototype/importValue/validates-realm-object': [FAIL],
- 'built-ins/Realm/prototype/proto': [FAIL],
- 'built-ins/Realm/prototype/Symbol.toStringTag': [FAIL],
+ 'built-ins/ShadowRealm/constructor': [FAIL],
+ 'built-ins/ShadowRealm/descriptor': [FAIL],
+ 'built-ins/ShadowRealm/extensibility': [FAIL],
+ 'built-ins/ShadowRealm/instance': [FAIL],
+ 'built-ins/ShadowRealm/instance-extensibility': [FAIL],
+ 'built-ins/ShadowRealm/length': [FAIL],
+ 'built-ins/ShadowRealm/name': [FAIL],
+ 'built-ins/ShadowRealm/proto': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/descriptor': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/errors-from-the-other-realm-is-wrapped-into-a-typeerror': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/length': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/name': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/not-constructor': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/proto': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/returns-primitive-values': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/returns-symbol-values': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/throws-typeerror-if-evaluation-resolves-to-non-primitive': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/throws-when-argument-is-not-a-string': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/validates-realm-object': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/wrapped-function-arguments-are-wrapped-into-the-inner-realm': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/wrapped-function-arguments-are-wrapped-into-the-inner-realm-extended': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/wrapped-function-from-return-values-share-no-identity': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/wrapped-function-observing-their-scopes': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/wrapped-functions-accepts-callable-objects': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/wrapped-functions-can-resolve-callable-returns': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/wrapped-functions-new-wrapping-on-each-evaluation': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/wrapped-functions-share-no-properties': [FAIL],
+ 'built-ins/ShadowRealm/prototype/evaluate/wrapped-functions-share-no-properties-extended': [FAIL],
+ 'built-ins/ShadowRealm/prototype/importValue/descriptor': [FAIL],
+ 'built-ins/ShadowRealm/prototype/importValue/exportName-tostring': [FAIL],
+ 'built-ins/ShadowRealm/prototype/importValue/import-value': [FAIL],
+ 'built-ins/ShadowRealm/prototype/importValue/length': [FAIL],
+ 'built-ins/ShadowRealm/prototype/importValue/name': [FAIL],
+ 'built-ins/ShadowRealm/prototype/importValue/not-constructor': [FAIL],
+ 'built-ins/ShadowRealm/prototype/importValue/proto': [FAIL],
+ 'built-ins/ShadowRealm/prototype/importValue/specifier-tostring': [FAIL],
+ 'built-ins/ShadowRealm/prototype/importValue/throws-if-import-value-does-not-exist': [FAIL],
+ 'built-ins/ShadowRealm/prototype/importValue/validates-realm-object': [FAIL],
+ 'built-ins/ShadowRealm/prototype/proto': [FAIL],
+ 'built-ins/ShadowRealm/prototype/Symbol.toStringTag': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=12086
+ 'language/expressions/in/private-field-invalid-assignment-reference': [FAIL],
+ 'language/expressions/in/private-field-in-nested': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=12085
+ 'language/statements/class/subclass/derived-class-return-override-catch-finally': [FAIL],
+ 'language/statements/class/subclass/derived-class-return-override-catch-finally-arrow': [FAIL],
+ 'language/statements/class/subclass/derived-class-return-override-finally-super': [FAIL],
+ 'language/statements/class/subclass/derived-class-return-override-finally-super-arrow': [FAIL],
+ 'language/statements/class/subclass/derived-class-return-override-for-of-arrow': [FAIL],
+
+ # https://bugs.chromium.org/p/v8/issues/detail?id=12168
+ 'built-ins/Date/prototype/valueOf/S9.4_A3_T1': [FAIL],
+ 'built-ins/Date/prototype/valueOf/S9.4_A3_T2': [FAIL],
+ 'built-ins/Error/prototype/S15.11.4_A3': [FAIL],
+ 'built-ins/Error/prototype/S15.11.4_A4': [FAIL],
######################## NEEDS INVESTIGATION ###########################
@@ -679,31 +447,58 @@
# https://github.com/tc39/test262/issues/3111
+ 'built-ins/TypedArray/prototype/at/BigInt/return-abrupt-from-this-out-of-bounds': [FAIL],
'built-ins/TypedArray/prototype/at/return-abrupt-from-this-out-of-bounds': [FAIL],
+ 'built-ins/TypedArray/prototype/byteOffset/BigInt/resizable-array-buffer-auto': [FAIL],
+ 'built-ins/TypedArray/prototype/copyWithin/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
'built-ins/TypedArray/prototype/copyWithin/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/entries/BigInt/return-abrupt-from-this-out-of-bounds': [FAIL],
'built-ins/TypedArray/prototype/entries/return-abrupt-from-this-out-of-bounds': [FAIL],
+ 'built-ins/TypedArray/prototype/every/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
'built-ins/TypedArray/prototype/every/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/fill/BigInt/return-abrupt-from-this-out-of-bounds': [FAIL],
'built-ins/TypedArray/prototype/fill/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/filter/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
'built-ins/TypedArray/prototype/filter/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/find/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/findIndex/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
'built-ins/TypedArray/prototype/findIndex/return-abrupt-from-this-out-of-bounds': [SKIP],
- 'built-ins/TypedArray/prototype/findLast/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/findLast/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/findLastIndex/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
'built-ins/TypedArray/prototype/findLastIndex/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/findLast/return-abrupt-from-this-out-of-bounds': [SKIP],
'built-ins/TypedArray/prototype/find/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/forEach/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
'built-ins/TypedArray/prototype/forEach/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/includes/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
'built-ins/TypedArray/prototype/includes/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/indexOf/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
'built-ins/TypedArray/prototype/indexOf/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/join/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
'built-ins/TypedArray/prototype/join/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/keys/BigInt/return-abrupt-from-this-out-of-bounds': [FAIL],
'built-ins/TypedArray/prototype/keys/return-abrupt-from-this-out-of-bounds': [FAIL],
+ 'built-ins/TypedArray/prototype/lastIndexOf/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
'built-ins/TypedArray/prototype/lastIndexOf/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/map/BigInt/return-abrupt-from-this-out-of-bounds': [FAIL],
'built-ins/TypedArray/prototype/map/return-abrupt-from-this-out-of-bounds': [FAIL],
+ 'built-ins/TypedArray/prototype/reduce/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
'built-ins/TypedArray/prototype/reduce/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/reduceRight/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
'built-ins/TypedArray/prototype/reduceRight/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/reverse/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
'built-ins/TypedArray/prototype/reverse/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/set/BigInt/typedarray-arg-set-values-same-buffer-same-type-resized': [FAIL],
'built-ins/TypedArray/prototype/set/typedarray-arg-target-out-of-bounds': [FAIL],
+ 'built-ins/TypedArray/prototype/slice/BigInt/return-abrupt-from-this-out-of-bounds': [FAIL],
'built-ins/TypedArray/prototype/slice/return-abrupt-from-this-out-of-bounds': [FAIL],
+ 'built-ins/TypedArray/prototype/some/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
'built-ins/TypedArray/prototype/some/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/sort/BigInt/return-abrupt-from-this-out-of-bounds': [FAIL],
'built-ins/TypedArray/prototype/sort/return-abrupt-from-this-out-of-bounds': [FAIL],
+ 'built-ins/TypedArray/prototype/toLocaleString/BigInt/return-abrupt-from-this-out-of-bounds': [SKIP],
'built-ins/TypedArray/prototype/toLocaleString/return-abrupt-from-this-out-of-bounds': [SKIP],
+ 'built-ins/TypedArray/prototype/values/BigInt/return-abrupt-from-this-out-of-bounds': [FAIL],
'built-ins/TypedArray/prototype/values/return-abrupt-from-this-out-of-bounds': [FAIL],
# Pending update after https://github.com/tc39/proposal-resizablearraybuffer/issues/68
diff --git a/deps/v8/test/unittests/BUILD.gn b/deps/v8/test/unittests/BUILD.gn
index 5331944241..f854cee8c1 100644
--- a/deps/v8/test/unittests/BUILD.gn
+++ b/deps/v8/test/unittests/BUILD.gn
@@ -119,6 +119,7 @@ v8_source_set("cppgc_unittests_sources") {
"heap/cppgc/object-start-bitmap-unittest.cc",
"heap/cppgc/page-memory-unittest.cc",
"heap/cppgc/persistent-family-unittest.cc",
+ "heap/cppgc/platform-unittest.cc",
"heap/cppgc/prefinalizer-unittest.cc",
"heap/cppgc/sanitizer-unittest.cc",
"heap/cppgc/source-location-unittest.cc",
@@ -305,6 +306,7 @@ v8_source_set("unittests_sources") {
"debug/debug-property-iterator-unittest.cc",
"diagnostics/eh-frame-iterator-unittest.cc",
"diagnostics/eh-frame-writer-unittest.cc",
+ "diagnostics/gdb-jit-unittest.cc",
"execution/microtask-queue-unittest.cc",
"heap/allocation-observer-unittest.cc",
"heap/barrier-unittest.cc",
@@ -327,6 +329,7 @@ v8_source_set("unittests_sources") {
"heap/memory-reducer-unittest.cc",
"heap/object-stats-unittest.cc",
"heap/persistent-handles-unittest.cc",
+ "heap/progressbar-unittest.cc",
"heap/safepoint-unittest.cc",
"heap/slot-set-unittest.cc",
"heap/spaces-unittest.cc",
@@ -413,6 +416,7 @@ v8_source_set("unittests_sources") {
"wasm/leb-helper-unittest.cc",
"wasm/liftoff-register-unittests.cc",
"wasm/loop-assignment-analysis-unittest.cc",
+ "wasm/memory-protection-unittest.cc",
"wasm/module-decoder-memory64-unittest.cc",
"wasm/module-decoder-unittest.cc",
"wasm/simd-shuffle-unittest.cc",
@@ -482,6 +486,11 @@ v8_source_set("unittests_sources") {
"assembler/turbo-assembler-s390-unittest.cc",
"compiler/s390/instruction-selector-s390-unittest.cc",
]
+ } else if (v8_current_cpu == "loong64") {
+ sources += [
+ "assembler/turbo-assembler-loong64-unittest.cc",
+ "compiler/loong64/instruction-selector-loong64-unittest.cc",
+ ]
}
if (is_posix && v8_enable_webassembly) {
diff --git a/deps/v8/test/unittests/api/access-check-unittest.cc b/deps/v8/test/unittests/api/access-check-unittest.cc
index cfd258aec0..ff4f726ff7 100644
--- a/deps/v8/test/unittests/api/access-check-unittest.cc
+++ b/deps/v8/test/unittests/api/access-check-unittest.cc
@@ -2,7 +2,13 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "include/v8.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-primitive.h"
+#include "include/v8-script.h"
+#include "include/v8-template.h"
#include "src/debug/debug.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/deps/v8/test/unittests/api/deserialize-unittest.cc b/deps/v8/test/unittests/api/deserialize-unittest.cc
index 53146ea549..5e6edcff6b 100644
--- a/deps/v8/test/unittests/api/deserialize-unittest.cc
+++ b/deps/v8/test/unittests/api/deserialize-unittest.cc
@@ -3,8 +3,13 @@
// found in the LICENSE file.
#include "include/libplatform/libplatform.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
#include "include/v8-platform.h"
-#include "include/v8.h"
+#include "include/v8-primitive.h"
+#include "include/v8-script.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/deps/v8/test/unittests/api/exception-unittest.cc b/deps/v8/test/unittests/api/exception-unittest.cc
index 04d32691ad..40b7e92d3c 100644
--- a/deps/v8/test/unittests/api/exception-unittest.cc
+++ b/deps/v8/test/unittests/api/exception-unittest.cc
@@ -2,7 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "include/v8.h"
+#include "include/v8-context.h"
+#include "include/v8-exception.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-persistent-handle.h"
#include "src/flags/flags.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/deps/v8/test/unittests/api/interceptor-unittest.cc b/deps/v8/test/unittests/api/interceptor-unittest.cc
index 8a1db3f823..a1f6cbdc36 100644
--- a/deps/v8/test/unittests/api/interceptor-unittest.cc
+++ b/deps/v8/test/unittests/api/interceptor-unittest.cc
@@ -2,7 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "include/v8.h"
+#include "include/v8-exception.h"
+#include "include/v8-function.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-object.h"
+#include "include/v8-template.h"
#include "test/unittests/test-utils.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/deps/v8/test/unittests/api/isolate-unittest.cc b/deps/v8/test/unittests/api/isolate-unittest.cc
index 52c7af0386..27d0285150 100644
--- a/deps/v8/test/unittests/api/isolate-unittest.cc
+++ b/deps/v8/test/unittests/api/isolate-unittest.cc
@@ -2,17 +2,17 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "testing/gtest/include/gtest/gtest.h"
+#include "src/execution/isolate.h"
#include "include/libplatform/libplatform.h"
#include "include/v8-platform.h"
-#include "include/v8.h"
+#include "include/v8-template.h"
#include "src/base/macros.h"
#include "src/base/platform/semaphore.h"
#include "src/execution/execution.h"
-#include "src/execution/isolate.h"
#include "src/init/v8.h"
#include "test/unittests/test-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/api/remote-object-unittest.cc b/deps/v8/test/unittests/api/remote-object-unittest.cc
index 5b350365c4..b7c65bd893 100644
--- a/deps/v8/test/unittests/api/remote-object-unittest.cc
+++ b/deps/v8/test/unittests/api/remote-object-unittest.cc
@@ -2,13 +2,15 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "testing/gtest/include/gtest/gtest.h"
-
-#include "include/v8.h"
+#include "include/v8-context.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-primitive.h"
+#include "include/v8-template.h"
#include "src/api/api-inl.h"
#include "src/handles/handles.h"
#include "src/objects/objects-inl.h"
#include "test/unittests/test-utils.h"
+#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace remote_object_unittest {
diff --git a/deps/v8/test/unittests/api/resource-constraints-unittest.cc b/deps/v8/test/unittests/api/resource-constraints-unittest.cc
index f9151edb94..0755ad72a5 100644
--- a/deps/v8/test/unittests/api/resource-constraints-unittest.cc
+++ b/deps/v8/test/unittests/api/resource-constraints-unittest.cc
@@ -2,11 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "testing/gtest/include/gtest/gtest.h"
-
-#include "include/v8-platform.h"
-#include "include/v8.h"
+#include "include/v8-isolate.h"
#include "src/heap/heap.h"
+#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/api/v8-object-unittest.cc b/deps/v8/test/unittests/api/v8-object-unittest.cc
index 9ebdb12fa7..c3f7c70f27 100644
--- a/deps/v8/test/unittests/api/v8-object-unittest.cc
+++ b/deps/v8/test/unittests/api/v8-object-unittest.cc
@@ -2,7 +2,12 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "include/v8.h"
+#include "include/v8-context.h"
+#include "include/v8-function.h"
+#include "include/v8-isolate.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-primitive.h"
+#include "include/v8-template.h"
#include "src/api/api.h"
#include "src/objects/objects-inl.h"
#include "test/unittests/test-utils.h"
diff --git a/deps/v8/test/unittests/assembler/turbo-assembler-loong64-unittest.cc b/deps/v8/test/unittests/assembler/turbo-assembler-loong64-unittest.cc
new file mode 100644
index 0000000000..5334fb4be3
--- /dev/null
+++ b/deps/v8/test/unittests/assembler/turbo-assembler-loong64-unittest.cc
@@ -0,0 +1,64 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/codegen/loong64/assembler-loong64-inl.h"
+#include "src/codegen/macro-assembler.h"
+#include "src/execution/simulator.h"
+#include "test/common/assembler-tester.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ tasm.
+
+// Test the loong64 assembler by compiling some simple functions into
+// a buffer and executing them. These tests do not initialize the
+// V8 library, create a context, or use any V8 objects.
+
+class TurboAssemblerTest : public TestWithIsolate {};
+
+TEST_F(TurboAssemblerTest, TestHardAbort) {
+ auto buffer = AllocateAssemblerBuffer();
+ TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
+ __ set_root_array_available(false);
+ __ set_abort_hard(true);
+ __ Abort(AbortReason::kNoReason);
+
+ CodeDesc desc;
+ tasm.GetCode(isolate(), &desc);
+ buffer->MakeExecutable();
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void>::FromBuffer(isolate(), buffer->start());
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(); }, "abort: no reason");
+}
+
+TEST_F(TurboAssemblerTest, TestCheck) {
+ auto buffer = AllocateAssemblerBuffer();
+ TurboAssembler tasm(isolate(), AssemblerOptions{}, CodeObjectRequired::kNo,
+ buffer->CreateView());
+ __ set_root_array_available(false);
+ __ set_abort_hard(true);
+
+ // Fail if the first parameter (in {a0}) is 17.
+ __ Check(Condition::ne, AbortReason::kNoReason, a0, Operand(17));
+ __ Ret();
+
+ CodeDesc desc;
+ tasm.GetCode(isolate(), &desc);
+ buffer->MakeExecutable();
+ // We need an isolate here to execute in the simulator.
+ auto f = GeneratedCode<void, int>::FromBuffer(isolate(), buffer->start());
+
+ f.Call(0);
+ f.Call(18);
+ ASSERT_DEATH_IF_SUPPORTED({ f.Call(17); }, "abort: no reason");
+}
+
+#undef __
+
+} // namespace internal
+} // namespace v8
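
Both tests in the new file share one execution pattern: assemble into a buffer, mark it executable, then call into it via GeneratedCode<...>::FromBuffer. As a rough sketch of what that wrapper reduces to on a native (non-simulated) target — an assumption for illustration, not V8's actual implementation — the call is a function-pointer cast:

    // Minimal sketch, assuming `buffer` already holds valid machine code and
    // has been made executable (MakeExecutable() above). On simulated targets,
    // e.g. loong64 tests running on an x64 host, V8 routes the call through
    // its instruction simulator instead, which is why the isolate is needed.
    using JitFunction = void (*)(int);

    void CallAssembledCode(void* buffer, int argument) {
      auto entry = reinterpret_cast<JitFunction>(buffer);
      entry(argument);  // runs the freshly assembled instructions
    }
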
diff --git a/deps/v8/test/unittests/base/region-allocator-unittest.cc b/deps/v8/test/unittests/base/region-allocator-unittest.cc
index d229e4775b..ccfc2ab5ca 100644
--- a/deps/v8/test/unittests/base/region-allocator-unittest.cc
+++ b/deps/v8/test/unittests/base/region-allocator-unittest.cc
@@ -78,6 +78,27 @@ TEST(RegionAllocatorTest, SimpleAllocateRegion) {
CHECK_EQ(ra.free_size(), 0);
}
+TEST(RegionAllocatorTest, SimpleAllocateAlignedRegion) {
+ const size_t kPageSize = 4 * KB;
+ const size_t kPageCount = 16;
+ const size_t kSize = kPageSize * kPageCount;
+ const Address kBegin = static_cast<Address>(kPageSize * 153);
+
+ RegionAllocator ra(kBegin, kSize, kPageSize);
+
+ // Allocate regions with different alignments and verify that they are
+ // correctly aligned.
+ const size_t alignments[] = {kPageSize, kPageSize * 8, kPageSize,
+ kPageSize * 4, kPageSize * 2, kPageSize * 2,
+ kPageSize * 4, kPageSize * 2};
+ for (auto alignment : alignments) {
+ Address address = ra.AllocateAlignedRegion(kPageSize, alignment);
+ CHECK_NE(address, RegionAllocator::kAllocationFailure);
+ CHECK(IsAligned(address, alignment));
+ }
+ CHECK_EQ(ra.free_size(), 8 * kPageSize);
+}
+
TEST(RegionAllocatorTest, AllocateRegionRandom) {
const size_t kPageSize = 8 * KB;
const size_t kPageCountLog = 16;
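
The new SimpleAllocateAlignedRegion test hinges on the IsAligned check. A self-contained sketch of that predicate and the power-of-two trick behind it (reconstructed for illustration; V8's own helper lives elsewhere in the tree):

    #include <cassert>
    #include <cstdint>

    // An address is aligned to a power-of-two boundary exactly when all of
    // its bits below that boundary are zero.
    bool IsAlignedTo(uintptr_t address, uintptr_t alignment) {
      return (address & (alignment - 1)) == 0;
    }

    int main() {
      const uintptr_t kPageSize = 4 * 1024;
      assert(IsAlignedTo(kPageSize * 8, kPageSize * 8));   // 32KB on a 32KB boundary
      assert(!IsAlignedTo(kPageSize * 3, kPageSize * 2));  // 12KB is not 8KB-aligned
    }
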
diff --git a/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc b/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc
index 121190bdb8..b53461b8b0 100644
--- a/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc
+++ b/deps/v8/test/unittests/codegen/code-stub-assembler-unittest.cc
@@ -21,10 +21,9 @@ namespace internal {
CodeStubAssemblerTestState::CodeStubAssemblerTestState(
CodeStubAssemblerTest* test)
- : compiler::CodeAssemblerState(
- test->isolate(), test->zone(), VoidDescriptor{},
- CodeKind::FOR_TESTING, "test",
- PoisoningMitigationLevel::kPoisonCriticalOnly) {}
+ : compiler::CodeAssemblerState(test->isolate(), test->zone(),
+ VoidDescriptor{}, CodeKind::FOR_TESTING,
+ "test") {}
TARGET_TEST_F(CodeStubAssemblerTest, SmiTag) {
CodeStubAssemblerTestState state(this);
diff --git a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
index 44e3bb457b..bff5697b3f 100644
--- a/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -44,11 +44,9 @@ Node* BuildConstant(InstructionSelectorTest::StreamBuilder* m, MachineType type,
switch (type.representation()) {
case MachineRepresentation::kWord32:
return m->Int32Constant(static_cast<int32_t>(value));
- break;
case MachineRepresentation::kWord64:
return m->Int64Constant(value);
- break;
default:
UNIMPLEMENTED();
@@ -2165,6 +2163,7 @@ struct SIMDMulDPInst {
ArchOpcode multiply_add_arch_opcode;
ArchOpcode multiply_sub_arch_opcode;
MachineType machine_type;
+ const int lane_size;
};
std::ostream& operator<<(std::ostream& os, const SIMDMulDPInst& inst) {
@@ -2176,10 +2175,10 @@ std::ostream& operator<<(std::ostream& os, const SIMDMulDPInst& inst) {
static const SIMDMulDPInst kSIMDMulDPInstructions[] = {
{"I32x4Mul", &MachineOperatorBuilder::I32x4Mul,
&MachineOperatorBuilder::I32x4Add, &MachineOperatorBuilder::I32x4Sub,
- kArm64I32x4Mla, kArm64I32x4Mls, MachineType::Simd128()},
+ kArm64Mla, kArm64Mls, MachineType::Simd128(), 32},
{"I16x8Mul", &MachineOperatorBuilder::I16x8Mul,
&MachineOperatorBuilder::I16x8Add, &MachineOperatorBuilder::I16x8Sub,
- kArm64I16x8Mla, kArm64I16x8Mls, MachineType::Simd128()}};
+ kArm64Mla, kArm64Mls, MachineType::Simd128(), 16}};
using InstructionSelectorSIMDDPWithSIMDMulTest =
InstructionSelectorTestWithParam<SIMDMulDPInst>;
@@ -2195,6 +2194,7 @@ TEST_P(InstructionSelectorSIMDDPWithSIMDMulTest, AddWithMul) {
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(mdpi.multiply_add_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(mdpi.lane_size, LaneSizeField::decode(s[0]->opcode()));
EXPECT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
@@ -2206,6 +2206,7 @@ TEST_P(InstructionSelectorSIMDDPWithSIMDMulTest, AddWithMul) {
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(mdpi.multiply_add_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(mdpi.lane_size, LaneSizeField::decode(s[0]->opcode()));
EXPECT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
@@ -2222,6 +2223,7 @@ TEST_P(InstructionSelectorSIMDDPWithSIMDMulTest, SubWithMul) {
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
EXPECT_EQ(mdpi.multiply_sub_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(mdpi.lane_size, LaneSizeField::decode(s[0]->opcode()));
EXPECT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(1U, s[0]->OutputCount());
}
@@ -2231,6 +2233,178 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
InstructionSelectorSIMDDPWithSIMDMulTest,
::testing::ValuesIn(kSIMDMulDPInstructions));
+namespace {
+
+struct SIMDShrAddInst {
+ const char* shradd_constructor_name;
+ const Operator* (MachineOperatorBuilder::*shr_s_operator)();
+ const Operator* (MachineOperatorBuilder::*shr_u_operator)();
+ const Operator* (MachineOperatorBuilder::*add_operator)();
+ const int laneSize;
+};
+
+std::ostream& operator<<(std::ostream& os, const SIMDShrAddInst& inst) {
+ return os << inst.shradd_constructor_name;
+}
+
+} // namespace
+
+static const SIMDShrAddInst kSIMDShrAddInstructions[] = {
+ {"I64x2ShrAdd", &MachineOperatorBuilder::I64x2ShrS,
+ &MachineOperatorBuilder::I64x2ShrU, &MachineOperatorBuilder::I64x2Add, 64},
+ {"I32x4ShrAdd", &MachineOperatorBuilder::I32x4ShrS,
+ &MachineOperatorBuilder::I32x4ShrU, &MachineOperatorBuilder::I32x4Add, 32},
+ {"I16x8ShrAdd", &MachineOperatorBuilder::I16x8ShrS,
+ &MachineOperatorBuilder::I16x8ShrU, &MachineOperatorBuilder::I16x8Add, 16},
+ {"I8x16ShrAdd", &MachineOperatorBuilder::I8x16ShrS,
+ &MachineOperatorBuilder::I8x16ShrU, &MachineOperatorBuilder::I8x16Add, 8}};
+
+using InstructionSelectorSIMDShrAddTest =
+ InstructionSelectorTestWithParam<SIMDShrAddInst>;
+
+TEST_P(InstructionSelectorSIMDShrAddTest, ShrAddS) {
+ const SIMDShrAddInst param = GetParam();
+ const MachineType type = MachineType::Simd128();
+ {
+ StreamBuilder m(this, type, type, type);
+ Node* n = m.AddNode((m.machine()->*param.shr_s_operator)(), m.Parameter(1),
+ m.Int32Constant(1));
+ m.Return(
+ m.AddNode((m.machine()->*param.add_operator)(), m.Parameter(0), n));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ssra, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(param.laneSize, LaneSizeField::decode(s[0]->opcode()));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, type, type, type);
+ Node* n = m.AddNode((m.machine()->*param.shr_s_operator)(), m.Parameter(0),
+ m.Int32Constant(1));
+ m.Return(
+ m.AddNode((m.machine()->*param.add_operator)(), n, m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Ssra, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(param.laneSize, LaneSizeField::decode(s[0]->opcode()));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_P(InstructionSelectorSIMDShrAddTest, ShrAddU) {
+ const SIMDShrAddInst param = GetParam();
+ const MachineType type = MachineType::Simd128();
+ {
+ StreamBuilder m(this, type, type, type);
+ Node* n = m.AddNode((m.machine()->*param.shr_u_operator)(), m.Parameter(1),
+ m.Int32Constant(1));
+ m.Return(
+ m.AddNode((m.machine()->*param.add_operator)(), m.Parameter(0), n));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Usra, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(param.laneSize, LaneSizeField::decode(s[0]->opcode()));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, type, type, type);
+ Node* n = m.AddNode((m.machine()->*param.shr_u_operator)(), m.Parameter(0),
+ m.Int32Constant(1));
+ m.Return(
+ m.AddNode((m.machine()->*param.add_operator)(), n, m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kArm64Usra, s[0]->arch_opcode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(param.laneSize, LaneSizeField::decode(s[0]->opcode()));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorSIMDShrAddTest,
+ ::testing::ValuesIn(kSIMDShrAddInstructions));
+
+namespace {
+struct SIMDAddExtMulInst {
+ const char* mul_constructor_name;
+ const Operator* (MachineOperatorBuilder::*mul_operator)();
+ const Operator* (MachineOperatorBuilder::*add_operator)();
+ ArchOpcode multiply_add_arch_opcode;
+ MachineType machine_type;
+ int lane_size;
+};
+} // namespace
+
+static const SIMDAddExtMulInst kSimdAddExtMulInstructions[] = {
+ {"I16x8ExtMulLowI8x16S", &MachineOperatorBuilder::I16x8ExtMulLowI8x16S,
+ &MachineOperatorBuilder::I16x8Add, kArm64Smlal, MachineType::Simd128(),
+ 16},
+ {"I16x8ExtMulHighI8x16S", &MachineOperatorBuilder::I16x8ExtMulHighI8x16S,
+ &MachineOperatorBuilder::I16x8Add, kArm64Smlal2, MachineType::Simd128(),
+ 16},
+ {"I16x8ExtMulLowI8x16U", &MachineOperatorBuilder::I16x8ExtMulLowI8x16U,
+ &MachineOperatorBuilder::I16x8Add, kArm64Umlal, MachineType::Simd128(),
+ 16},
+ {"I16x8ExtMulHighI8x16U", &MachineOperatorBuilder::I16x8ExtMulHighI8x16U,
+ &MachineOperatorBuilder::I16x8Add, kArm64Umlal2, MachineType::Simd128(),
+ 16},
+ {"I32x4ExtMulLowI16x8S", &MachineOperatorBuilder::I32x4ExtMulLowI16x8S,
+ &MachineOperatorBuilder::I32x4Add, kArm64Smlal, MachineType::Simd128(),
+ 32},
+ {"I32x4ExtMulHighI16x8S", &MachineOperatorBuilder::I32x4ExtMulHighI16x8S,
+ &MachineOperatorBuilder::I32x4Add, kArm64Smlal2, MachineType::Simd128(),
+ 32},
+ {"I32x4ExtMulLowI16x8U", &MachineOperatorBuilder::I32x4ExtMulLowI16x8U,
+ &MachineOperatorBuilder::I32x4Add, kArm64Umlal, MachineType::Simd128(),
+ 32},
+ {"I32x4ExtMulHighI16x8U", &MachineOperatorBuilder::I32x4ExtMulHighI16x8U,
+ &MachineOperatorBuilder::I32x4Add, kArm64Umlal2, MachineType::Simd128(),
+ 32}};
+
+using InstructionSelectorSIMDAddExtMulTest =
+ InstructionSelectorTestWithParam<SIMDAddExtMulInst>;
+
+// TODO(zhin): This can be merged with InstructionSelectorSIMDDPWithSIMDMulTest
+// once sub+extmul matching is implemented.
+TEST_P(InstructionSelectorSIMDAddExtMulTest, AddExtMul) {
+ const SIMDAddExtMulInst mdpi = GetParam();
+ const MachineType type = mdpi.machine_type;
+ {
+ // Test Add(x, ExtMul(y, z)).
+ StreamBuilder m(this, type, type, type, type);
+ Node* n = m.AddNode((m.machine()->*mdpi.mul_operator)(), m.Parameter(1),
+ m.Parameter(2));
+ m.Return(m.AddNode((m.machine()->*mdpi.add_operator)(), m.Parameter(0), n));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(mdpi.multiply_add_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(mdpi.lane_size, LaneSizeField::decode(s[0]->opcode()));
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ // Test Add(ExtMul(y, z), x), making sure it's commutative.
+ StreamBuilder m(this, type, type, type, type);
+ Node* n = m.AddNode((m.machine()->*mdpi.mul_operator)(), m.Parameter(0),
+ m.Parameter(1));
+ m.Return(m.AddNode((m.machine()->*mdpi.add_operator)(), n, m.Parameter(2)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(mdpi.multiply_add_arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(mdpi.lane_size, LaneSizeField::decode(s[0]->opcode()));
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorSIMDAddExtMulTest,
+ ::testing::ValuesIn(kSimdAddExtMulInstructions));
+
struct SIMDMulDupInst {
const uint8_t shuffle[16];
int32_t lane;
@@ -2293,7 +2467,8 @@ TEST_P(InstructionSelectorSimdF32x4MulWithDupTest, MulWithDup) {
m.Return(m.AddNode(m.machine()->F32x4Mul(), m.Parameter(2), shuffle));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64F32x4MulElement, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64FMulElement, s[0]->arch_opcode());
+ EXPECT_EQ(32, LaneSizeField::decode(s[0]->opcode()));
EXPECT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(param.lane, s.ToInt32(s[0]->InputAt(2)));
EXPECT_EQ(1U, s[0]->OutputCount());
@@ -2309,7 +2484,8 @@ TEST_P(InstructionSelectorSimdF32x4MulWithDupTest, MulWithDup) {
m.Return(m.AddNode(m.machine()->F32x4Mul(), shuffle, m.Parameter(2)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64F32x4MulElement, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64FMulElement, s[0]->arch_opcode());
+ EXPECT_EQ(32, LaneSizeField::decode(s[0]->opcode()));
EXPECT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(param.lane, s.ToInt32(s[0]->InputAt(2)));
EXPECT_EQ(1U, s[0]->OutputCount());
@@ -2336,7 +2512,8 @@ TEST_F(InstructionSelectorTest, SimdF32x4MulWithDupNegativeTest) {
// The shuffle is a i8x16.dup of lane 0.
EXPECT_EQ(kArm64S128Dup, s[0]->arch_opcode());
EXPECT_EQ(3U, s[0]->InputCount());
- EXPECT_EQ(kArm64F32x4Mul, s[1]->arch_opcode());
+ EXPECT_EQ(kArm64FMul, s[1]->arch_opcode());
+ EXPECT_EQ(32, LaneSizeField::decode(s[1]->opcode()));
EXPECT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(2U, s[1]->InputCount());
EXPECT_EQ(1U, s[1]->OutputCount());
@@ -2379,7 +2556,8 @@ TEST_P(InstructionSelectorSimdF64x2MulWithDupTest, MulWithDup) {
m.Return(m.AddNode(m.machine()->F64x2Mul(), m.Parameter(2), shuffle));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64F64x2MulElement, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64FMulElement, s[0]->arch_opcode());
+ EXPECT_EQ(64, LaneSizeField::decode(s[0]->opcode()));
EXPECT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(param.lane, s.ToInt32(s[0]->InputAt(2)));
EXPECT_EQ(1U, s[0]->OutputCount());
@@ -2395,7 +2573,8 @@ TEST_P(InstructionSelectorSimdF64x2MulWithDupTest, MulWithDup) {
m.Return(m.AddNode(m.machine()->F64x2Mul(), shuffle, m.Parameter(2)));
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kArm64F64x2MulElement, s[0]->arch_opcode());
+ EXPECT_EQ(kArm64FMulElement, s[0]->arch_opcode());
+ EXPECT_EQ(64, LaneSizeField::decode(s[0]->opcode()));
EXPECT_EQ(3U, s[0]->InputCount());
EXPECT_EQ(param.lane, s.ToInt32(s[0]->InputAt(2)));
EXPECT_EQ(1U, s[0]->OutputCount());
@@ -2422,7 +2601,8 @@ TEST_F(InstructionSelectorTest, SimdF64x2MulWithDupNegativeTest) {
// The shuffle is a i8x16.dup of lane 0.
EXPECT_EQ(kArm64S128Dup, s[0]->arch_opcode());
EXPECT_EQ(3U, s[0]->InputCount());
- EXPECT_EQ(kArm64F64x2Mul, s[1]->arch_opcode());
+ EXPECT_EQ(kArm64FMul, s[1]->arch_opcode());
+ EXPECT_EQ(64, LaneSizeField::decode(s[1]->opcode()));
EXPECT_EQ(1U, s[0]->OutputCount());
EXPECT_EQ(2U, s[1]->InputCount());
EXPECT_EQ(1U, s[1]->OutputCount());
diff --git a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
index f4e3ea07b1..e52661fae2 100644
--- a/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
+++ b/deps/v8/test/unittests/compiler/backend/instruction-selector-unittest.cc
@@ -50,8 +50,7 @@ InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
InstructionSelector::kEnableSwitchJumpTable, &tick_counter, nullptr,
&max_unoptimized_frame_height, &max_pushed_argument_count,
source_position_mode, features, InstructionSelector::kDisableScheduling,
- InstructionSelector::kEnableRootsRelativeAddressing,
- PoisoningMitigationLevel::kPoisonAll);
+ InstructionSelector::kEnableRootsRelativeAddressing);
selector.SelectInstructions();
if (FLAG_trace_turbo) {
StdoutStream{} << "=== Code sequence after instruction selection ==="
@@ -452,7 +451,6 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeopt) {
EXPECT_EQ(kArchCallCodeObject, call_instr->arch_opcode());
size_t num_operands =
1 + // Code object.
- 1 + // Poison index
6 + // Frame state deopt id + one input for each value in frame state.
1 + // Function.
1; // Context.
@@ -462,23 +460,23 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeopt) {
EXPECT_TRUE(call_instr->InputAt(0)->IsImmediate());
// Deoptimization id.
- int32_t deopt_id_before = s.ToInt32(call_instr->InputAt(2));
+ int32_t deopt_id_before = s.ToInt32(call_instr->InputAt(1));
FrameStateDescriptor* desc_before =
s.GetFrameStateDescriptor(deopt_id_before);
EXPECT_EQ(bailout_id_before, desc_before->bailout_id());
EXPECT_EQ(1u, desc_before->parameters_count());
EXPECT_EQ(1u, desc_before->locals_count());
EXPECT_EQ(1u, desc_before->stack_count());
- EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(4)));
- EXPECT_EQ(0, s.ToInt32(call_instr->InputAt(5))); // This should be a context.
+ EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(3)));
+ EXPECT_EQ(0, s.ToInt32(call_instr->InputAt(4))); // This should be a context.
// We inserted 0 here.
- EXPECT_EQ(0.5, s.ToFloat64(call_instr->InputAt(6)));
- EXPECT_TRUE(s.ToHeapObject(call_instr->InputAt(7))->IsUndefined(isolate()));
+ EXPECT_EQ(0.5, s.ToFloat64(call_instr->InputAt(5)));
+ EXPECT_TRUE(s.ToHeapObject(call_instr->InputAt(6))->IsUndefined(isolate()));
// Function.
- EXPECT_EQ(s.ToVreg(function_node), s.ToVreg(call_instr->InputAt(8)));
+ EXPECT_EQ(s.ToVreg(function_node), s.ToVreg(call_instr->InputAt(7)));
// Context.
- EXPECT_EQ(s.ToVreg(context), s.ToVreg(call_instr->InputAt(9)));
+ EXPECT_EQ(s.ToVreg(context), s.ToVreg(call_instr->InputAt(8)));
EXPECT_EQ(kArchRet, s[index++]->arch_opcode());
@@ -559,7 +557,6 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeoptRecursiveFrameState) {
EXPECT_EQ(kArchCallCodeObject, call_instr->arch_opcode());
size_t num_operands =
1 + // Code object.
- 1 + // Poison index.
1 + // Frame state deopt id
5 + // One input for each value in frame state + context.
5 + // One input for each value in the parent frame state + context.
@@ -570,7 +567,7 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeoptRecursiveFrameState) {
EXPECT_TRUE(call_instr->InputAt(0)->IsImmediate());
// Deoptimization id.
- int32_t deopt_id_before = s.ToInt32(call_instr->InputAt(2));
+ int32_t deopt_id_before = s.ToInt32(call_instr->InputAt(1));
FrameStateDescriptor* desc_before =
s.GetFrameStateDescriptor(deopt_id_before);
FrameStateDescriptor* desc_before_outer = desc_before->outer_state();
@@ -579,24 +576,24 @@ TARGET_TEST_F(InstructionSelectorTest, CallStubWithDeoptRecursiveFrameState) {
EXPECT_EQ(1u, desc_before_outer->locals_count());
EXPECT_EQ(1u, desc_before_outer->stack_count());
// Values from parent environment.
- EXPECT_EQ(63, s.ToInt32(call_instr->InputAt(4)));
+ EXPECT_EQ(63, s.ToInt32(call_instr->InputAt(3)));
// Context:
- EXPECT_EQ(66, s.ToInt32(call_instr->InputAt(5)));
- EXPECT_EQ(64, s.ToInt32(call_instr->InputAt(6)));
- EXPECT_EQ(65, s.ToInt32(call_instr->InputAt(7)));
+ EXPECT_EQ(66, s.ToInt32(call_instr->InputAt(4)));
+ EXPECT_EQ(64, s.ToInt32(call_instr->InputAt(5)));
+ EXPECT_EQ(65, s.ToInt32(call_instr->InputAt(6)));
// Values from the nested frame.
EXPECT_EQ(1u, desc_before->parameters_count());
EXPECT_EQ(1u, desc_before->locals_count());
EXPECT_EQ(1u, desc_before->stack_count());
- EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(9)));
- EXPECT_EQ(46, s.ToInt32(call_instr->InputAt(10)));
- EXPECT_EQ(0.25, s.ToFloat64(call_instr->InputAt(11)));
- EXPECT_EQ(44, s.ToInt32(call_instr->InputAt(12)));
+ EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(8)));
+ EXPECT_EQ(46, s.ToInt32(call_instr->InputAt(9)));
+ EXPECT_EQ(0.25, s.ToFloat64(call_instr->InputAt(10)));
+ EXPECT_EQ(44, s.ToInt32(call_instr->InputAt(11)));
// Function.
- EXPECT_EQ(s.ToVreg(function_node), s.ToVreg(call_instr->InputAt(13)));
+ EXPECT_EQ(s.ToVreg(function_node), s.ToVreg(call_instr->InputAt(12)));
// Context.
- EXPECT_EQ(s.ToVreg(context2), s.ToVreg(call_instr->InputAt(14)));
+ EXPECT_EQ(s.ToVreg(context2), s.ToVreg(call_instr->InputAt(13)));
// Continuation.
EXPECT_EQ(kArchRet, s[index++]->arch_opcode());
diff --git a/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc b/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
index 2b76e5289f..03960705e1 100644
--- a/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/effect-control-linearizer-unittest.cc
@@ -86,8 +86,7 @@ TEST_F(EffectControlLinearizerTest, SimpleLoad) {
// Run the state effect introducer.
LinearizeEffectControl(jsgraph(), &schedule, zone(), source_positions(),
- node_origins(), PoisoningMitigationLevel::kDontPoison,
- broker());
+ node_origins(), broker());
EXPECT_THAT(load,
IsLoadField(AccessBuilder::ForHeapNumberValue(), heap_number,
@@ -148,8 +147,7 @@ TEST_F(EffectControlLinearizerTest, DiamondLoad) {
// Run the state effect introducer.
LinearizeEffectControl(jsgraph(), &schedule, zone(), source_positions(),
- node_origins(), PoisoningMitigationLevel::kDontPoison,
- broker());
+ node_origins(), broker());
// The effect input to the return should be an effect phi with the
// newly introduced effectful change operators.
@@ -215,8 +213,7 @@ TEST_F(EffectControlLinearizerTest, LoopLoad) {
// Run the state effect introducer.
LinearizeEffectControl(jsgraph(), &schedule, zone(), source_positions(),
- node_origins(), PoisoningMitigationLevel::kDontPoison,
- broker());
+ node_origins(), broker());
ASSERT_THAT(ret, IsReturn(load, load, if_true));
EXPECT_THAT(load, IsLoadField(AccessBuilder::ForHeapNumberValue(),
@@ -278,8 +275,7 @@ TEST_F(EffectControlLinearizerTest, CloneBranch) {
schedule.AddNode(mblock, graph()->end());
LinearizeEffectControl(jsgraph(), &schedule, zone(), source_positions(),
- node_origins(), PoisoningMitigationLevel::kDontPoison,
- broker());
+ node_origins(), broker());
Capture<Node *> branch1_capture, branch2_capture;
EXPECT_THAT(
@@ -337,8 +333,7 @@ TEST_F(EffectControlLinearizerTest, UnreachableThenBranch) {
// Run the state effect linearizer and machine lowering, maintaining the
// schedule.
LowerToMachineSchedule(jsgraph(), &schedule, zone(), source_positions(),
- node_origins(), PoisoningMitigationLevel::kDontPoison,
- broker());
+ node_origins(), broker());
ASSERT_THAT(end(), IsEnd(IsThrow()));
}
@@ -390,8 +385,7 @@ TEST_F(EffectControlLinearizerTest, UnreachableThenDiamond) {
// Run the state effect linearizer and machine lowering, maintaining the
// schedule.
LowerToMachineSchedule(jsgraph(), &schedule, zone(), source_positions(),
- node_origins(), PoisoningMitigationLevel::kDontPoison,
- broker());
+ node_origins(), broker());
ASSERT_THAT(end(), IsEnd(IsThrow()));
}
@@ -448,8 +442,7 @@ TEST_F(EffectControlLinearizerTest, UnreachableThenLoop) {
// Run the state effect linearizer and machine lowering, maintaining the
// schedule.
LowerToMachineSchedule(jsgraph(), &schedule, zone(), source_positions(),
- node_origins(), PoisoningMitigationLevel::kDontPoison,
- broker());
+ node_origins(), broker());
ASSERT_THAT(end(), IsEnd(IsThrow()));
}
@@ -502,8 +495,7 @@ TEST_F(EffectControlLinearizerTest, UnreachableInChangedBlockThenBranch) {
// Run the state effect linearizer and machine lowering, maintaining the
// schedule.
LowerToMachineSchedule(jsgraph(), &schedule, zone(), source_positions(),
- node_origins(), PoisoningMitigationLevel::kDontPoison,
- broker());
+ node_origins(), broker());
ASSERT_THAT(end(), IsEnd(IsThrow()));
}
diff --git a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
index f3942b776e..08c16f60c0 100644
--- a/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
+++ b/deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
@@ -321,8 +321,8 @@ INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
class AddressingModeUnitTest : public InstructionSelectorTest {
public:
- AddressingModeUnitTest() : m(NULL) { Reset(); }
- ~AddressingModeUnitTest() { delete m; }
+ AddressingModeUnitTest() : m(nullptr) { Reset(); }
+ ~AddressingModeUnitTest() override { delete m; }
void Run(Node* base, Node* load_index, Node* store_index,
AddressingMode mode) {
@@ -743,7 +743,7 @@ TEST_F(InstructionSelectorTest, Float32Abs) {
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSEFloat32Abs, s[0]->arch_opcode());
+ EXPECT_EQ(kFloat32Abs, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
@@ -758,7 +758,7 @@ TEST_F(InstructionSelectorTest, Float32Abs) {
m.Return(n);
Stream s = m.Build(AVX);
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kAVXFloat32Abs, s[0]->arch_opcode());
+ EXPECT_EQ(kFloat32Abs, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
@@ -776,7 +776,7 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSEFloat64Abs, s[0]->arch_opcode());
+ EXPECT_EQ(kFloat64Abs, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
@@ -791,7 +791,7 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
m.Return(n);
Stream s = m.Build(AVX);
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kAVXFloat64Abs, s[0]->arch_opcode());
+ EXPECT_EQ(kFloat64Abs, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
@@ -812,10 +812,10 @@ TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
m.Return(ret);
Stream s = m.Build(AVX);
ASSERT_EQ(4U, s.size());
- EXPECT_EQ(kAVXFloat64Add, s[0]->arch_opcode());
- EXPECT_EQ(kAVXFloat64Mul, s[1]->arch_opcode());
- EXPECT_EQ(kAVXFloat64Sub, s[2]->arch_opcode());
- EXPECT_EQ(kAVXFloat64Div, s[3]->arch_opcode());
+ EXPECT_EQ(kFloat64Add, s[0]->arch_opcode());
+ EXPECT_EQ(kFloat64Mul, s[1]->arch_opcode());
+ EXPECT_EQ(kFloat64Sub, s[2]->arch_opcode());
+ EXPECT_EQ(kFloat64Div, s[3]->arch_opcode());
}
{
StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
@@ -827,10 +827,10 @@ TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
m.Return(ret);
Stream s = m.Build();
ASSERT_EQ(4U, s.size());
- EXPECT_EQ(kSSEFloat64Add, s[0]->arch_opcode());
- EXPECT_EQ(kSSEFloat64Mul, s[1]->arch_opcode());
- EXPECT_EQ(kSSEFloat64Sub, s[2]->arch_opcode());
- EXPECT_EQ(kSSEFloat64Div, s[3]->arch_opcode());
+ EXPECT_EQ(kFloat64Add, s[0]->arch_opcode());
+ EXPECT_EQ(kFloat64Mul, s[1]->arch_opcode());
+ EXPECT_EQ(kFloat64Sub, s[2]->arch_opcode());
+ EXPECT_EQ(kFloat64Div, s[3]->arch_opcode());
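+ // Note: the SSE/AVX split is no longer reflected in the opcode;
+ // presumably the code generator now picks the SSE or AVX encoding from
+ // CpuFeatures, so both streams check the same kFloat64* opcodes.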
}
}
diff --git a/deps/v8/test/unittests/compiler/loong64/instruction-selector-loong64-unittest.cc b/deps/v8/test/unittests/compiler/loong64/instruction-selector-loong64-unittest.cc
new file mode 100644
index 0000000000..fa0cd23a86
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/loong64/instruction-selector-loong64-unittest.cc
@@ -0,0 +1,1564 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/objects/objects-inl.h"
+#include "test/unittests/compiler/backend/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+template <typename T>
+struct MachInst {
+ T constructor;
+ const char* constructor_name;
+ ArchOpcode arch_opcode;
+ MachineType machine_type;
+};
+
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const MachInst<T>& mi) {
+ return os << mi.constructor_name;
+}
+
+using MachInst1 = MachInst<Node* (RawMachineAssembler::*)(Node*)>;
+using MachInst2 = MachInst<Node* (RawMachineAssembler::*)(Node*, Node*)>;
+
+// To avoid duplicated code, an IntCmp helper structure is created. It
+// contains a MachInst2 with two nodes and an expected_size, because
+// different cmp instructions expand to different numbers of instructions.
+struct IntCmp {
+ MachInst2 mi;
+ uint32_t expected_size;
+};
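+
+// For example (sketch): Word32Equal lowers to a single kLoong64Cmp, so its
+// entry in kCmpInstructions below carries an expected_size of 1U, which the
+// parameterized test compares against s.size().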
+
+struct FPCmp {
+ MachInst2 mi;
+ FlagsCondition cond;
+};
+
+const FPCmp kFPCmpInstructions[] = {
+ {{&RawMachineAssembler::Float64Equal, "Float64Equal", kLoong64Float64Cmp,
+ MachineType::Float64()},
+ kEqual},
+ {{&RawMachineAssembler::Float64LessThan, "Float64LessThan",
+ kLoong64Float64Cmp, MachineType::Float64()},
+ kUnsignedLessThan},
+ {{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
+ kLoong64Float64Cmp, MachineType::Float64()},
+ kUnsignedLessThanOrEqual},
+ {{&RawMachineAssembler::Float64GreaterThan, "Float64GreaterThan",
+ kLoong64Float64Cmp, MachineType::Float64()},
+ kUnsignedLessThan},
+ {{&RawMachineAssembler::Float64GreaterThanOrEqual,
+ "Float64GreaterThanOrEqual", kLoong64Float64Cmp, MachineType::Float64()},
+ kUnsignedLessThanOrEqual}};
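+
+// Note that the GreaterThan(OrEqual) comparisons also map to
+// kUnsignedLessThan(OrEqual): presumably the selector commutes x > y into
+// y < x, and floating-point comparisons use the unsigned condition codes.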
+
+struct Conversion {
+ // The machine_type field in MachInst1 represents the destination type.
+ MachInst1 mi;
+ MachineType src_machine_type;
+};
+
+// ----------------------------------------------------------------------------
+// Logical instructions.
+// ----------------------------------------------------------------------------
+
+const MachInst2 kLogicalInstructions[] = {
+ {&RawMachineAssembler::Word32And, "Word32And", kLoong64And32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64And, "Word64And", kLoong64And,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Or, "Word32Or", kLoong64Or32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Or, "Word64Or", kLoong64Or,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Xor, "Word32Xor", kLoong64Xor32,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Xor, "Word64Xor", kLoong64Xor,
+ MachineType::Int64()}};
+
+// ----------------------------------------------------------------------------
+// Shift instructions.
+// ----------------------------------------------------------------------------
+
+const MachInst2 kShiftInstructions[] = {
+ {&RawMachineAssembler::Word32Shl, "Word32Shl", kLoong64Sll_w,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Shl, "Word64Shl", kLoong64Sll_d,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Shr, "Word32Shr", kLoong64Srl_w,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Shr, "Word64Shr", kLoong64Srl_d,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Sar, "Word32Sar", kLoong64Sra_w,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Sar, "Word64Sar", kLoong64Sra_d,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Word32Ror, "Word32Ror", kLoong64Rotr_w,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Word64Ror, "Word64Ror", kLoong64Rotr_d,
+ MachineType::Int64()}};
+
+// ----------------------------------------------------------------------------
+// MUL/DIV instructions.
+// ----------------------------------------------------------------------------
+
+const MachInst2 kMulDivInstructions[] = {
+ {&RawMachineAssembler::Int32Mul, "Int32Mul", kLoong64Mul_w,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int32Div, "Int32Div", kLoong64Div_w,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Uint32Div, "Uint32Div", kLoong64Div_wu,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Int64Mul, "Int64Mul", kLoong64Mul_d,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Int64Div, "Int64Div", kLoong64Div_d,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Uint64Div, "Uint64Div", kLoong64Div_du,
+ MachineType::Uint64()},
+ {&RawMachineAssembler::Float64Mul, "Float64Mul", kLoong64Float64Mul,
+ MachineType::Float64()},
+ {&RawMachineAssembler::Float64Div, "Float64Div", kLoong64Float64Div,
+ MachineType::Float64()}};
+
+// ----------------------------------------------------------------------------
+// MOD instructions.
+// ----------------------------------------------------------------------------
+
+const MachInst2 kModInstructions[] = {
+ {&RawMachineAssembler::Int32Mod, "Int32Mod", kLoong64Mod_w,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Uint32Mod, "Uint32Mod", kLoong64Mod_wu,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Float64Mod, "Float64Mod", kLoong64Float64Mod,
+ MachineType::Float64()}};
+
+// ----------------------------------------------------------------------------
+// Arithmetic FPU instructions.
+// ----------------------------------------------------------------------------
+
+const MachInst2 kFPArithInstructions[] = {
+ {&RawMachineAssembler::Float64Add, "Float64Add", kLoong64Float64Add,
+ MachineType::Float64()},
+ {&RawMachineAssembler::Float64Sub, "Float64Sub", kLoong64Float64Sub,
+ MachineType::Float64()}};
+
+// ----------------------------------------------------------------------------
+// IntArithTest instructions, two nodes.
+// ----------------------------------------------------------------------------
+
+const MachInst2 kAddSubInstructions[] = {
+ {&RawMachineAssembler::Int32Add, "Int32Add", kLoong64Add_w,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int64Add, "Int64Add", kLoong64Add_d,
+ MachineType::Int64()},
+ {&RawMachineAssembler::Int32Sub, "Int32Sub", kLoong64Sub_w,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int64Sub, "Int64Sub", kLoong64Sub_d,
+ MachineType::Int64()}};
+
+// ----------------------------------------------------------------------------
+// IntArithTest instructions, one node.
+// ----------------------------------------------------------------------------
+
+const MachInst1 kAddSubOneInstructions[] = {
+ {&RawMachineAssembler::Int32Neg, "Int32Neg", kLoong64Sub_w,
+ MachineType::Int32()},
+ {&RawMachineAssembler::Int64Neg, "Int64Neg", kLoong64Sub_d,
+ MachineType::Int64()}};
+
+// ----------------------------------------------------------------------------
+// Arithmetic compare instructions.
+// ----------------------------------------------------------------------------
+
+const IntCmp kCmpInstructions[] = {
+ {{&RawMachineAssembler::WordEqual, "WordEqual", kLoong64Cmp,
+ MachineType::Int64()},
+ 1U},
+ {{&RawMachineAssembler::WordNotEqual, "WordNotEqual", kLoong64Cmp,
+ MachineType::Int64()},
+ 1U},
+ {{&RawMachineAssembler::Word32Equal, "Word32Equal", kLoong64Cmp,
+ MachineType::Int32()},
+ 1U},
+ {{&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kLoong64Cmp,
+ MachineType::Int32()},
+ 1U},
+ {{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kLoong64Cmp,
+ MachineType::Int32()},
+ 1U},
+ {{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
+ kLoong64Cmp, MachineType::Int32()},
+ 1U},
+ {{&RawMachineAssembler::Int32GreaterThan, "Int32GreaterThan", kLoong64Cmp,
+ MachineType::Int32()},
+ 1U},
+ {{&RawMachineAssembler::Int32GreaterThanOrEqual, "Int32GreaterThanOrEqual",
+ kLoong64Cmp, MachineType::Int32()},
+ 1U},
+ {{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kLoong64Cmp,
+ MachineType::Uint32()},
+ 1U},
+ {{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
+ kLoong64Cmp, MachineType::Uint32()},
+ 1U}};
+
+// ----------------------------------------------------------------------------
+// Conversion instructions.
+// ----------------------------------------------------------------------------
+
+const Conversion kConversionInstructions[] = {
+ // Conversion instructions are related to machine_operator.h:
+ // FPU conversions:
+ // Convert representation of integers between float64 and int32/uint32.
+ // The precise rounding mode and handling of out of range inputs are *not*
+ // defined for these operators, since they are intended only for use with
+ // integers.
+ {{&RawMachineAssembler::ChangeInt32ToFloat64, "ChangeInt32ToFloat64",
+ kLoong64Int32ToFloat64, MachineType::Float64()},
+ MachineType::Int32()},
+
+ {{&RawMachineAssembler::ChangeUint32ToFloat64, "ChangeUint32ToFloat64",
+ kLoong64Uint32ToFloat64, MachineType::Float64()},
+ MachineType::Int32()},
+
+ {{&RawMachineAssembler::ChangeFloat64ToInt32, "ChangeFloat64ToInt32",
+ kLoong64Float64ToInt32, MachineType::Float64()},
+ MachineType::Int32()},
+
+ {{&RawMachineAssembler::ChangeFloat64ToUint32, "ChangeFloat64ToUint32",
+ kLoong64Float64ToUint32, MachineType::Float64()},
+ MachineType::Int32()}};
+
+// 32-bit instructions whose result feeds a ChangeUint32ToUint64; on LOONG64
+// the selector still emits an explicit Bstrpick_d zero-extension for these
+// (see InstructionSelectorElidedChangeUint32ToUint64Test below).
+const MachInst2 kCanElideChangeUint32ToUint64[] = {
+ {&RawMachineAssembler::Uint32Div, "Uint32Div", kLoong64Div_wu,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Uint32Mod, "Uint32Mod", kLoong64Mod_wu,
+ MachineType::Uint32()},
+ {&RawMachineAssembler::Uint32MulHigh, "Uint32MulHigh", kLoong64Mulh_wu,
+ MachineType::Uint32()}};
+
+} // namespace
+
+using InstructionSelectorFPCmpTest = InstructionSelectorTestWithParam<FPCmp>;
+
+TEST_P(InstructionSelectorFPCmpTest, Parameter) {
+ const FPCmp cmp = GetParam();
+ StreamBuilder m(this, MachineType::Int32(), cmp.mi.machine_type,
+ cmp.mi.machine_type);
+ m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
+ ::testing::ValuesIn(kFPCmpInstructions));
+
+// ----------------------------------------------------------------------------
+// Integer arithmetic compare instructions.
+// ----------------------------------------------------------------------------
+
+using InstructionSelectorCmpTest = InstructionSelectorTestWithParam<IntCmp>;
+
+TEST_P(InstructionSelectorCmpTest, Parameter) {
+ const IntCmp cmp = GetParam();
+ const MachineType type = cmp.mi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+
+ ASSERT_EQ(cmp.expected_size, s.size());
+ EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorCmpTest,
+ ::testing::ValuesIn(kCmpInstructions));
+
+// ----------------------------------------------------------------------------
+// Shift instructions.
+// ----------------------------------------------------------------------------
+
+using InstructionSelectorShiftTest =
+ InstructionSelectorTestWithParam<MachInst2>;
+
+TEST_P(InstructionSelectorShiftTest, Immediate) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ TRACED_FORRANGE(int32_t, imm, 0,
+ ((1 << ElementSizeLog2Of(type.representation())) * 8) - 1) {
+ StreamBuilder m(this, type, type);
+ m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+ EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
+ ::testing::ValuesIn(kShiftInstructions));
+
+TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediate) {
+ // The available shift operand range is `0 <= imm < 32`, but we also test
+ // that immediates outside this range are handled properly (modulo-32).
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ int32_t lsb = shift & 0x1F;
+ TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+ uint32_t jnk = rng()->NextInt();
+ jnk = (lsb > 0) ? (jnk >> (32 - lsb)) : 0;
+ uint32_t msk = ((0xFFFFFFFFu >> (32 - width)) << lsb) | jnk;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)),
+ m.Int32Constant(shift)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Bstrpick_w, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ int32_t lsb = shift & 0x1F;
+ TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+ uint32_t jnk = rng()->NextInt();
+ jnk = (lsb > 0) ? (jnk >> (32 - lsb)) : 0;
+ uint32_t msk = ((0xFFFFFFFFu >> (32 - width)) << lsb) | jnk;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)),
+ m.Int32Constant(shift)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Bstrpick_w, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+}
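+
+// Worked example (sketch): for shift == 35 (lsb == 3) and width == 8 the
+// mask is ((0xFFFFFFFFu >> 24) << 3) == 0x7F8, so the And keeps bits [10:3]
+// and the Shr discards the low three bits; a single bstrpick.w with lsb 3
+// and width 8 extracts exactly that bit field.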
+
+TEST_F(InstructionSelectorTest, Word64ShrWithWord64AndWithImmediate) {
+ // The available shift operand range is `0 <= imm < 64`, but we also test
+ // that immediates outside this range are handled properly (modulo-64).
+ TRACED_FORRANGE(int32_t, shift, -64, 127) {
+ int32_t lsb = shift & 0x3F;
+ TRACED_FORRANGE(int32_t, width, 1, 64 - lsb) {
+ uint64_t jnk = rng()->NextInt64();
+ jnk = (lsb > 0) ? (jnk >> (64 - lsb)) : 0;
+ uint64_t msk =
+ ((uint64_t{0xFFFFFFFFFFFFFFFF} >> (64 - width)) << lsb) | jnk;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Shr(m.Word64And(m.Parameter(0), m.Int64Constant(msk)),
+ m.Int64Constant(shift)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Bstrpick_d, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt64(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt64(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, shift, -64, 127) {
+ int32_t lsb = shift & 0x3F;
+ TRACED_FORRANGE(int32_t, width, 1, 64 - lsb) {
+ uint64_t jnk = rng()->NextInt64();
+ jnk = (lsb > 0) ? (jnk >> (64 - lsb)) : 0;
+ uint64_t msk =
+ ((uint64_t{0xFFFFFFFFFFFFFFFF} >> (64 - width)) << lsb) | jnk;
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Shr(m.Word64And(m.Int64Constant(msk), m.Parameter(0)),
+ m.Int64Constant(shift)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Bstrpick_d, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt64(s[0]->InputAt(1)));
+ EXPECT_EQ(width, s.ToInt64(s[0]->InputAt(2)));
+ }
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word32AndToClearBits) {
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ int32_t mask = ~((1 << shift) - 1);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32And(m.Parameter(0), m.Int32Constant(mask)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Bstrins_w, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(shift, s.ToInt32(s[0]->InputAt(2)));
+ }
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ int32_t mask = ~((1 << shift) - 1);
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32And(m.Int32Constant(mask), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Bstrins_w, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(shift, s.ToInt32(s[0]->InputAt(2)));
+ }
+}
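+
+// Worked example (sketch): for shift == 4 the mask is ~0xF == 0xFFFFFFF0,
+// so the And clears bits [3:0]; this is selected as a bstrins.w inserting
+// four zero bits at lsb 0, matching the asserted operands (0 and shift).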
+
+TEST_F(InstructionSelectorTest, Word64AndToClearBits) {
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ int64_t mask = ~((1 << shift) - 1);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64And(m.Parameter(0), m.Int64Constant(mask)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Bstrins_d, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(shift, s.ToInt32(s[0]->InputAt(2)));
+ }
+ TRACED_FORRANGE(int32_t, shift, 1, 31) {
+ int64_t mask = ~((1 << shift) - 1);
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64And(m.Int64Constant(mask), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Bstrins_d, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(shift, s.ToInt32(s[0]->InputAt(2)));
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Logical instructions.
+// ----------------------------------------------------------------------------
+
+using InstructionSelectorLogicalTest =
+ InstructionSelectorTestWithParam<MachInst2>;
+
+TEST_P(InstructionSelectorLogicalTest, Parameter) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorLogicalTest,
+ ::testing::ValuesIn(kLogicalInstructions));
+
+TEST_F(InstructionSelectorTest, Word64XorMinusOneWithParameter) {
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Xor(m.Parameter(0), m.Int64Constant(-1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Xor(m.Int64Constant(-1), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
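+
+// Xor with -1 is a bitwise NOT; LOONG64 has no separate NOT instruction, so
+// it is presumably emitted as nor rd, rj, zero, which is why these patterns
+// select kLoong64Nor / kLoong64Nor32.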
+
+TEST_F(InstructionSelectorTest, Word32XorMinusOneWithParameter) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Xor(m.Parameter(0), m.Int32Constant(-1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Nor32, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Xor(m.Int32Constant(-1), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Nor32, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word64XorMinusOneWithWord64Or) {
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Xor(m.Word64Or(m.Parameter(0), m.Parameter(0)),
+ m.Int64Constant(-1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Xor(m.Int64Constant(-1),
+ m.Word64Or(m.Parameter(0), m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Nor, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word32XorMinusOneWithWord32Or) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Xor(m.Word32Or(m.Parameter(0), m.Parameter(0)),
+ m.Int32Constant(-1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Nor32, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Xor(m.Int32Constant(-1),
+ m.Word32Or(m.Parameter(0), m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Nor32, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word32AndWithImmediateWithWord32Shr) {
+ // The available shift operand range is `0 <= imm < 32`, but we also test
+ // that immediates outside this range are handled properly (modulo-32).
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ int32_t lsb = shift & 0x1F;
+ TRACED_FORRANGE(int32_t, width, 1, 31) {
+ uint32_t msk = (1 << width) - 1;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)),
+ m.Int32Constant(msk)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Bstrpick_w, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ int32_t actual_width = (lsb + width > 32) ? (32 - lsb) : width;
+ EXPECT_EQ(actual_width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+ TRACED_FORRANGE(int32_t, shift, -32, 63) {
+ int32_t lsb = shift & 0x1F;
+ TRACED_FORRANGE(int32_t, width, 1, 31) {
+ uint32_t msk = (1 << width) - 1;
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(
+ m.Word32And(m.Int32Constant(msk),
+ m.Word32Shr(m.Parameter(0), m.Int32Constant(shift))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Bstrpick_w, s[0]->arch_opcode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+ int32_t actual_width = (lsb + width > 32) ? (32 - lsb) : width;
+ EXPECT_EQ(actual_width, s.ToInt32(s[0]->InputAt(2)));
+ }
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word32ShlWithWord32And) {
+ TRACED_FORRANGE(int32_t, shift, 0, 30) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r =
+ m.Word32Shl(m.Word32And(p0, m.Int32Constant((1 << (31 - shift)) - 1)),
+ m.Int32Constant(shift + 1));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Sll_w, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word64ShlWithWord64And) {
+ TRACED_FORRANGE(int32_t, shift, 0, 62) {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ Node* const p0 = m.Parameter(0);
+ Node* const r =
+ m.Word64Shl(m.Word64And(p0, m.Int64Constant((1L << (63 - shift)) - 1)),
+ m.Int64Constant(shift + 1));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Sll_d, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word32SarWithWord32Shl) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r =
+ m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(24)), m.Int32Constant(24));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Ext_w_b, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r =
+ m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(16)), m.Int32Constant(16));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Ext_w_h, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ Node* const p0 = m.Parameter(0);
+ Node* const r =
+ m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(32)), m.Int32Constant(32));
+ m.Return(r);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Sll_w, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+ }
+}
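+
+// The Shl/Sar pairs above are sign-extensions of the low 8 and 16 bits,
+// which match LOONG64's ext.w.b / ext.w.h. In the last case the 32-bit
+// shift amount is taken modulo 32, so the pattern degenerates to a plain
+// sll.w by 0 rather than an extension.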
+
+// ----------------------------------------------------------------------------
+// MUL/DIV instructions.
+// ----------------------------------------------------------------------------
+
+using InstructionSelectorMulDivTest =
+ InstructionSelectorTestWithParam<MachInst2>;
+
+TEST_P(InstructionSelectorMulDivTest, Parameter) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorMulDivTest,
+ ::testing::ValuesIn(kMulDivInstructions));
+
+// ----------------------------------------------------------------------------
+// MOD instructions.
+// ----------------------------------------------------------------------------
+
+using InstructionSelectorModTest = InstructionSelectorTestWithParam<MachInst2>;
+
+TEST_P(InstructionSelectorModTest, Parameter) {
+ const MachInst2 dpi = GetParam();
+ const MachineType type = dpi.machine_type;
+ StreamBuilder m(this, type, type, type);
+ m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest, InstructionSelectorModTest,
+ ::testing::ValuesIn(kModInstructions));
+
+// ----------------------------------------------------------------------------
+// Floating point instructions.
+// ----------------------------------------------------------------------------
+
+using InstructionSelectorFPArithTest =
+ InstructionSelectorTestWithParam<MachInst2>;
+
+TEST_P(InstructionSelectorFPArithTest, Parameter) {
+ const MachInst2 fpa = GetParam();
+ StreamBuilder m(this, fpa.machine_type, fpa.machine_type, fpa.machine_type);
+ m.Return((m.*fpa.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(fpa.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorFPArithTest,
+ ::testing::ValuesIn(kFPArithInstructions));
+
+// ----------------------------------------------------------------------------
+// Integer arithmetic.
+// ----------------------------------------------------------------------------
+using InstructionSelectorIntArithTwoTest =
+ InstructionSelectorTestWithParam<MachInst2>;
+
+TEST_P(InstructionSelectorIntArithTwoTest, Parameter) {
+ const MachInst2 intpa = GetParam();
+ StreamBuilder m(this, intpa.machine_type, intpa.machine_type,
+ intpa.machine_type);
+ m.Return((m.*intpa.constructor)(m.Parameter(0), m.Parameter(1)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(intpa.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorIntArithTwoTest,
+ ::testing::ValuesIn(kAddSubInstructions));
+
+// ----------------------------------------------------------------------------
+// One node.
+// ----------------------------------------------------------------------------
+
+using InstructionSelectorIntArithOneTest =
+ InstructionSelectorTestWithParam<MachInst1>;
+
+TEST_P(InstructionSelectorIntArithOneTest, Parameter) {
+ const MachInst1 intpa = GetParam();
+ StreamBuilder m(this, intpa.machine_type, intpa.machine_type,
+ intpa.machine_type);
+ m.Return((m.*intpa.constructor)(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(intpa.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
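+
+// Note: the unary negations above take two inputs because they are selected
+// as a subtraction (kLoong64Sub_w/_d), presumably from the zero register.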
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorIntArithOneTest,
+ ::testing::ValuesIn(kAddSubOneInstructions));
+
+// ----------------------------------------------------------------------------
+// Conversions.
+// ----------------------------------------------------------------------------
+
+using InstructionSelectorConversionTest =
+ InstructionSelectorTestWithParam<Conversion>;
+
+TEST_P(InstructionSelectorConversionTest, Parameter) {
+ const Conversion conv = GetParam();
+ StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type);
+ m.Return((m.*conv.mi.constructor)(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorConversionTest,
+ ::testing::ValuesIn(kConversionInstructions));
+
+TEST_F(InstructionSelectorTest, ChangesFromToSmi) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.TruncateInt64ToInt32(
+ m.Word64Sar(m.Parameter(0), m.Int32Constant(32))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Sra_d, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(
+ m.Word64Shl(m.ChangeInt32ToInt64(m.Parameter(0)), m.Int32Constant(32)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Sll_d, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
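+
+// A Smi on 64-bit ports keeps its payload in the upper 32 bits of the word,
+// so untagging is the sra.d-by-32 checked above and tagging is the
+// corresponding sll.d-by-32; hence the test name ChangesFromToSmi.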
+
+TEST_F(InstructionSelectorTest, ChangeFloat64ToInt32OfChangeFloat32ToFloat64) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Float32());
+ m.Return(m.ChangeFloat64ToInt32(m.ChangeFloat32ToFloat64(m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kLoong64Float32ToFloat64, s[0]->arch_opcode());
+ EXPECT_EQ(kLoong64Float64ToInt32, s[1]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_F(InstructionSelectorTest,
+ TruncateFloat64ToFloat32OfChangeInt32ToFloat64) {
+ {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Int32());
+ m.Return(
+ m.TruncateFloat64ToFloat32(m.ChangeInt32ToFloat64(m.Parameter(0))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Int32ToFloat32, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_F(InstructionSelectorTest, CombineShiftsWithMul) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Int32Mul(m.Word64Sar(m.Parameter(0), m.Int32Constant(32)),
+ m.Word64Sar(m.Parameter(0), m.Int32Constant(32))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Mulh_d, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
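+
+// Why this is sound (sketch): when both operands are Smi-untagging shifts,
+// the underlying 64-bit values have zero low halves, i.e. x == a * 2^32 and
+// y == b * 2^32. The high 64 bits of x * y are then exactly a * b, so a
+// single mulh.d replaces both untaggings and the 32-bit multiply.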
+
+TEST_F(InstructionSelectorTest, CombineShiftsWithDivMod) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Int32Div(m.Word64Sar(m.Parameter(0), m.Int32Constant(32)),
+ m.Word64Sar(m.Parameter(0), m.Int32Constant(32))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Div_d, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Int32Mod(m.Word64Sar(m.Parameter(0), m.Int32Constant(32)),
+ m.Word64Sar(m.Parameter(0), m.Int32Constant(32))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Mod_d, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
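+
+// The same trick works for division, where the 2^32 factors cancel exactly
+// ((a * 2^32) / (b * 2^32) == a / b); for modulus the remainder comes out
+// as (a mod b) * 2^32, i.e. still in shifted form.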
+
+TEST_F(InstructionSelectorTest, ChangeInt32ToInt64AfterLoad) {
+ // For each case, test that the conversion is merged into the load
+ // operation.
+ // ChangeInt32ToInt64(Load_Uint8) -> Ld_bu
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Uint8(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Ld_bu, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Int8) -> Ld_b
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Int8(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Ld_b, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Uint16) -> Ld_hu
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Uint16(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Ld_hu, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Int16) -> Ld_h
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Int16(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Ld_h, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Uint32) -> Ld_w
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Uint32(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Ld_w, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // ChangeInt32ToInt64(Load_Int32) -> Ld_w
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeInt32ToInt64(
+ m.Load(MachineType::Int32(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Ld_w, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
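+
+// All of the above rely on LOONG64 loads filling the whole 64-bit register:
+// ld.b/ld.h/ld.w sign-extend and ld.bu/ld.hu zero-extend, so the explicit
+// ChangeInt32ToInt64 can be folded into the load opcode itself.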
+
+using InstructionSelectorElidedChangeUint32ToUint64Test =
+ InstructionSelectorTestWithParam<MachInst2>;
+
+TEST_P(InstructionSelectorElidedChangeUint32ToUint64Test, Parameter) {
+ const MachInst2 binop = GetParam();
+ StreamBuilder m(this, MachineType::Uint64(), binop.machine_type,
+ binop.machine_type);
+ m.Return(m.ChangeUint32ToUint64(
+ (m.*binop.constructor)(m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ // The `ChangeUint32ToUint64` node is not elided here: it is lowered to an
+ // explicit Bstrpick_d zero-extension following the binop.
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(binop.arch_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kLoong64Bstrpick_d, s[1]->arch_opcode());
+ EXPECT_EQ(3U, s[1]->InputCount());
+ EXPECT_EQ(1U, s[1]->OutputCount());
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorElidedChangeUint32ToUint64Test,
+ ::testing::ValuesIn(kCanElideChangeUint32ToUint64));
+
+TEST_F(InstructionSelectorTest, ChangeUint32ToUint64AfterLoad) {
+ // For each case, make sure the `ChangeUint32ToUint64` node turned into a
+ // no-op.
+
+ // Ld_bu
+ {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeUint32ToUint64(
+ m.Load(MachineType::Uint8(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Ld_bu, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // Ld_hu
+ {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeUint32ToUint64(
+ m.Load(MachineType::Uint16(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Ld_hu, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+ // Ld_wu
+ {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.ChangeUint32ToUint64(
+ m.Load(MachineType::Uint32(), m.Parameter(0), m.Parameter(1))));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Ld_wu, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Loads and stores.
+// ----------------------------------------------------------------------------
+
+namespace {
+
+struct MemoryAccess {
+ MachineType type;
+ ArchOpcode load_opcode;
+ ArchOpcode store_opcode;
+};
+
+static const MemoryAccess kMemoryAccesses[] = {
+ {MachineType::Int8(), kLoong64Ld_b, kLoong64St_b},
+ {MachineType::Uint8(), kLoong64Ld_bu, kLoong64St_b},
+ {MachineType::Int16(), kLoong64Ld_h, kLoong64St_h},
+ {MachineType::Uint16(), kLoong64Ld_hu, kLoong64St_h},
+ {MachineType::Int32(), kLoong64Ld_w, kLoong64St_w},
+ {MachineType::Float32(), kLoong64Fld_s, kLoong64Fst_s},
+ {MachineType::Float64(), kLoong64Fld_d, kLoong64Fst_d},
+ {MachineType::Int64(), kLoong64Ld_d, kLoong64St_d}};
+
+struct MemoryAccessImm {
+ MachineType type;
+ ArchOpcode load_opcode;
+ ArchOpcode store_opcode;
+ bool (InstructionSelectorTest::Stream::*val_predicate)(
+ const InstructionOperand*) const;
+ const int32_t immediates[40];
+};
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccessImm& acc) {
+ return os << acc.type;
+}
+
+struct MemoryAccessImm1 {
+ MachineType type;
+ ArchOpcode load_opcode;
+ ArchOpcode store_opcode;
+ bool (InstructionSelectorTest::Stream::*val_predicate)(
+ const InstructionOperand*) const;
+ const int32_t immediates[5];
+};
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccessImm1& acc) {
+ return os << acc.type;
+}
+
+struct MemoryAccessImm2 {
+ MachineType type;
+ ArchOpcode store_opcode;
+ ArchOpcode store_opcode_unaligned;
+ bool (InstructionSelectorTest::Stream::*val_predicate)(
+ const InstructionOperand*) const;
+ const int32_t immediates[40];
+};
+
+// ----------------------------------------------------------------------------
+// Loads and stores with an immediate index.
+// ----------------------------------------------------------------------------
+
+const MemoryAccessImm kMemoryAccessesImm[] = {
+ {MachineType::Int8(),
+ kLoong64Ld_b,
+ kLoong64St_b,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {MachineType::Uint8(),
+ kLoong64Ld_bu,
+ kLoong64St_b,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {MachineType::Int16(),
+ kLoong64Ld_h,
+ kLoong64St_h,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {MachineType::Uint16(),
+ kLoong64Ld_hu,
+ kLoong64St_h,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {MachineType::Int32(),
+ kLoong64Ld_w,
+ kLoong64St_w,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {MachineType::Float32(),
+ kLoong64Fld_s,
+ kLoong64Fst_s,
+ &InstructionSelectorTest::Stream::IsDouble,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {MachineType::Float64(),
+ kLoong64Fld_d,
+ kLoong64Fst_d,
+ &InstructionSelectorTest::Stream::IsDouble,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+ {MachineType::Int64(),
+ kLoong64Ld_d,
+ kLoong64St_d,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91,
+ -89, -87, -86, -82, -44, -23, -3, 0, 7, 10,
+ 39, 52, 69, 71, 91, 92, 107, 109, 115, 124,
+ 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}}};
+
+const MemoryAccessImm1 kMemoryAccessImmMoreThan16bit[] = {
+ {MachineType::Int8(),
+ kLoong64Ld_b,
+ kLoong64St_b,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-65000, -55000, 32777, 55000, 65000}},
+ {MachineType::Uint8(),
+ kLoong64Ld_bu,
+ kLoong64St_b,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-65000, -55000, 32777, 55000, 65000}},
+ {MachineType::Int16(),
+ kLoong64Ld_h,
+ kLoong64St_h,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-65000, -55000, 32777, 55000, 65000}},
+ {MachineType::Uint16(),
+ kLoong64Ld_hu,
+ kLoong64St_h,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-65000, -55000, 32777, 55000, 65000}},
+ {MachineType::Int32(),
+ kLoong64Ld_w,
+ kLoong64St_w,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-65000, -55000, 32777, 55000, 65000}},
+ {MachineType::Float32(),
+ kLoong64Fld_s,
+ kLoong64Fst_s,
+ &InstructionSelectorTest::Stream::IsDouble,
+ {-65000, -55000, 32777, 55000, 65000}},
+ {MachineType::Float64(),
+ kLoong64Fld_d,
+ kLoong64Fst_d,
+ &InstructionSelectorTest::Stream::IsDouble,
+ {-65000, -55000, 32777, 55000, 65000}},
+ {MachineType::Int64(),
+ kLoong64Ld_d,
+ kLoong64St_d,
+ &InstructionSelectorTest::Stream::IsInteger,
+ {-65000, -55000, 32777, 55000, 65000}}};
+
+} // namespace
+
+using InstructionSelectorMemoryAccessTest =
+ InstructionSelectorTestWithParam<MemoryAccess>;
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
+ const MemoryAccess memacc = GetParam();
+ StreamBuilder m(this, memacc.type, MachineType::Pointer(),
+ MachineType::Int32());
+ m.Return(m.Load(memacc.type, m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+}
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
+ const MemoryAccess memacc = GetParam();
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ MachineType::Int32(), memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0), m.Parameter(1),
+ kNoWriteBarrier);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessTest,
+ ::testing::ValuesIn(kMemoryAccesses));
+
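The pattern above is stock googletest value-parameterization: a plain struct per table row, a TEST_P body that reads GetParam(), and INSTANTIATE_TEST_SUITE_P feeding it the table (the operator<< overloads earlier exist so each instance prints readably). A minimal self-contained sketch of the same machinery, with illustrative names rather than V8's:

    #include <gtest/gtest.h>

    // One table entry per test instance.
    struct Case {
      int input;
      int expected;
    };

    class DoubleTest : public ::testing::TestWithParam<Case> {};

    // Runs once per entry in kCases.
    TEST_P(DoubleTest, DoublesInput) {
      const Case c = GetParam();
      EXPECT_EQ(c.expected, 2 * c.input);
    }

    const Case kCases[] = {{0, 0}, {1, 2}, {-3, -6}};

    INSTANTIATE_TEST_SUITE_P(AllCases, DoubleTest, ::testing::ValuesIn(kCases));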
+// ----------------------------------------------------------------------------
+// Load immediate.
+// ----------------------------------------------------------------------------
+
+using InstructionSelectorMemoryAccessImmTest =
+ InstructionSelectorTestWithParam<MemoryAccessImm>;
+
+TEST_P(InstructionSelectorMemoryAccessImmTest, LoadWithImmediateIndex) {
+ const MemoryAccessImm memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, memacc.type, MachineType::Pointer());
+ m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_TRUE((s.*memacc.val_predicate)(s[0]->Output()));
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Store immediate.
+// ----------------------------------------------------------------------------
+
+TEST_P(InstructionSelectorMemoryAccessImmTest, StoreWithImmediateIndex) {
+ const MemoryAccessImm memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Parameter(1), kNoWriteBarrier);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ }
+}
+
+TEST_P(InstructionSelectorMemoryAccessImmTest, StoreZero) {
+ const MemoryAccessImm memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer());
+ m.Store(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Int32Constant(0), kNoWriteBarrier);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ ASSERT_EQ(3U, s[0]->InputCount());
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(2)->kind());
+ EXPECT_EQ(0, s.ToInt64(s[0]->InputAt(2)));
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessImmTest,
+ ::testing::ValuesIn(kMemoryAccessesImm));
+
+// ----------------------------------------------------------------------------
+// Load/store offsets more than 16 bits.
+// ----------------------------------------------------------------------------
+
+using InstructionSelectorMemoryAccessImmMoreThan16bitTest =
+ InstructionSelectorTestWithParam<MemoryAccessImm1>;
+
+TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
+ LoadWithImmediateIndex) {
+ const MemoryAccessImm1 memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, memacc.type, MachineType::Pointer());
+ m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
+ StoreWithImmediateIndex) {
+ const MemoryAccessImm1 memacc = GetParam();
+ TRACED_FOREACH(int32_t, index, memacc.immediates) {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Pointer(),
+ memacc.type);
+ m.Store(memacc.type.representation(), m.Parameter(0),
+ m.Int32Constant(index), m.Parameter(1), kNoWriteBarrier);
+ m.Return(m.Int32Constant(0));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+ EXPECT_EQ(3U, s[0]->InputCount());
+ EXPECT_EQ(0U, s[0]->OutputCount());
+ }
+}
+
+INSTANTIATE_TEST_SUITE_P(InstructionSelectorTest,
+ InstructionSelectorMemoryAccessImmMoreThan16bitTest,
+ ::testing::ValuesIn(kMemoryAccessImmMoreThan16bit));
+
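These cases probe the fallback path: once a constant offset no longer fits the load/store instruction's immediate field, the selector is expected to emit register+register addressing (kMode_MRR) instead of base+immediate (kMode_MRI). A rough standalone sketch of that decision, with invented names and the field width left as a parameter:

    #include <cstdint>

    // Does a signed value fit in an immediate field of `bits` bits?
    bool FitsImmediateField(int64_t value, int bits) {
      const int64_t limit = int64_t{1} << (bits - 1);
      return value >= -limit && value < limit;
    }

    enum class AddressingMode { kMRI, kMRR };  // base+immediate vs. base+register

    // If the index fits, fold it into the instruction; otherwise materialize
    // it in a register first and use register+register addressing.
    AddressingMode PickMode(int64_t index, int immediate_bits) {
      return FitsImmediateField(index, immediate_bits) ? AddressingMode::kMRI
                                                       : AddressingMode::kMRR;
    }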
+// ----------------------------------------------------------------------------
+// kLoong64Cmp with zero testing.
+// ----------------------------------------------------------------------------
+
+TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Cmp, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Cmp, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word64EqualWithZero) {
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Equal(m.Parameter(0), m.Int64Constant(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Cmp, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64Equal(m.Int64Constant(0), m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Cmp, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+ EXPECT_EQ(kEqual, s[0]->flags_condition());
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word32Clz) {
+ StreamBuilder m(this, MachineType::Uint32(), MachineType::Uint32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word32Clz(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Clz_w, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+TEST_F(InstructionSelectorTest, Word64Clz) {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Uint64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Word64Clz(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Clz_d, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+TEST_F(InstructionSelectorTest, Float32Abs) {
+ StreamBuilder m(this, MachineType::Float32(), MachineType::Float32());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float32Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Float32Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+TEST_F(InstructionSelectorTest, Float64Abs) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const n = m.Float64Abs(p0);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Float64Abs, s[0]->arch_opcode());
+ ASSERT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+TEST_F(InstructionSelectorTest, Float64Max) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Float64Max(p0, p1);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Float64Max, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+TEST_F(InstructionSelectorTest, Float64Min) {
+ StreamBuilder m(this, MachineType::Float64(), MachineType::Float64(),
+ MachineType::Float64());
+ Node* const p0 = m.Parameter(0);
+ Node* const p1 = m.Parameter(1);
+ Node* const n = m.Float64Min(p0, p1);
+ m.Return(n);
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64Float64Min, s[0]->arch_opcode());
+ ASSERT_EQ(2U, s[0]->InputCount());
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+TEST_F(InstructionSelectorTest, LoadAndShiftRight) {
+ {
+ int32_t immediates[] = {-256, -255, -3, -2, -1, 0, 1,
+ 2, 3, 255, 256, 260, 4096, 4100,
+ 8192, 8196, 3276, 3280, 16376, 16380};
+ TRACED_FOREACH(int32_t, index, immediates) {
+ StreamBuilder m(this, MachineType::Uint64(), MachineType::Pointer());
+ Node* const load =
+ m.Load(MachineType::Uint64(), m.Parameter(0), m.Int32Constant(index));
+ Node* const sar = m.Word64Sar(load, m.Int32Constant(32));
+ // Make sure we don't fold the shift into the following add:
+ m.Return(m.Int64Add(sar, m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(2U, s.size());
+ EXPECT_EQ(kLoong64Ld_w, s[0]->arch_opcode());
+ EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+ EXPECT_EQ(2U, s[0]->InputCount());
+ EXPECT_EQ(s.ToVreg(m.Parameter(0)), s.ToVreg(s[0]->InputAt(0)));
+ ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+ EXPECT_EQ(index + 4, s.ToInt32(s[0]->InputAt(1)));
+ ASSERT_EQ(1U, s[0]->OutputCount());
+ }
+ }
+}
+
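The notable assertion here is kLoong64Ld_w at index + 4: on a little-endian target the upper 32 bits of a 64-bit value live 4 bytes above its base, so a 64-bit load followed by an arithmetic right shift by 32 can be folded into a single 32-bit load. A standalone illustration of the equivalence (not V8 code):

    #include <cstdint>
    #include <cstring>

    // 32-bit load of the high word, as the selector emits it.
    int32_t HighWordViaNarrowLoad(const unsigned char* p, int offset) {
      int32_t hi;
      std::memcpy(&hi, p + offset + 4, sizeof(hi));  // little-endian layout
      return hi;
    }

    // 64-bit load plus arithmetic shift, as the graph originally expressed it.
    int32_t HighWordViaShift(const unsigned char* p, int offset) {
      int64_t v;
      std::memcpy(&v, p + offset, sizeof(v));
      return static_cast<int32_t>(v >> 32);
    }

    // On little-endian hardware both functions return the same value for any
    // 8-byte-readable p + offset.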
+TEST_F(InstructionSelectorTest, Word32ReverseBytes) {
+ {
+ StreamBuilder m(this, MachineType::Int32(), MachineType::Int32());
+ m.Return(m.Word32ReverseBytes(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64ByteSwap32, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+TEST_F(InstructionSelectorTest, Word64ReverseBytes) {
+ {
+ StreamBuilder m(this, MachineType::Int64(), MachineType::Int64());
+ m.Return(m.Word64ReverseBytes(m.Parameter(0)));
+ Stream s = m.Build();
+ ASSERT_EQ(1U, s.size());
+ EXPECT_EQ(kLoong64ByteSwap64, s[0]->arch_opcode());
+ EXPECT_EQ(1U, s[0]->InputCount());
+ EXPECT_EQ(1U, s[0]->OutputCount());
+ }
+}
+
+} // namespace compiler
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
index c432229a35..fa8d45c782 100644
--- a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
+++ b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
@@ -3,12 +3,15 @@
// found in the LICENSE file.
#include "src/compiler/machine-operator-reducer.h"
+
#include <limits>
+
#include "src/base/bits.h"
#include "src/base/division-by-constant.h"
#include "src/base/ieee754.h"
#include "src/base/overflowing-math.h"
#include "src/compiler/js-graph.h"
+#include "src/compiler/machine-operator.h"
#include "src/compiler/typer.h"
#include "src/numbers/conversions-inl.h"
#include "test/unittests/compiler/graph-unittest.h"
@@ -29,7 +32,8 @@ class MachineOperatorReducerTest : public GraphTest {
public:
explicit MachineOperatorReducerTest(int num_parameters = 2)
: GraphTest(num_parameters),
- machine_(zone()),
+ machine_(zone(), MachineType::PointerRepresentation(),
+ MachineOperatorBuilder::kAllOptionalOps),
common_(zone()),
javascript_(zone()),
jsgraph_(isolate(), graph(), &common_, &javascript_, nullptr,
@@ -2880,6 +2884,27 @@ TEST_F(MachineOperatorReducerTest, StoreRepWord16WithWord32SarAndWord32Shl) {
}
}
+TEST_F(MachineOperatorReducerTest, Select) {
+ static const std::vector<const Operator*> ops = {
+ machine()->Float32Select().op(), machine()->Float64Select().op(),
+ machine()->Word32Select().op(), machine()->Word64Select().op()};
+
+ TRACED_FOREACH(const Operator*, op, ops) {
+ Node* arg0 = Parameter(0);
+ Node* arg1 = Parameter(1);
+
+ Node* select_true = graph()->NewNode(op, Int32Constant(1), arg0, arg1);
+ Reduction r_true = Reduce(select_true);
+ ASSERT_TRUE(r_true.Changed());
+ EXPECT_THAT(r_true.replacement(), IsParameter(0));
+
+ Node* select_false = graph()->NewNode(op, Int32Constant(0), arg0, arg1);
+ Reduction r_false = Reduce(select_false);
+ ASSERT_TRUE(r_false.Changed());
+ EXPECT_THAT(r_false.replacement(), IsParameter(1));
+ }
+}
+
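The new reducer test checks one rule across all four Select operators: a select whose condition is a known constant collapses to the matching input. Stated as plain C++ rather than TurboFan IR, the rule amounts to:

    // select(c, a, b) with a compile-time-known condition reduces to a or b;
    // nonzero picks the first value input, zero the second.
    template <typename T>
    T FoldSelect(int condition, T if_true, T if_false) {
      return condition != 0 ? if_true : if_false;
    }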
} // namespace compiler
} // namespace internal
} // namespace v8
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.cc b/deps/v8/test/unittests/compiler/node-test-utils.cc
index 5305fef574..b449faee8d 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.cc
+++ b/deps/v8/test/unittests/compiler/node-test-utils.cc
@@ -1150,7 +1150,6 @@ class IsStoreElementMatcher final : public TestNodeMatcher {
LOAD_MATCHER(Load)
LOAD_MATCHER(UnalignedLoad)
-LOAD_MATCHER(PoisonedLoad)
LOAD_MATCHER(LoadFromObject)
class IsLoadImmutableMatcher final : public TestNodeMatcher {
@@ -2103,16 +2102,6 @@ Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
effect_matcher, control_matcher));
}
-Matcher<Node*> IsPoisonedLoad(const Matcher<LoadRepresentation>& rep_matcher,
- const Matcher<Node*>& base_matcher,
- const Matcher<Node*>& index_matcher,
- const Matcher<Node*>& effect_matcher,
- const Matcher<Node*>& control_matcher) {
- return MakeMatcher(new IsPoisonedLoadMatcher(rep_matcher, base_matcher,
- index_matcher, effect_matcher,
- control_matcher));
-}
-
Matcher<Node*> IsUnalignedLoad(const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
@@ -2366,7 +2355,6 @@ IS_UNOP_MATCHER(Word32Ctz)
IS_UNOP_MATCHER(Word32Popcnt)
IS_UNOP_MATCHER(Word32ReverseBytes)
IS_UNOP_MATCHER(SpeculativeToNumber)
-IS_UNOP_MATCHER(TaggedPoisonOnSpeculation)
#undef IS_UNOP_MATCHER
// Special-case Bitcast operators which are disabled when ENABLE_VERIFY_CSA is
diff --git a/deps/v8/test/unittests/compiler/node-test-utils.h b/deps/v8/test/unittests/compiler/node-test-utils.h
index 0e5e99679c..f727a14c34 100644
--- a/deps/v8/test/unittests/compiler/node-test-utils.h
+++ b/deps/v8/test/unittests/compiler/node-test-utils.h
@@ -328,11 +328,6 @@ Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& index_matcher,
const Matcher<Node*>& effect_matcher,
const Matcher<Node*>& control_matcher);
-Matcher<Node*> IsPoisonedLoad(const Matcher<LoadRepresentation>& rep_matcher,
- const Matcher<Node*>& base_matcher,
- const Matcher<Node*>& index_matcher,
- const Matcher<Node*>& effect_matcher,
- const Matcher<Node*>& control_matcher);
Matcher<Node*> IsUnalignedLoad(const Matcher<LoadRepresentation>& rep_matcher,
const Matcher<Node*>& base_matcher,
const Matcher<Node*>& index_matcher,
@@ -486,7 +481,6 @@ Matcher<Node*> IsNumberToBoolean(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsNumberToInt32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsNumberToUint32(const Matcher<Node*>& input_matcher);
Matcher<Node*> IsParameter(const Matcher<int> index_matcher);
-Matcher<Node*> IsSpeculationPoison();
Matcher<Node*> IsLoadFramePointer();
Matcher<Node*> IsLoadParentFramePointer();
Matcher<Node*> IsPlainPrimitiveToNumber(const Matcher<Node*>& input_matcher);
diff --git a/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc b/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
index 6387f814e1..6eddb961ca 100644
--- a/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
+++ b/deps/v8/test/unittests/compiler/simplified-lowering-unittest.cc
@@ -49,9 +49,8 @@ class SimplifiedLoweringTest : public GraphTest {
Linkage* linkage = zone()->New<Linkage>(Linkage::GetJSCallDescriptor(
zone(), false, num_parameters_ + 1, CallDescriptor::kCanUseRoots));
- SimplifiedLowering lowering(
- jsgraph(), broker(), zone(), source_positions(), node_origins(),
- PoisoningMitigationLevel::kDontPoison, tick_counter(), linkage);
+ SimplifiedLowering lowering(jsgraph(), broker(), zone(), source_positions(),
+ node_origins(), tick_counter(), linkage);
lowering.LowerAllNodes();
}
diff --git a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
index d2591a52ef..2adac76d66 100644
--- a/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
+++ b/deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
@@ -1579,7 +1579,7 @@ TEST_F(InstructionSelectorTest, Float32Abs) {
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSEFloat32Abs, s[0]->arch_opcode());
+ EXPECT_EQ(kX64Float32Abs, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
@@ -1594,7 +1594,7 @@ TEST_F(InstructionSelectorTest, Float32Abs) {
m.Return(n);
Stream s = m.Build(AVX);
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kAVXFloat32Abs, s[0]->arch_opcode());
+ EXPECT_EQ(kX64Float32Abs, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
@@ -1612,7 +1612,7 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
m.Return(n);
Stream s = m.Build();
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kSSEFloat64Abs, s[0]->arch_opcode());
+ EXPECT_EQ(kX64Float64Abs, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
@@ -1627,7 +1627,7 @@ TEST_F(InstructionSelectorTest, Float64Abs) {
m.Return(n);
Stream s = m.Build(AVX);
ASSERT_EQ(1U, s.size());
- EXPECT_EQ(kAVXFloat64Abs, s[0]->arch_opcode());
+ EXPECT_EQ(kX64Float64Abs, s[0]->arch_opcode());
ASSERT_EQ(1U, s[0]->InputCount());
EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
ASSERT_EQ(1U, s[0]->OutputCount());
diff --git a/deps/v8/test/unittests/debug/debug-property-iterator-unittest.cc b/deps/v8/test/unittests/debug/debug-property-iterator-unittest.cc
index 1b1fcf21c3..fa14c7dc0a 100644
--- a/deps/v8/test/unittests/debug/debug-property-iterator-unittest.cc
+++ b/deps/v8/test/unittests/debug/debug-property-iterator-unittest.cc
@@ -2,7 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "include/v8.h"
+#include "include/v8-exception.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-object.h"
+#include "include/v8-primitive.h"
+#include "include/v8-template.h"
#include "src/api/api.h"
#include "src/objects/objects-inl.h"
#include "test/unittests/test-utils.h"
diff --git a/deps/v8/test/unittests/diagnostics/gdb-jit-unittest.cc b/deps/v8/test/unittests/diagnostics/gdb-jit-unittest.cc
new file mode 100644
index 0000000000..5519b7be58
--- /dev/null
+++ b/deps/v8/test/unittests/diagnostics/gdb-jit-unittest.cc
@@ -0,0 +1,58 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/diagnostics/gdb-jit.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+namespace GDBJITInterface {
+
+#ifdef ENABLE_GDB_JIT_INTERFACE
+TEST(GDBJITTest, OverlapEntries) {
+ ClearCodeMapForTesting();
+
+ base::AddressRegion ar{10, 10};
+ AddRegionForTesting(ar);
+
+ // Full containment.
+ ASSERT_EQ(1u, NumOverlapEntriesForTesting({11, 2}));
+ // Overlap start.
+ ASSERT_EQ(1u, NumOverlapEntriesForTesting({5, 10}));
+ // Overlap end.
+ ASSERT_EQ(1u, NumOverlapEntriesForTesting({15, 10}));
+
+ // No overlap.
+ // Completely smaller.
+ ASSERT_EQ(0u, NumOverlapEntriesForTesting({5, 5}));
+ // Completely bigger.
+ ASSERT_EQ(0u, NumOverlapEntriesForTesting({20, 10}));
+
+ AddRegionForTesting({20, 10});
+ // Now we have 2 code entries that don't overlap:
+ //   [ entry 1 ][ entry 2 ]
+ //   ^ 10       ^ 20
+
+ // Overlap none.
+ ASSERT_EQ(0u, NumOverlapEntriesForTesting({0, 5}));
+ ASSERT_EQ(0u, NumOverlapEntriesForTesting({30, 5}));
+
+ // Overlap one.
+ ASSERT_EQ(1u, NumOverlapEntriesForTesting({15, 5}));
+ ASSERT_EQ(1u, NumOverlapEntriesForTesting({20, 5}));
+
+ // Overlap both.
+ ASSERT_EQ(2u, NumOverlapEntriesForTesting({15, 10}));
+ ASSERT_EQ(2u, NumOverlapEntriesForTesting({5, 20}));
+ ASSERT_EQ(2u, NumOverlapEntriesForTesting({15, 20}));
+ ASSERT_EQ(2u, NumOverlapEntriesForTesting({0, 40}));
+
+ ClearCodeMapForTesting();
+}
+#endif
+
+} // namespace GDBJITInterface
+} // namespace internal
+} // namespace v8
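The expected counts in the new gdb-jit test follow the usual half-open interval rule: two regions [base, base + size) overlap exactly when each starts before the other ends. A sketch of the predicate the assertions implicitly assume (names invented):

    #include <cstdint>

    struct Region {
      uintptr_t base;
      uintptr_t size;
    };

    // [a.base, a.base + a.size) and [b.base, b.base + b.size) intersect iff
    // each starts strictly before the other ends.
    bool Overlaps(const Region& a, const Region& b) {
      return a.base < b.base + b.size && b.base < a.base + a.size;
    }

As a cross-check against the test: {10, 10} covers [10, 20), so {5, 5} = [5, 10) misses it while {5, 10} = [5, 15) hits it.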
diff --git a/deps/v8/test/unittests/execution/microtask-queue-unittest.cc b/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
index 6c136b7b33..eb79e26e62 100644
--- a/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
+++ b/deps/v8/test/unittests/execution/microtask-queue-unittest.cc
@@ -9,6 +9,7 @@
#include <memory>
#include <vector>
+#include "include/v8-function.h"
#include "src/heap/factory.h"
#include "src/objects/foreign.h"
#include "src/objects/js-array-inl.h"
diff --git a/deps/v8/test/unittests/heap/cppgc/heap-unittest.cc b/deps/v8/test/unittests/heap/cppgc/heap-unittest.cc
index bc9149e9b2..315d1ff962 100644
--- a/deps/v8/test/unittests/heap/cppgc/heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/heap-unittest.cc
@@ -9,6 +9,7 @@
#include <numeric>
#include "include/cppgc/allocation.h"
+#include "include/cppgc/cross-thread-persistent.h"
#include "include/cppgc/heap-consistency.h"
#include "include/cppgc/persistent.h"
#include "include/cppgc/prefinalizer.h"
@@ -311,7 +312,8 @@ TEST_F(GCHeapTest, TerminateInvokesDestructor) {
namespace {
-class Cloner final : public GarbageCollected<Cloner> {
+template <template <typename> class PersistentType>
+class Cloner final : public GarbageCollected<Cloner<PersistentType>> {
public:
static size_t destructor_count;
@@ -330,25 +332,41 @@ class Cloner final : public GarbageCollected<Cloner> {
void Trace(Visitor*) const {}
private:
- static Persistent<Cloner> new_instance_;
+ static PersistentType<Cloner> new_instance_;
cppgc::AllocationHandle& handle_;
size_t count_;
};
-Persistent<Cloner> Cloner::new_instance_;
-size_t Cloner::destructor_count;
+// static
+template <template <typename> class PersistentType>
+PersistentType<Cloner<PersistentType>> Cloner<PersistentType>::new_instance_;
+// static
+template <template <typename> class PersistentType>
+size_t Cloner<PersistentType>::destructor_count;
} // namespace
-TEST_F(GCHeapTest, TerminateReclaimsNewState) {
- Persistent<Cloner> cloner = MakeGarbageCollected<Cloner>(
- GetAllocationHandle(), GetAllocationHandle(), 1);
- Cloner::destructor_count = 0;
+template <template <typename> class PersistentType>
+void TerminateReclaimsNewState(std::shared_ptr<Platform> platform) {
+ auto heap = cppgc::Heap::Create(platform);
+ using ClonerImpl = Cloner<PersistentType>;
+ Persistent<ClonerImpl> cloner = MakeGarbageCollected<ClonerImpl>(
+ heap->GetAllocationHandle(), heap->GetAllocationHandle(), 1);
+ ClonerImpl::destructor_count = 0;
EXPECT_TRUE(cloner.Get());
- Heap::From(GetHeap())->Terminate();
+ Heap::From(heap.get())->Terminate();
EXPECT_FALSE(cloner.Get());
- EXPECT_EQ(2u, Cloner::destructor_count);
+ EXPECT_EQ(2u, ClonerImpl::destructor_count);
+}
+
+TEST_F(GCHeapTest, TerminateReclaimsNewState) {
+ TerminateReclaimsNewState<Persistent>(GetPlatformHandle());
+ TerminateReclaimsNewState<WeakPersistent>(GetPlatformHandle());
+ TerminateReclaimsNewState<cppgc::subtle::CrossThreadPersistent>(
+ GetPlatformHandle());
+ TerminateReclaimsNewState<cppgc::subtle::WeakCrossThreadPersistent>(
+ GetPlatformHandle());
}
TEST_F(GCHeapDeathTest, TerminateProhibitsAllocation) {
@@ -357,14 +375,24 @@ TEST_F(GCHeapDeathTest, TerminateProhibitsAllocation) {
"");
}
-TEST_F(GCHeapDeathTest, LargeChainOfNewStates) {
- Persistent<Cloner> cloner = MakeGarbageCollected<Cloner>(
- GetAllocationHandle(), GetAllocationHandle(), 1000);
- Cloner::destructor_count = 0;
+template <template <typename> class PersistentType>
+void LargeChainOfNewStates(cppgc::Heap& heap) {
+ using ClonerImpl = Cloner<PersistentType>;
+ Persistent<ClonerImpl> cloner = MakeGarbageCollected<ClonerImpl>(
+ heap.GetAllocationHandle(), heap.GetAllocationHandle(), 1000);
+ ClonerImpl::destructor_count = 0;
EXPECT_TRUE(cloner.Get());
// Terminate() requires destructors to stop creating new state within a few
// garbage collections.
- EXPECT_DEATH_IF_SUPPORTED(Heap::From(GetHeap())->Terminate(), "");
+ EXPECT_DEATH_IF_SUPPORTED(Heap::From(&heap)->Terminate(), "");
+}
+
+TEST_F(GCHeapDeathTest, LargeChainOfNewStatesPersistent) {
+ LargeChainOfNewStates<Persistent>(*GetHeap());
+}
+
+TEST_F(GCHeapDeathTest, LargeChainOfNewStatesCrossThreadPersistent) {
+ LargeChainOfNewStates<subtle::CrossThreadPersistent>(*GetHeap());
}
} // namespace internal
diff --git a/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc b/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc
index f96e5f4a25..5ed46857c8 100644
--- a/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/marker-unittest.cc
@@ -7,12 +7,14 @@
#include <memory>
#include "include/cppgc/allocation.h"
+#include "include/cppgc/ephemeron-pair.h"
#include "include/cppgc/internal/pointer-policies.h"
#include "include/cppgc/member.h"
#include "include/cppgc/persistent.h"
#include "include/cppgc/trace-trait.h"
#include "src/heap/cppgc/heap-object-header.h"
#include "src/heap/cppgc/marking-visitor.h"
+#include "src/heap/cppgc/object-allocator.h"
#include "src/heap/cppgc/stats-collector.h"
#include "test/unittests/heap/cppgc/tests.h"
#include "testing/gtest/include/gtest/gtest.h"
@@ -44,6 +46,8 @@ class MarkerTest : public testing::TestWithHeap {
Marker* marker() const { return marker_.get(); }
+ void ResetMarker() { marker_.reset(); }
+
private:
std::unique_ptr<Marker> marker_;
};
@@ -346,6 +350,50 @@ TEST_F(MarkerTest, SentinelNotClearedOnWeakPersistentHandling) {
EXPECT_EQ(kSentinelPointer, root->weak_child());
}
+namespace {
+
+class SimpleObject final : public GarbageCollected<SimpleObject> {
+ public:
+ void Trace(Visitor*) const {}
+};
+
+class ObjectWithEphemeronPair final
+ : public GarbageCollected<ObjectWithEphemeronPair> {
+ public:
+ explicit ObjectWithEphemeronPair(AllocationHandle& handle)
+ : ephemeron_pair_(MakeGarbageCollected<SimpleObject>(handle),
+ MakeGarbageCollected<SimpleObject>(handle)) {}
+
+ void Trace(Visitor* visitor) const {
+ // First trace the ephemeron pair. The key is not yet marked as live, so
+ // the pair should be recorded for later processing. Then strongly mark
+ // the key. Marking the key will not trigger another worklist processing
+ // iteration, as it merely continues the same loop for regular objects and
+ // will leave the main marking worklist empty. If the ephemeron pair is
+ // not also recorded for later processing, destroying the marker crashes.
+ visitor->Trace(ephemeron_pair_);
+ visitor->Trace(const_cast<const SimpleObject*>(ephemeron_pair_.key.Get()));
+ }
+
+ private:
+ const EphemeronPair<SimpleObject, SimpleObject> ephemeron_pair_;
+};
+
+} // namespace
+
+TEST_F(MarkerTest, MarkerProcessesAllEphemeronPairs) {
+ static const Marker::MarkingConfig config = {
+ MarkingConfig::CollectionType::kMajor,
+ MarkingConfig::StackState::kNoHeapPointers,
+ MarkingConfig::MarkingType::kAtomic};
+ Persistent<ObjectWithEphemeronPair> obj =
+ MakeGarbageCollected<ObjectWithEphemeronPair>(GetAllocationHandle(),
+ GetAllocationHandle());
+ InitializeMarker(*Heap::From(GetHeap()), GetPlatformHandle().get(), config);
+ marker()->FinishMarking(MarkingConfig::StackState::kNoHeapPointers);
+ ResetMarker();
+}
+
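The regression test encodes the ephemeron invariant: a pair's value may be marked only once its key is known to be live, so pairs visited before their key must be queued and retried, and a pair left unprocessed at marker destruction is the crash being guarded against. A hedged sketch of the fixed-point loop, independent of cppgc's actual worklist types:

    #include <unordered_set>
    #include <vector>

    struct Pair {
      const void* key;
      const void* value;
    };

    // Iterate until no pair makes progress; values become live only through
    // live keys. Pairs still pending at the end have dead keys.
    void ProcessEphemerons(std::vector<Pair>& pending,
                           std::unordered_set<const void*>& marked) {
      bool progress = true;
      while (progress) {
        progress = false;
        std::vector<Pair> still_pending;
        for (const Pair& p : pending) {
          if (marked.count(p.key)) {
            marked.insert(p.value);  // key live -> value becomes live
            progress = true;
          } else {
            still_pending.push_back(p);  // retry in a later iteration
          }
        }
        pending = std::move(still_pending);
      }
    }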
// Incremental Marking
class IncrementalMarkingTest : public testing::TestWithHeap {
diff --git a/deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc b/deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc
index 733a6383da..b762d1be6e 100644
--- a/deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/marking-verifier-unittest.cc
@@ -198,7 +198,7 @@ TEST_F(MarkingVerifierDeathTest, DieOnUnmarkedWeakMember) {
"");
}
-#ifdef CPPGC_VERIFY_LIVE_BYTES
+#ifdef CPPGC_VERIFY_HEAP
TEST_F(MarkingVerifierDeathTest, DieOnUnexpectedLiveByteCount) {
GCed* object = MakeGarbageCollected<GCed>(GetAllocationHandle());
@@ -210,7 +210,7 @@ TEST_F(MarkingVerifierDeathTest, DieOnUnexpectedLiveByteCount) {
"");
}
-#endif // CPPGC_VERIFY_LIVE_BYTES
+#endif // CPPGC_VERIFY_HEAP
namespace {
@@ -256,7 +256,7 @@ void MarkingVerifierDeathTest::TestResurrectingPreFinalizer() {
EXPECT_DEATH_IF_SUPPORTED(PreciseGC(), "");
}
-#if DEBUG
+#if CPPGC_VERIFY_HEAP
TEST_F(MarkingVerifierDeathTest, DiesOnResurrectedMember) {
TestResurrectingPreFinalizer<Member>();
@@ -266,7 +266,7 @@ TEST_F(MarkingVerifierDeathTest, DiesOnResurrectedWeakMember) {
TestResurrectingPreFinalizer<WeakMember>();
}
-#endif // DEBUG
+#endif // CPPGC_VERIFY_HEAP
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/cppgc/page-memory-unittest.cc b/deps/v8/test/unittests/heap/cppgc/page-memory-unittest.cc
index 6c8533c2f0..8b634113b4 100644
--- a/deps/v8/test/unittests/heap/cppgc/page-memory-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/page-memory-unittest.cc
@@ -5,6 +5,7 @@
#include "src/heap/cppgc/page-memory.h"
#include "src/base/page-allocator.h"
+#include "src/heap/cppgc/platform.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace cppgc {
@@ -77,7 +78,8 @@ TEST(PageMemoryDeathTest, ConstructNonContainedRegions) {
TEST(PageMemoryRegionTest, NormalPageMemoryRegion) {
v8::base::PageAllocator allocator;
- auto pmr = std::make_unique<NormalPageMemoryRegion>(&allocator);
+ FatalOutOfMemoryHandler oom_handler;
+ auto pmr = std::make_unique<NormalPageMemoryRegion>(allocator, oom_handler);
pmr->UnprotectForTesting();
MemoryRegion prev_overall;
for (size_t i = 0; i < NormalPageMemoryRegion::kNumPageRegions; ++i) {
@@ -103,7 +105,9 @@ TEST(PageMemoryRegionTest, NormalPageMemoryRegion) {
TEST(PageMemoryRegionTest, LargePageMemoryRegion) {
v8::base::PageAllocator allocator;
- auto pmr = std::make_unique<LargePageMemoryRegion>(&allocator, 1024);
+ FatalOutOfMemoryHandler oom_handler;
+ auto pmr =
+ std::make_unique<LargePageMemoryRegion>(allocator, oom_handler, 1024);
pmr->UnprotectForTesting();
const PageMemory pm = pmr->GetPageMemory();
EXPECT_LE(1024u, pm.writeable_region().size());
@@ -116,16 +120,16 @@ TEST(PageMemoryRegionTest, PlatformUsesGuardPages) {
// regions.
v8::base::PageAllocator allocator;
#if defined(V8_HOST_ARCH_PPC64) && !defined(_AIX)
- EXPECT_FALSE(SupportsCommittingGuardPages(&allocator));
+ EXPECT_FALSE(SupportsCommittingGuardPages(allocator));
#elif defined(V8_HOST_ARCH_ARM64)
if (allocator.CommitPageSize() == 4096) {
- EXPECT_TRUE(SupportsCommittingGuardPages(&allocator));
+ EXPECT_TRUE(SupportsCommittingGuardPages(allocator));
} else {
// Arm64 supports both 16k and 64k OS pages.
- EXPECT_FALSE(SupportsCommittingGuardPages(&allocator));
+ EXPECT_FALSE(SupportsCommittingGuardPages(allocator));
}
#else // Regular case.
- EXPECT_TRUE(SupportsCommittingGuardPages(&allocator));
+ EXPECT_TRUE(SupportsCommittingGuardPages(allocator));
#endif
}
@@ -140,8 +144,10 @@ TEST(PageMemoryRegionDeathTest, ReservationIsFreed) {
// may expand to statements that re-purpose the previously freed memory
// and thus not crash.
EXPECT_DEATH_IF_SUPPORTED(
- v8::base::PageAllocator allocator; Address base; {
- auto pmr = std::make_unique<LargePageMemoryRegion>(&allocator, 1024);
+ v8::base::PageAllocator allocator; FatalOutOfMemoryHandler oom_handler;
+ Address base; {
+ auto pmr = std::make_unique<LargePageMemoryRegion>(allocator,
+ oom_handler, 1024);
base = pmr->reserved_region().base();
} access(base[0]);
, "");
@@ -149,8 +155,9 @@ TEST(PageMemoryRegionDeathTest, ReservationIsFreed) {
TEST(PageMemoryRegionDeathTest, FrontGuardPageAccessCrashes) {
v8::base::PageAllocator allocator;
- auto pmr = std::make_unique<NormalPageMemoryRegion>(&allocator);
- if (SupportsCommittingGuardPages(&allocator)) {
+ FatalOutOfMemoryHandler oom_handler;
+ auto pmr = std::make_unique<NormalPageMemoryRegion>(allocator, oom_handler);
+ if (SupportsCommittingGuardPages(allocator)) {
EXPECT_DEATH_IF_SUPPORTED(
access(pmr->GetPageMemory(0).overall_region().base()[0]), "");
}
@@ -158,8 +165,9 @@ TEST(PageMemoryRegionDeathTest, FrontGuardPageAccessCrashes) {
TEST(PageMemoryRegionDeathTest, BackGuardPageAccessCrashes) {
v8::base::PageAllocator allocator;
- auto pmr = std::make_unique<NormalPageMemoryRegion>(&allocator);
- if (SupportsCommittingGuardPages(&allocator)) {
+ FatalOutOfMemoryHandler oom_handler;
+ auto pmr = std::make_unique<NormalPageMemoryRegion>(allocator, oom_handler);
+ if (SupportsCommittingGuardPages(allocator)) {
EXPECT_DEATH_IF_SUPPORTED(
access(pmr->GetPageMemory(0).writeable_region().end()[0]), "");
}
@@ -167,7 +175,8 @@ TEST(PageMemoryRegionDeathTest, BackGuardPageAccessCrashes) {
TEST(PageMemoryRegionTreeTest, AddNormalLookupRemove) {
v8::base::PageAllocator allocator;
- auto pmr = std::make_unique<NormalPageMemoryRegion>(&allocator);
+ FatalOutOfMemoryHandler oom_handler;
+ auto pmr = std::make_unique<NormalPageMemoryRegion>(allocator, oom_handler);
PageMemoryRegionTree tree;
tree.Add(pmr.get());
ASSERT_EQ(pmr.get(), tree.Lookup(pmr->reserved_region().base()));
@@ -181,8 +190,10 @@ TEST(PageMemoryRegionTreeTest, AddNormalLookupRemove) {
TEST(PageMemoryRegionTreeTest, AddLargeLookupRemove) {
v8::base::PageAllocator allocator;
+ FatalOutOfMemoryHandler oom_handler;
constexpr size_t kLargeSize = 5012;
- auto pmr = std::make_unique<LargePageMemoryRegion>(&allocator, kLargeSize);
+ auto pmr = std::make_unique<LargePageMemoryRegion>(allocator, oom_handler,
+ kLargeSize);
PageMemoryRegionTree tree;
tree.Add(pmr.get());
ASSERT_EQ(pmr.get(), tree.Lookup(pmr->reserved_region().base()));
@@ -196,9 +207,11 @@ TEST(PageMemoryRegionTreeTest, AddLargeLookupRemove) {
TEST(PageMemoryRegionTreeTest, AddLookupRemoveMultiple) {
v8::base::PageAllocator allocator;
- auto pmr1 = std::make_unique<NormalPageMemoryRegion>(&allocator);
+ FatalOutOfMemoryHandler oom_handler;
+ auto pmr1 = std::make_unique<NormalPageMemoryRegion>(allocator, oom_handler);
constexpr size_t kLargeSize = 3127;
- auto pmr2 = std::make_unique<LargePageMemoryRegion>(&allocator, kLargeSize);
+ auto pmr2 = std::make_unique<LargePageMemoryRegion>(allocator, oom_handler,
+ kLargeSize);
PageMemoryRegionTree tree;
tree.Add(pmr1.get());
tree.Add(pmr2.get());
@@ -223,7 +236,8 @@ TEST(NormalPageMemoryPool, ConstructorEmpty) {
TEST(NormalPageMemoryPool, AddTakeSameBucket) {
v8::base::PageAllocator allocator;
- auto pmr = std::make_unique<NormalPageMemoryRegion>(&allocator);
+ FatalOutOfMemoryHandler oom_handler;
+ auto pmr = std::make_unique<NormalPageMemoryRegion>(allocator, oom_handler);
const PageMemory pm = pmr->GetPageMemory(0);
NormalPageMemoryPool pool;
constexpr size_t kBucket = 0;
@@ -235,7 +249,8 @@ TEST(NormalPageMemoryPool, AddTakeSameBucket) {
TEST(NormalPageMemoryPool, AddTakeNotFoundDifferentBucket) {
v8::base::PageAllocator allocator;
- auto pmr = std::make_unique<NormalPageMemoryRegion>(&allocator);
+ FatalOutOfMemoryHandler oom_handler;
+ auto pmr = std::make_unique<NormalPageMemoryRegion>(allocator, oom_handler);
const PageMemory pm = pmr->GetPageMemory(0);
NormalPageMemoryPool pool;
constexpr size_t kFirstBucket = 0;
@@ -250,7 +265,8 @@ TEST(NormalPageMemoryPool, AddTakeNotFoundDifferentBucket) {
TEST(PageBackendTest, AllocateNormalUsesPool) {
v8::base::PageAllocator allocator;
- PageBackend backend(&allocator);
+ FatalOutOfMemoryHandler oom_handler;
+ PageBackend backend(allocator, oom_handler);
constexpr size_t kBucket = 0;
Address writeable_base1 = backend.AllocateNormalPageMemory(kBucket);
EXPECT_NE(nullptr, writeable_base1);
@@ -262,7 +278,8 @@ TEST(PageBackendTest, AllocateNormalUsesPool) {
TEST(PageBackendTest, AllocateLarge) {
v8::base::PageAllocator allocator;
- PageBackend backend(&allocator);
+ FatalOutOfMemoryHandler oom_handler;
+ PageBackend backend(allocator, oom_handler);
Address writeable_base1 = backend.AllocateLargePageMemory(13731);
EXPECT_NE(nullptr, writeable_base1);
Address writeable_base2 = backend.AllocateLargePageMemory(9478);
@@ -274,7 +291,8 @@ TEST(PageBackendTest, AllocateLarge) {
TEST(PageBackendTest, LookupNormal) {
v8::base::PageAllocator allocator;
- PageBackend backend(&allocator);
+ FatalOutOfMemoryHandler oom_handler;
+ PageBackend backend(allocator, oom_handler);
constexpr size_t kBucket = 0;
Address writeable_base = backend.AllocateNormalPageMemory(kBucket);
EXPECT_EQ(nullptr, backend.Lookup(writeable_base - kGuardPageSize));
@@ -290,7 +308,8 @@ TEST(PageBackendTest, LookupNormal) {
TEST(PageBackendTest, LookupLarge) {
v8::base::PageAllocator allocator;
- PageBackend backend(&allocator);
+ FatalOutOfMemoryHandler oom_handler;
+ PageBackend backend(allocator, oom_handler);
constexpr size_t kSize = 7934;
Address writeable_base = backend.AllocateLargePageMemory(kSize);
EXPECT_EQ(nullptr, backend.Lookup(writeable_base - kGuardPageSize));
@@ -301,9 +320,10 @@ TEST(PageBackendTest, LookupLarge) {
TEST(PageBackendDeathTest, DestructingBackendDestroysPageMemory) {
v8::base::PageAllocator allocator;
+ FatalOutOfMemoryHandler oom_handler;
Address base;
{
- PageBackend backend(&allocator);
+ PageBackend backend(allocator, oom_handler);
constexpr size_t kBucket = 0;
base = backend.AllocateNormalPageMemory(kBucket);
}
diff --git a/deps/v8/test/unittests/heap/cppgc/platform-unittest.cc b/deps/v8/test/unittests/heap/cppgc/platform-unittest.cc
new file mode 100644
index 0000000000..7cf783ab0a
--- /dev/null
+++ b/deps/v8/test/unittests/heap/cppgc/platform-unittest.cc
@@ -0,0 +1,46 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/cppgc/platform.h"
+
+#include "src/base/logging.h"
+#include "src/base/page-allocator.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace cppgc {
+namespace internal {
+
+TEST(FatalOutOfMemoryHandlerDeathTest, DefaultHandlerCrashes) {
+ FatalOutOfMemoryHandler handler;
+ EXPECT_DEATH_IF_SUPPORTED(handler(), "");
+}
+
+namespace {
+
+constexpr uintptr_t kHeapNeedle = 0x14;
+
+[[noreturn]] void CustomHandler(const std::string&, const SourceLocation&,
+ HeapBase* heap) {
+ if (heap == reinterpret_cast<HeapBase*>(kHeapNeedle)) {
+ FATAL("cust0m h4ndl3r with matching heap");
+ }
+ FATAL("cust0m h4ndl3r");
+}
+
+} // namespace
+
+TEST(FatalOutOfMemoryHandlerDeathTest, CustomHandlerCrashes) {
+ FatalOutOfMemoryHandler handler;
+ handler.SetCustomHandler(&CustomHandler);
+ EXPECT_DEATH_IF_SUPPORTED(handler(), "cust0m h4ndl3r");
+}
+
+TEST(FatalOutOfMemoryHandlerDeathTest, CustomHandlerWithHeapState) {
+ FatalOutOfMemoryHandler handler(reinterpret_cast<HeapBase*>(kHeapNeedle));
+ handler.SetCustomHandler(&CustomHandler);
+ EXPECT_DEATH_IF_SUPPORTED(handler(), "cust0m h4ndl3r with matching heap");
+}
+
+} // namespace internal
+} // namespace cppgc
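The new platform unittest exercises a small pluggable-handler shape: a default action that aborts, an optional non-returning callback, and optional context (the HeapBase pointer) passed through to it. A sketch with invented names, not cppgc's actual types:

    #include <cstdlib>

    struct HeapContext;  // opaque context, like the HeapBase* above

    using OomCallback = void (*)(const char* reason, HeapContext* heap);

    class OomHandler {
     public:
      explicit OomHandler(HeapContext* heap = nullptr) : heap_(heap) {}

      void SetCustomHandler(OomCallback callback) { callback_ = callback; }

      [[noreturn]] void operator()(const char* reason = "oom") const {
        if (callback_) callback_(reason, heap_);  // callback must not return
        std::abort();                             // default: crash hard
      }

     private:
      OomCallback callback_ = nullptr;
      HeapContext* heap_ = nullptr;
    };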
diff --git a/deps/v8/test/unittests/heap/cppgc/prefinalizer-unittest.cc b/deps/v8/test/unittests/heap/cppgc/prefinalizer-unittest.cc
index 67aa594f93..5fc412c8c3 100644
--- a/deps/v8/test/unittests/heap/cppgc/prefinalizer-unittest.cc
+++ b/deps/v8/test/unittests/heap/cppgc/prefinalizer-unittest.cc
@@ -246,16 +246,23 @@ class AllocatingPrefinalizer : public GarbageCollected<AllocatingPrefinalizer> {
} // namespace
+#ifdef CPPGC_ALLOW_ALLOCATIONS_IN_PREFINALIZERS
+TEST_F(PrefinalizerTest, PrefinalizerDoesNotFailOnAllocation) {
+ auto* object = MakeGarbageCollected<AllocatingPrefinalizer>(
+ GetAllocationHandle(), GetHeap());
+ PreciseGC();
+ USE(object);
+}
+#else
#ifdef DEBUG
-
TEST_F(PrefinalizerDeathTest, PrefinalizerFailsOnAllocation) {
auto* object = MakeGarbageCollected<AllocatingPrefinalizer>(
GetAllocationHandle(), GetHeap());
USE(object);
EXPECT_DEATH_IF_SUPPORTED(PreciseGC(), "");
}
-
#endif // DEBUG
+#endif // CPPGC_ALLOW_ALLOCATIONS_IN_PREFINALIZERS
namespace {
@@ -321,5 +328,44 @@ TEST_F(PrefinalizerDeathTest, PrefinalizerCantRessurectObjectOnHeap) {
#endif // CPPGC_CHECK_ASSIGNMENTS_IN_PREFINALIZERS
#endif // V8_ENABLE_CHECKS
+#ifdef CPPGC_ALLOW_ALLOCATIONS_IN_PREFINALIZERS
+TEST_F(PrefinalizerTest, AllocatingPrefinalizersInMultipleGCCycles) {
+ auto* object = MakeGarbageCollected<AllocatingPrefinalizer>(
+ GetAllocationHandle(), GetHeap());
+ PreciseGC();
+ auto* other_object = MakeGarbageCollected<AllocatingPrefinalizer>(
+ GetAllocationHandle(), GetHeap());
+ PreciseGC();
+ USE(object);
+ USE(other_object);
+}
+#endif
+
+class GCedBase : public GarbageCollected<GCedBase> {
+ CPPGC_USING_PRE_FINALIZER(GCedBase, PreFinalize);
+
+ public:
+ void Trace(Visitor*) const {}
+ virtual void PreFinalize() { ++prefinalizer_count_; }
+ static size_t prefinalizer_count_;
+};
+size_t GCedBase::prefinalizer_count_ = 0u;
+
+class GCedInherited : public GCedBase {
+ public:
+ void PreFinalize() override { ++prefinalizer_count_; }
+ static size_t prefinalizer_count_;
+};
+size_t GCedInherited::prefinalizer_count_ = 0u;
+
+TEST_F(PrefinalizerTest, VirtualPrefinalizer) {
+ MakeGarbageCollected<GCedInherited>(GetAllocationHandle());
+ GCedBase::prefinalizer_count_ = 0u;
+ GCedInherited::prefinalizer_count_ = 0u;
+ PreciseGC();
+ EXPECT_EQ(0u, GCedBase::prefinalizer_count_);
+ EXPECT_LT(0u, GCedInherited::prefinalizer_count_);
+}
+
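VirtualPrefinalizer leans on two C++ mechanics at once: the callback registered on the base class dispatches virtually to the override, and each class increments its own shadowing static counter, which is why the base count stays at zero. Reduced to plain C++ without cppgc:

    #include <cstddef>

    struct Base {
      virtual ~Base() = default;
      virtual void PreFinalize() { ++count; }
      static size_t count;
    };
    size_t Base::count = 0;

    struct Derived : Base {
      void PreFinalize() override { ++count; }  // hits Derived::count
      static size_t count;                      // shadows Base::count
    };
    size_t Derived::count = 0;

    // Invoking PreFinalize() through a Base* that points at a Derived
    // increments Derived::count only; Base::count stays zero.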
} // namespace internal
} // namespace cppgc
diff --git a/deps/v8/test/unittests/heap/progressbar-unittest.cc b/deps/v8/test/unittests/heap/progressbar-unittest.cc
new file mode 100644
index 0000000000..57bc721602
--- /dev/null
+++ b/deps/v8/test/unittests/heap/progressbar-unittest.cc
@@ -0,0 +1,80 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/progress-bar.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+TEST(ProgressBar, DefaultDisabled) {
+ ProgressBar progress_bar;
+ progress_bar.Initialize();
+ EXPECT_FALSE(progress_bar.IsEnabled());
+}
+
+TEST(ProgressBar, EnabledAfterExplicitEnable) {
+ ProgressBar progress_bar;
+ progress_bar.Initialize();
+ progress_bar.Enable();
+ EXPECT_TRUE(progress_bar.IsEnabled());
+}
+
+TEST(ProgressBar, ZeroValueAfterEnable) {
+ ProgressBar progress_bar;
+ progress_bar.Initialize();
+ progress_bar.Enable();
+ ASSERT_TRUE(progress_bar.IsEnabled());
+ EXPECT_EQ(0u, progress_bar.Value());
+}
+
+TEST(ProgressBar, TrySetValue) {
+ ProgressBar progress_bar;
+ progress_bar.Initialize();
+ progress_bar.Enable();
+ ASSERT_TRUE(progress_bar.IsEnabled());
+ EXPECT_TRUE(progress_bar.TrySetNewValue(0, 17));
+ EXPECT_EQ(17u, progress_bar.Value());
+}
+
+TEST(ProgressBar, MultipleTrySetValue) {
+ ProgressBar progress_bar;
+ progress_bar.Initialize();
+ progress_bar.Enable();
+ ASSERT_TRUE(progress_bar.IsEnabled());
+ EXPECT_TRUE(progress_bar.TrySetNewValue(0, 23));
+ EXPECT_EQ(23u, progress_bar.Value());
+ EXPECT_TRUE(progress_bar.TrySetNewValue(23, 127));
+ EXPECT_EQ(127u, progress_bar.Value());
+}
+
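TrySetNewValue takes the expected old value alongside the new one, the signature of a compare-and-swap wrapper: concurrent writers may race on the counter, and a stale writer simply fails. A minimal sketch of that shape (illustrative, not the actual ProgressBar):

    #include <atomic>
    #include <cstddef>

    class CasCounter {
     public:
      // Succeeds only if the current value still equals `expected`.
      bool TrySetNewValue(size_t expected, size_t desired) {
        return value_.compare_exchange_strong(expected, desired);
      }

      size_t Value() const { return value_.load(); }

     private:
      std::atomic<size_t> value_{0};
    };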
+TEST(ProgressBar, ResetIfEnabledOnDisabled) {
+ ProgressBar progress_bar;
+ progress_bar.Initialize();
+ progress_bar.ResetIfEnabled();
+ EXPECT_FALSE(progress_bar.IsEnabled());
+}
+
+TEST(ProgressBar, ResetIfEnabledOnEnabled) {
+ ProgressBar progress_bar;
+ progress_bar.Initialize();
+ progress_bar.Enable();
+ ASSERT_TRUE(progress_bar.TrySetNewValue(0, 1));
+ progress_bar.ResetIfEnabled();
+ ASSERT_TRUE(progress_bar.IsEnabled());
+ EXPECT_EQ(0u, progress_bar.Value());
+}
+
+#ifdef DEBUG
+
+TEST(ProgressBarDeathTest, DiesOnTrySetValueOnDisabled) {
+ ProgressBar progress_bar;
+ progress_bar.Initialize();
+ EXPECT_DEATH_IF_SUPPORTED(progress_bar.TrySetNewValue(0, 1), "");
+}
+
+#endif // DEBUG
+
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/heap/unified-heap-unittest.cc b/deps/v8/test/unittests/heap/unified-heap-unittest.cc
index 577e9ff050..28edd79d40 100644
--- a/deps/v8/test/unittests/heap/unified-heap-unittest.cc
+++ b/deps/v8/test/unittests/heap/unified-heap-unittest.cc
@@ -13,8 +13,11 @@
#include "include/cppgc/platform.h"
#include "include/cppgc/testing.h"
#include "include/libplatform/libplatform.h"
+#include "include/v8-context.h"
#include "include/v8-cppgc.h"
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-object.h"
+#include "include/v8-traced-handle.h"
#include "src/api/api-inl.h"
#include "src/base/platform/time.h"
#include "src/heap/cppgc-js/cpp-heap.h"
diff --git a/deps/v8/test/unittests/heap/unified-heap-utils.cc b/deps/v8/test/unittests/heap/unified-heap-utils.cc
index 01b46c03fb..8ee7f9c520 100644
--- a/deps/v8/test/unittests/heap/unified-heap-utils.cc
+++ b/deps/v8/test/unittests/heap/unified-heap-utils.cc
@@ -6,6 +6,7 @@
#include "include/cppgc/platform.h"
#include "include/v8-cppgc.h"
+#include "include/v8-function.h"
#include "src/api/api-inl.h"
#include "src/heap/cppgc-js/cpp-heap.h"
#include "src/heap/heap.h"
diff --git a/deps/v8/test/unittests/heap/unified-heap-utils.h b/deps/v8/test/unittests/heap/unified-heap-utils.h
index c0f7481f77..2844e818a9 100644
--- a/deps/v8/test/unittests/heap/unified-heap-utils.h
+++ b/deps/v8/test/unittests/heap/unified-heap-utils.h
@@ -7,7 +7,7 @@
#include "include/cppgc/heap.h"
#include "include/v8-cppgc.h"
-#include "include/v8.h"
+#include "include/v8-local-handle.h"
#include "test/unittests/heap/heap-utils.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/heap/unmapper-unittest.cc b/deps/v8/test/unittests/heap/unmapper-unittest.cc
index 99fe92a987..7a4cead569 100644
--- a/deps/v8/test/unittests/heap/unmapper-unittest.cc
+++ b/deps/v8/test/unittests/heap/unmapper-unittest.cc
@@ -97,6 +97,16 @@ class TrackingPageAllocator : public ::v8::PageAllocator {
return result;
}
+ bool DecommitPages(void* address, size_t size) override {
+ bool result = page_allocator_->DecommitPages(address, size);
+ if (result) {
+ // Mark pages as non-accessible.
+ UpdatePagePermissions(reinterpret_cast<Address>(address), size,
+ kNoAccess);
+ }
+ return result;
+ }
+
bool SetPermissions(void* address, size_t size,
PageAllocator::Permission access) override {
bool result = page_allocator_->SetPermissions(address, size, access);
@@ -240,6 +250,13 @@ class SequentialUnmapperTest : public TestWithIsolate {
SetPlatformPageAllocatorForTesting(tracking_page_allocator_));
old_flag_ = i::FLAG_concurrent_sweeping;
i::FLAG_concurrent_sweeping = false;
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ GetProcessWideVirtualMemoryCage()->TearDown();
+ constexpr bool use_guard_regions = false;
+ CHECK(GetProcessWideVirtualMemoryCage()->Initialize(
+ tracking_page_allocator_, kVirtualMemoryCageMinimumSize,
+ use_guard_regions));
+#endif
#ifdef V8_COMPRESS_POINTERS_IN_SHARED_CAGE
// Reinitialize the process-wide pointer cage so it can pick up the
// TrackingPageAllocator.
@@ -256,6 +273,9 @@ class SequentialUnmapperTest : public TestWithIsolate {
// freed until process teardown.
IsolateAllocator::FreeProcessWidePtrComprCageForTesting();
#endif
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ GetProcessWideVirtualMemoryCage()->TearDown();
+#endif
i::FLAG_concurrent_sweeping = old_flag_;
CHECK(tracking_page_allocator_->IsEmpty());
diff --git a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
index 1863d772bf..8838b0b94a 100644
--- a/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/bytecode-array-builder-unittest.cc
@@ -203,7 +203,7 @@ TEST_F(BytecodeArrayBuilderTest, AllBytecodesGenerated) {
.CallUndefinedReceiver(reg, pair, 1)
.CallRuntime(Runtime::kIsArray, reg)
.CallRuntimeForPair(Runtime::kLoadLookupSlotForCall, reg_list, pair)
- .CallJSRuntime(Context::OBJECT_CREATE, reg_list)
+ .CallJSRuntime(Context::PROMISE_THEN_INDEX, reg_list)
.CallWithSpread(reg, reg_list, 1);
// Emit binary operator invocations.
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
index 63ccfc5b76..3eddc5c99c 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.cc
@@ -27,8 +27,7 @@ InterpreterAssemblerTestState::InterpreterAssemblerTestState(
InterpreterAssemblerTest* test, Bytecode bytecode)
: compiler::CodeAssemblerState(
test->isolate(), test->zone(), InterpreterDispatchDescriptor{},
- CodeKind::BYTECODE_HANDLER, Bytecodes::ToString(bytecode),
- PoisoningMitigationLevel::kPoisonCriticalOnly) {}
+ CodeKind::BYTECODE_HANDLER, Bytecodes::ToString(bytecode)) {}
const interpreter::Bytecode kBytecodes[] = {
#define DEFINE_BYTECODE(Name, ...) interpreter::Bytecode::k##Name,
@@ -55,14 +54,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::
Matcher<c::Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoad(
const Matcher<c::LoadRepresentation>& rep_matcher,
const Matcher<c::Node*>& base_matcher,
- const Matcher<c::Node*>& index_matcher, LoadSensitivity needs_poisoning) {
- CHECK_NE(LoadSensitivity::kUnsafe, needs_poisoning);
- CHECK_NE(PoisoningMitigationLevel::kPoisonAll, poisoning_level());
- if (poisoning_level() == PoisoningMitigationLevel::kPoisonCriticalOnly &&
- needs_poisoning == LoadSensitivity::kCritical) {
- return ::i::compiler::IsPoisonedLoad(rep_matcher, base_matcher,
- index_matcher, _, _);
- }
+ const Matcher<c::Node*>& index_matcher) {
return ::i::compiler::IsLoad(rep_matcher, base_matcher, index_matcher, _, _);
}
@@ -71,7 +63,6 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoadFromObject(
const Matcher<c::LoadRepresentation>& rep_matcher,
const Matcher<c::Node*>& base_matcher,
const Matcher<c::Node*>& index_matcher) {
- CHECK_NE(PoisoningMitigationLevel::kPoisonAll, poisoning_level());
return ::i::compiler::IsLoadFromObject(rep_matcher, base_matcher,
index_matcher, _, _);
}
@@ -96,39 +87,36 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsWordNot(
Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedByteOperand(
- int offset, LoadSensitivity needs_poisoning) {
+ int offset) {
return IsLoad(
MachineType::Uint8(),
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
c::IsIntPtrAdd(
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
- c::IsIntPtrConstant(offset)),
- needs_poisoning);
+ c::IsIntPtrConstant(offset)));
}
Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedByteOperand(
- int offset, LoadSensitivity needs_poisoning) {
+ int offset) {
return IsLoad(
MachineType::Int8(),
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
c::IsIntPtrAdd(
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
- c::IsIntPtrConstant(offset)),
- needs_poisoning);
+ c::IsIntPtrConstant(offset)));
}
Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedShortOperand(
- int offset, LoadSensitivity needs_poisoning) {
+ int offset) {
if (TargetSupportsUnalignedAccess()) {
return IsLoad(
MachineType::Uint16(),
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
c::IsIntPtrAdd(
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
- c::IsIntPtrConstant(offset)),
- needs_poisoning);
+ c::IsIntPtrConstant(offset)));
} else {
#if V8_TARGET_LITTLE_ENDIAN
const int kStep = -1;
@@ -146,8 +134,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedShortOperand(
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
c::IsIntPtrAdd(
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
- c::IsIntPtrConstant(offset + kMsbOffset + kStep * i)),
- needs_poisoning);
+ c::IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
}
return c::IsWord32Or(
c::IsWord32Shl(bytes[0], c::IsInt32Constant(kBitsPerByte)), bytes[1]);
@@ -156,15 +143,14 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedShortOperand(
Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedShortOperand(
- int offset, LoadSensitivity needs_poisoning) {
+ int offset) {
if (TargetSupportsUnalignedAccess()) {
return IsLoad(
MachineType::Int16(),
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
c::IsIntPtrAdd(
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
- c::IsIntPtrConstant(offset)),
- needs_poisoning);
+ c::IsIntPtrConstant(offset)));
} else {
#if V8_TARGET_LITTLE_ENDIAN
const int kStep = -1;
@@ -182,8 +168,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedShortOperand(
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
c::IsIntPtrAdd(
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
- c::IsIntPtrConstant(offset + kMsbOffset + kStep * i)),
- needs_poisoning);
+ c::IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
}
return c::IsWord32Or(
c::IsWord32Shl(bytes[0], c::IsInt32Constant(kBitsPerByte)), bytes[1]);
@@ -192,15 +177,14 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedShortOperand(
Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedQuadOperand(
- int offset, LoadSensitivity needs_poisoning) {
+ int offset) {
if (TargetSupportsUnalignedAccess()) {
return IsLoad(
MachineType::Uint32(),
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
c::IsIntPtrAdd(
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
- c::IsIntPtrConstant(offset)),
- needs_poisoning);
+ c::IsIntPtrConstant(offset)));
} else {
#if V8_TARGET_LITTLE_ENDIAN
const int kStep = -1;
@@ -218,8 +202,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedQuadOperand(
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
c::IsIntPtrAdd(
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
- c::IsIntPtrConstant(offset + kMsbOffset + kStep * i)),
- needs_poisoning);
+ c::IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
}
return c::IsWord32Or(
c::IsWord32Shl(bytes[0], c::IsInt32Constant(3 * kBitsPerByte)),
@@ -233,15 +216,14 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedQuadOperand(
Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedQuadOperand(
- int offset, LoadSensitivity needs_poisoning) {
+ int offset) {
if (TargetSupportsUnalignedAccess()) {
return IsLoad(
MachineType::Int32(),
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
c::IsIntPtrAdd(
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
- c::IsIntPtrConstant(offset)),
- needs_poisoning);
+ c::IsIntPtrConstant(offset)));
} else {
#if V8_TARGET_LITTLE_ENDIAN
const int kStep = -1;
@@ -259,8 +241,7 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedQuadOperand(
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
c::IsIntPtrAdd(
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeOffset),
- c::IsIntPtrConstant(offset + kMsbOffset + kStep * i)),
- needs_poisoning);
+ c::IsIntPtrConstant(offset + kMsbOffset + kStep * i)));
}
return c::IsWord32Or(
c::IsWord32Shl(bytes[0], c::IsInt32Constant(3 * kBitsPerByte)),
@@ -274,14 +255,14 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedQuadOperand(
Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedOperand(
- int offset, OperandSize operand_size, LoadSensitivity needs_poisoning) {
+ int offset, OperandSize operand_size) {
switch (operand_size) {
case OperandSize::kByte:
- return IsSignedByteOperand(offset, needs_poisoning);
+ return IsSignedByteOperand(offset);
case OperandSize::kShort:
- return IsSignedShortOperand(offset, needs_poisoning);
+ return IsSignedShortOperand(offset);
case OperandSize::kQuad:
- return IsSignedQuadOperand(offset, needs_poisoning);
+ return IsSignedQuadOperand(offset);
case OperandSize::kNone:
UNREACHABLE();
}
@@ -290,14 +271,14 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsSignedOperand(
Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedOperand(
- int offset, OperandSize operand_size, LoadSensitivity needs_poisoning) {
+ int offset, OperandSize operand_size) {
switch (operand_size) {
case OperandSize::kByte:
- return IsUnsignedByteOperand(offset, needs_poisoning);
+ return IsUnsignedByteOperand(offset);
case OperandSize::kShort:
- return IsUnsignedShortOperand(offset, needs_poisoning);
+ return IsUnsignedShortOperand(offset);
case OperandSize::kQuad:
- return IsUnsignedQuadOperand(offset, needs_poisoning);
+ return IsUnsignedQuadOperand(offset);
case OperandSize::kNone:
UNREACHABLE();
}
@@ -307,12 +288,11 @@ InterpreterAssemblerTest::InterpreterAssemblerForTest::IsUnsignedOperand(
Matcher<c::Node*>
InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoadRegisterOperand(
int offset, OperandSize operand_size) {
- Matcher<c::Node*> reg_operand = IsChangeInt32ToIntPtr(
- IsSignedOperand(offset, operand_size, LoadSensitivity::kSafe));
+ Matcher<c::Node*> reg_operand =
+ IsChangeInt32ToIntPtr(IsSignedOperand(offset, operand_size));
return IsBitcastWordToTagged(IsLoad(
MachineType::Pointer(), c::IsLoadParentFramePointer(),
- c::IsWordShl(reg_operand, c::IsIntPtrConstant(kSystemPointerSizeLog2)),
- LoadSensitivity::kCritical));
+ c::IsWordShl(reg_operand, c::IsIntPtrConstant(kSystemPointerSizeLog2))));
}
TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
@@ -334,44 +314,38 @@ TARGET_TEST_F(InterpreterAssemblerTest, BytecodeOperand) {
switch (interpreter::Bytecodes::GetOperandType(bytecode, i)) {
case interpreter::OperandType::kRegCount:
EXPECT_THAT(m.BytecodeOperandCount(i),
- m.IsUnsignedOperand(offset, operand_size,
- LoadSensitivity::kCritical));
+ m.IsUnsignedOperand(offset, operand_size));
break;
case interpreter::OperandType::kFlag8:
EXPECT_THAT(m.BytecodeOperandFlag(i),
- m.IsUnsignedOperand(offset, operand_size,
- LoadSensitivity::kCritical));
+ m.IsUnsignedOperand(offset, operand_size));
break;
case interpreter::OperandType::kIdx:
EXPECT_THAT(m.BytecodeOperandIdx(i),
- c::IsChangeUint32ToWord(m.IsUnsignedOperand(
- offset, operand_size, LoadSensitivity::kCritical)));
+ c::IsChangeUint32ToWord(
+ m.IsUnsignedOperand(offset, operand_size)));
break;
case interpreter::OperandType::kNativeContextIndex:
EXPECT_THAT(m.BytecodeOperandNativeContextIndex(i),
- c::IsChangeUint32ToWord(m.IsUnsignedOperand(
- offset, operand_size, LoadSensitivity::kCritical)));
+ c::IsChangeUint32ToWord(
+ m.IsUnsignedOperand(offset, operand_size)));
break;
case interpreter::OperandType::kUImm:
EXPECT_THAT(m.BytecodeOperandUImm(i),
- m.IsUnsignedOperand(offset, operand_size,
- LoadSensitivity::kCritical));
+ m.IsUnsignedOperand(offset, operand_size));
break;
case interpreter::OperandType::kImm: {
EXPECT_THAT(m.BytecodeOperandImm(i),
- m.IsSignedOperand(offset, operand_size,
- LoadSensitivity::kCritical));
+ m.IsSignedOperand(offset, operand_size));
break;
}
case interpreter::OperandType::kRuntimeId:
EXPECT_THAT(m.BytecodeOperandRuntimeId(i),
- m.IsUnsignedOperand(offset, operand_size,
- LoadSensitivity::kCritical));
+ m.IsUnsignedOperand(offset, operand_size));
break;
case interpreter::OperandType::kIntrinsicId:
EXPECT_THAT(m.BytecodeOperandIntrinsicId(i),
- m.IsUnsignedOperand(offset, operand_size,
- LoadSensitivity::kCritical));
+ m.IsUnsignedOperand(offset, operand_size));
break;
case interpreter::OperandType::kRegList:
case interpreter::OperandType::kReg:
@@ -416,12 +390,11 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
kHeapObjectTag));
- EXPECT_THAT(
- load_constant,
- m.IsLoad(MachineType::AnyTagged(), constant_pool_matcher,
- c::IsIntPtrConstant(FixedArray::OffsetOfElementAt(2) -
- kHeapObjectTag),
- LoadSensitivity::kCritical));
+ EXPECT_THAT(load_constant,
+ m.IsLoadFromObject(
+ MachineType::AnyTagged(), constant_pool_matcher,
+ c::IsIntPtrConstant(FixedArray::OffsetOfElementAt(2) -
+ kHeapObjectTag)));
}
{
c::Node* index = m.UntypedParameter(2);
@@ -434,12 +407,11 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
kHeapObjectTag));
EXPECT_THAT(
load_constant,
- m.IsLoad(
+ m.IsLoadFromObject(
MachineType::AnyTagged(), constant_pool_matcher,
c::IsIntPtrAdd(
c::IsIntPtrConstant(FixedArray::kHeaderSize - kHeapObjectTag),
- c::IsWordShl(index, c::IsIntPtrConstant(kTaggedSizeLog2))),
- LoadSensitivity::kCritical));
+ c::IsWordShl(index, c::IsIntPtrConstant(kTaggedSizeLog2)))));
}
}
}
diff --git a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
index c2539d8a28..d02b80698a 100644
--- a/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
+++ b/deps/v8/test/unittests/interpreter/interpreter-assembler-unittest.h
@@ -45,8 +45,7 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
Matcher<compiler::Node*> IsLoad(
const Matcher<compiler::LoadRepresentation>& rep_matcher,
const Matcher<compiler::Node*>& base_matcher,
- const Matcher<compiler::Node*>& index_matcher,
- LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+ const Matcher<compiler::Node*>& index_matcher);
Matcher<compiler::Node*> IsLoadFromObject(
const Matcher<compiler::LoadRepresentation>& rep_matcher,
const Matcher<compiler::Node*>& base_matcher,
@@ -60,30 +59,17 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
Matcher<compiler::Node*> IsWordNot(
const Matcher<compiler::Node*>& value_matcher);
- Matcher<compiler::Node*> IsUnsignedByteOperand(
- int offset, LoadSensitivity needs_poisoning);
- Matcher<compiler::Node*> IsSignedByteOperand(
- int offset, LoadSensitivity needs_poisoning);
- Matcher<compiler::Node*> IsUnsignedShortOperand(
- int offset, LoadSensitivity needs_poisoning);
- Matcher<compiler::Node*> IsSignedShortOperand(
- int offset, LoadSensitivity needs_poisoning);
- Matcher<compiler::Node*> IsUnsignedQuadOperand(
- int offset, LoadSensitivity needs_poisoning);
- Matcher<compiler::Node*> IsSignedQuadOperand(
- int offset, LoadSensitivity needs_poisoning);
-
- Matcher<compiler::Node*> IsUnpoisonedSignedOperand(
- int offset, OperandSize operand_size, LoadSensitivity needs_poisoning);
- Matcher<compiler::Node*> IsUnpoisonedUnsignedOperand(
- int offset, OperandSize operand_size, LoadSensitivity needs_poisoning);
+ Matcher<compiler::Node*> IsUnsignedByteOperand(int offset);
+ Matcher<compiler::Node*> IsSignedByteOperand(int offset);
+ Matcher<compiler::Node*> IsUnsignedShortOperand(int offset);
+ Matcher<compiler::Node*> IsSignedShortOperand(int offset);
+ Matcher<compiler::Node*> IsUnsignedQuadOperand(int offset);
+ Matcher<compiler::Node*> IsSignedQuadOperand(int offset);
Matcher<compiler::Node*> IsSignedOperand(int offset,
- OperandSize operand_size,
- LoadSensitivity needs_poisoning);
+ OperandSize operand_size);
Matcher<compiler::Node*> IsUnsignedOperand(int offset,
- OperandSize operand_size,
- LoadSensitivity needs_poisoning);
+ OperandSize operand_size);
Matcher<compiler::Node*> IsLoadRegisterOperand(int offset,
OperandSize operand_size);
diff --git a/deps/v8/test/unittests/logging/runtime-call-stats-unittest.cc b/deps/v8/test/unittests/logging/runtime-call-stats-unittest.cc
index 78d4db2ec5..2694920329 100644
--- a/deps/v8/test/unittests/logging/runtime-call-stats-unittest.cc
+++ b/deps/v8/test/unittests/logging/runtime-call-stats-unittest.cc
@@ -4,6 +4,7 @@
#include "src/logging/runtime-call-stats.h"
+#include "include/v8-template.h"
#include "src/api/api-inl.h"
#include "src/base/atomic-utils.h"
#include "src/base/platform/time.h"
diff --git a/deps/v8/test/unittests/objects/value-serializer-unittest.cc b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
index 16c6f5ebcd..259969522f 100644
--- a/deps/v8/test/unittests/objects/value-serializer-unittest.cc
+++ b/deps/v8/test/unittests/objects/value-serializer-unittest.cc
@@ -7,7 +7,15 @@
#include <algorithm>
#include <string>
-#include "include/v8.h"
+#include "include/v8-context.h"
+#include "include/v8-date.h"
+#include "include/v8-function.h"
+#include "include/v8-json.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-primitive-object.h"
+#include "include/v8-template.h"
+#include "include/v8-value-serializer.h"
+#include "include/v8-wasm.h"
#include "src/api/api-inl.h"
#include "src/base/build_config.h"
#include "src/objects/backing-store.h"
@@ -260,12 +268,9 @@ class ValueSerializerTest : public TestWithIsolate {
}
Local<Object> NewDummyUint8Array() {
- static uint8_t data[] = {4, 5, 6};
- std::unique_ptr<v8::BackingStore> backing_store =
- ArrayBuffer::NewBackingStore(
- data, sizeof(data), [](void*, size_t, void*) {}, nullptr);
- Local<ArrayBuffer> ab =
- ArrayBuffer::New(isolate(), std::move(backing_store));
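+    // Allocate a fresh ArrayBuffer and copy the payload in, instead of
+    // wrapping static memory in an externally owned backing store.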
+ const uint8_t data[] = {4, 5, 6};
+ Local<ArrayBuffer> ab = ArrayBuffer::New(isolate(), sizeof(data));
+ memcpy(ab->GetBackingStore()->Data(), data, sizeof(data));
return Uint8Array::New(ab, 0, sizeof(data));
}
@@ -2058,15 +2063,9 @@ class ValueSerializerTestWithSharedArrayBufferClone
#endif // V8_ENABLE_WEBASSEMBLY
CHECK(!is_wasm_memory);
- std::unique_ptr<v8::BackingStore> backing_store =
- SharedArrayBuffer::NewBackingStore(
- data, byte_length,
- [](void*, size_t, void*) {
- // Leak the buffer as it has the
- // lifetime of the test.
- },
- nullptr);
- return SharedArrayBuffer::New(isolate(), std::move(backing_store));
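+    // Allocate a new SharedArrayBuffer and copy the payload rather than
+    // adopting (and leaking) an external backing store.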
+ auto sab = SharedArrayBuffer::New(isolate(), byte_length);
+ memcpy(sab->GetBackingStore()->Data(), data, byte_length);
+ return sab;
}
static void SetUpTestCase() {
diff --git a/deps/v8/test/unittests/profiler/strings-storage-unittest.cc b/deps/v8/test/unittests/profiler/strings-storage-unittest.cc
index a15640ce65..49c32394a4 100644
--- a/deps/v8/test/unittests/profiler/strings-storage-unittest.cc
+++ b/deps/v8/test/unittests/profiler/strings-storage-unittest.cc
@@ -113,17 +113,23 @@ TEST_F(StringsStorageWithIsolate, Refcounting) {
const char* a = storage.GetCopy("12");
CHECK_EQ(storage.GetStringCountForTesting(), 1);
+ CHECK_EQ(2, storage.GetStringSize());
const char* b = storage.GetCopy("12");
CHECK_EQ(storage.GetStringCountForTesting(), 1);
+ CHECK_EQ(2, storage.GetStringSize());
// Ensure that we deduplicate the string.
CHECK_EQ(a, b);
CHECK(storage.Release(a));
CHECK_EQ(storage.GetStringCountForTesting(), 1);
+ CHECK_EQ(2, storage.GetStringSize());
+
CHECK(storage.Release(b));
CHECK_EQ(storage.GetStringCountForTesting(), 0);
+ CHECK_EQ(0, storage.GetStringSize());
+
#if !DEBUG
CHECK(!storage.Release("12"));
#endif // !DEBUG
@@ -131,16 +137,21 @@ TEST_F(StringsStorageWithIsolate, Refcounting) {
// Verify that other constructors refcount as intended.
const char* c = storage.GetFormatted("%d", 12);
CHECK_EQ(storage.GetStringCountForTesting(), 1);
+ CHECK_EQ(2, storage.GetStringSize());
const char* d = storage.GetName(12);
CHECK_EQ(storage.GetStringCountForTesting(), 1);
+ CHECK_EQ(2, storage.GetStringSize());
CHECK_EQ(c, d);
CHECK(storage.Release(c));
CHECK_EQ(storage.GetStringCountForTesting(), 1);
+ CHECK_EQ(2, storage.GetStringSize());
CHECK(storage.Release(d));
CHECK_EQ(storage.GetStringCountForTesting(), 0);
+ CHECK_EQ(0, storage.GetStringSize());
+
CHECK(!storage.Release("12"));
}
diff --git a/deps/v8/test/unittests/run-all-unittests.cc b/deps/v8/test/unittests/run-all-unittests.cc
index 5ef3fe3afe..645dd5b060 100644
--- a/deps/v8/test/unittests/run-all-unittests.cc
+++ b/deps/v8/test/unittests/run-all-unittests.cc
@@ -2,9 +2,11 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+#include <memory>
+
#include "include/cppgc/platform.h"
#include "include/libplatform/libplatform.h"
-#include "include/v8.h"
+#include "include/v8-initialization.h"
#include "src/base/compiler-specific.h"
#include "testing/gmock/include/gmock/gmock.h"
@@ -19,6 +21,9 @@ class DefaultPlatformEnvironment final : public ::testing::Environment {
0, v8::platform::IdleTaskSupport::kEnabled);
ASSERT_TRUE(platform_.get() != nullptr);
v8::V8::InitializePlatform(platform_.get());
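+    // When the virtual memory cage is enabled, it must be initialized after
+    // the platform and before cppgc and V8 itself.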
+#ifdef V8_VIRTUAL_MEMORY_CAGE
+ ASSERT_TRUE(v8::V8::InitializeVirtualMemoryCage());
+#endif
cppgc::InitializeProcess(platform_->GetPageAllocator());
ASSERT_TRUE(v8::V8::Initialize());
}
diff --git a/deps/v8/test/unittests/runtime/runtime-debug-unittest.cc b/deps/v8/test/unittests/runtime/runtime-debug-unittest.cc
index dbc4a76f88..a73795bb40 100644
--- a/deps/v8/test/unittests/runtime/runtime-debug-unittest.cc
+++ b/deps/v8/test/unittests/runtime/runtime-debug-unittest.cc
@@ -2,7 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "include/v8.h"
+#include "include/v8-exception.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-object.h"
+#include "include/v8-template.h"
#include "src/api/api.h"
#include "src/objects/objects-inl.h"
#include "src/runtime/runtime.h"
diff --git a/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc b/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
index 92d45f7949..dc4bd6e17a 100644
--- a/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
+++ b/deps/v8/test/unittests/tasks/background-compile-task-unittest.cc
@@ -4,7 +4,7 @@
#include <memory>
-#include "include/v8.h"
+#include "include/v8-platform.h"
#include "src/api/api-inl.h"
#include "src/ast/ast.h"
#include "src/ast/scopes.h"
diff --git a/deps/v8/test/unittests/test-helpers.cc b/deps/v8/test/unittests/test-helpers.cc
index 9154992b2b..941122b032 100644
--- a/deps/v8/test/unittests/test-helpers.cc
+++ b/deps/v8/test/unittests/test-helpers.cc
@@ -4,7 +4,6 @@
#include "test/unittests/test-helpers.h"
-#include "include/v8.h"
#include "src/api/api.h"
#include "src/execution/isolate.h"
#include "src/handles/handles.h"
diff --git a/deps/v8/test/unittests/test-helpers.h b/deps/v8/test/unittests/test-helpers.h
index 13aacd4398..d70ac81714 100644
--- a/deps/v8/test/unittests/test-helpers.h
+++ b/deps/v8/test/unittests/test-helpers.h
@@ -7,7 +7,7 @@
#include <memory>
-#include "include/v8.h"
+#include "include/v8-primitive.h"
#include "src/parsing/parse-info.h"
namespace v8 {
diff --git a/deps/v8/test/unittests/test-utils.cc b/deps/v8/test/unittests/test-utils.cc
index 4aa9628b67..eef418113a 100644
--- a/deps/v8/test/unittests/test-utils.cc
+++ b/deps/v8/test/unittests/test-utils.cc
@@ -5,7 +5,7 @@
#include "test/unittests/test-utils.h"
#include "include/libplatform/libplatform.h"
-#include "include/v8.h"
+#include "include/v8-isolate.h"
#include "src/api/api-inl.h"
#include "src/base/platform/time.h"
#include "src/execution/isolate.h"
diff --git a/deps/v8/test/unittests/test-utils.h b/deps/v8/test/unittests/test-utils.h
index 74eb1bad37..cac8980b68 100644
--- a/deps/v8/test/unittests/test-utils.h
+++ b/deps/v8/test/unittests/test-utils.h
@@ -8,7 +8,10 @@
#include <memory>
#include <vector>
-#include "include/v8.h"
+#include "include/v8-array-buffer.h"
+#include "include/v8-context.h"
+#include "include/v8-local-handle.h"
+#include "include/v8-primitive.h"
#include "src/api/api-inl.h"
#include "src/base/macros.h"
#include "src/base/utils/random-number-generator.h"
diff --git a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
index 89dd4f29e8..393256b0a4 100644
--- a/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/function-body-decoder-unittest.cc
@@ -1091,7 +1091,6 @@ TEST_F(FunctionBodyDecoderTest, UnreachableRefTypes) {
WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- WASM_FEATURE_SCOPE(gc_experiments);
WASM_FEATURE_SCOPE(return_call);
byte function_index = builder.AddFunction(sigs.i_ii());
@@ -2990,18 +2989,16 @@ TEST_F(FunctionBodyDecoderTest, TryDelegate) {
sigs.v_v(),
{WASM_BLOCK(WASM_TRY_OP, WASM_TRY_DELEGATE(WASM_STMTS(kExprThrow, ex), 2),
kExprCatch, ex, kExprEnd)});
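+  // Delegating from inside a catch handler or across a block boundary used to
+  // be a validation error; both cases now validate.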
+ ExpectValidates(sigs.v_v(),
+ {WASM_TRY_OP, kExprCatch, ex,
+ WASM_TRY_DELEGATE(WASM_STMTS(kExprThrow, ex), 0), kExprEnd},
+ kAppendEnd);
+ ExpectValidates(sigs.v_v(),
+ {WASM_TRY_OP,
+ WASM_BLOCK(WASM_TRY_DELEGATE(WASM_STMTS(kExprThrow, ex), 0)),
+ kExprCatch, ex, kExprEnd},
+ kAppendEnd);
- ExpectFailure(sigs.v_v(),
- {WASM_TRY_OP,
- WASM_BLOCK(WASM_TRY_DELEGATE(WASM_STMTS(kExprThrow, ex), 0)),
- kExprCatch, ex, kExprEnd},
- kAppendEnd,
- "delegate target must be a try block or the function block");
- ExpectFailure(sigs.v_v(),
- {WASM_TRY_OP, kExprCatch, ex,
- WASM_TRY_DELEGATE(WASM_STMTS(kExprThrow, ex), 0), kExprEnd},
- kAppendEnd,
- "cannot delegate inside the catch handler of the target");
ExpectFailure(
sigs.v_v(),
{WASM_BLOCK(WASM_TRY_OP, WASM_TRY_DELEGATE(WASM_STMTS(kExprThrow, ex), 3),
@@ -3706,6 +3703,38 @@ TEST_F(FunctionBodyDecoderTest, AllowingNonDefaultableLocals) {
kAppendEnd, "uninitialized non-defaultable local: 2");
}
+TEST_F(FunctionBodyDecoderTest, UnsafeNonDefaultableLocals) {
+ WASM_FEATURE_SCOPE(typed_funcref);
+ WASM_FEATURE_SCOPE(reftypes);
+ WASM_FEATURE_SCOPE(unsafe_nn_locals);
+ byte struct_type_index = builder.AddStruct({F(kWasmI32, true)});
+ ValueType rep = ref(struct_type_index);
+ FunctionSig sig(0, 1, &rep);
+ AddLocals(rep, 2);
+ // Declaring non-defaultable locals is fine.
+ ExpectValidates(&sig, {});
+ // Loading from an uninitialized non-defaultable local validates (but crashes
+ // when executed).
+ ExpectValidates(&sig, {WASM_LOCAL_GET(1), WASM_DROP});
+ // Loading from an initialized local is fine.
+ ExpectValidates(&sig, {WASM_LOCAL_SET(1, WASM_LOCAL_GET(0)),
+ WASM_LOCAL_GET(1), WASM_DROP});
+ ExpectValidates(&sig, {WASM_LOCAL_TEE(1, WASM_LOCAL_GET(0)),
+ WASM_LOCAL_GET(1), WASM_DROP, WASM_DROP});
+ // Non-nullable locals must be initialized with non-null values.
+ ExpectFailure(&sig, {WASM_LOCAL_SET(1, WASM_REF_NULL(struct_type_index))},
+ kAppendEnd,
+ "expected type (ref 0), found ref.null of type (ref null 0)");
+ // Block structure doesn't matter, everything validates.
+ ExpectValidates(&sig, {WASM_LOCAL_SET(1, WASM_LOCAL_GET(0)),
+ WASM_BLOCK(WASM_LOCAL_GET(1), WASM_DROP),
+ WASM_LOCAL_GET(1), WASM_DROP});
+ ExpectValidates(&sig,
+ {WASM_LOCAL_SET(1, WASM_LOCAL_GET(0)),
+ WASM_BLOCK(WASM_LOCAL_SET(2, WASM_LOCAL_GET(0))),
+ WASM_LOCAL_GET(1), WASM_DROP, WASM_LOCAL_GET(2), WASM_DROP});
+}
+
TEST_F(FunctionBodyDecoderTest, RefEq) {
WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(eh);
@@ -4287,7 +4316,6 @@ TEST_F(FunctionBodyDecoderTest, RttSub) {
WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- WASM_FEATURE_SCOPE(gc_experiments);
uint8_t array_type_index = builder.AddArray(kWasmI8, true);
uint8_t super_struct_type_index = builder.AddStruct({F(kWasmI16, true)});
diff --git a/deps/v8/test/unittests/wasm/liftoff-register-unittests.cc b/deps/v8/test/unittests/wasm/liftoff-register-unittests.cc
index 84f5908768..8ab3d500c1 100644
--- a/deps/v8/test/unittests/wasm/liftoff-register-unittests.cc
+++ b/deps/v8/test/unittests/wasm/liftoff-register-unittests.cc
@@ -11,6 +11,8 @@
#include "src/execution/mips/frame-constants-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/execution/mips64/frame-constants-mips64.h"
+#elif V8_TARGET_ARCH_LOONG64
+#include "src/execution/loong64/frame-constants-loong64.h"
#elif V8_TARGET_ARCH_ARM
#include "src/execution/arm/frame-constants-arm.h"
#elif V8_TARGET_ARCH_ARM64
diff --git a/deps/v8/test/unittests/wasm/memory-protection-unittest.cc b/deps/v8/test/unittests/wasm/memory-protection-unittest.cc
new file mode 100644
index 0000000000..65f8aeafe1
--- /dev/null
+++ b/deps/v8/test/unittests/wasm/memory-protection-unittest.cc
@@ -0,0 +1,182 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/flags/flags.h"
+#include "src/wasm/code-space-access.h"
+#include "src/wasm/module-compiler.h"
+#include "src/wasm/module-decoder.h"
+#include "src/wasm/wasm-engine.h"
+#include "src/wasm/wasm-features.h"
+#include "src/wasm/wasm-opcodes.h"
+#include "test/common/wasm/wasm-macro-gen.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace wasm {
+
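+// The code-protection configurations under test: no write protection, memory
+// protection keys (PKU), mprotect-based write protection, and PKU with an
+// mprotect fallback on hardware without PKU support.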
+enum MemoryProtectionMode {
+ kNoProtection,
+ kPku,
+ kMprotect,
+ kPkuWithMprotectFallback
+};
+
+const char* MemoryProtectionModeToString(MemoryProtectionMode mode) {
+ switch (mode) {
+ case kNoProtection:
+ return "NoProtection";
+ case kPku:
+ return "Pku";
+ case kMprotect:
+ return "Mprotect";
+ case kPkuWithMprotectFallback:
+ return "PkuWithMprotectFallback";
+ }
+}
+
+class MemoryProtectionTest : public TestWithNativeContext {
+ public:
+ void Initialize(MemoryProtectionMode mode) {
+ mode_ = mode;
+ bool enable_pku = mode == kPku || mode == kPkuWithMprotectFallback;
+ FLAG_wasm_memory_protection_keys = enable_pku;
+ if (enable_pku) {
+ GetWasmCodeManager()->InitializeMemoryProtectionKeyForTesting();
+ }
+
+ bool enable_mprotect =
+ mode == kMprotect || mode == kPkuWithMprotectFallback;
+ FLAG_wasm_write_protect_code_memory = enable_mprotect;
+ }
+
+ void CompileModule() {
+ CHECK_NULL(native_module_);
+ native_module_ = CompileNativeModule();
+ code_ = native_module_->GetCode(0);
+ }
+
+ NativeModule* native_module() const { return native_module_.get(); }
+
+ WasmCode* code() const { return code_; }
+
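+  // Whether any write-protection mechanism (MAP_JIT, PKU, or mprotect) guards
+  // the code space in this configuration.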
+ bool code_is_protected() {
+ return V8_HAS_PTHREAD_JIT_WRITE_PROTECT || uses_pku() || uses_mprotect();
+ }
+
+ void MakeCodeWritable() {
+ native_module_->MakeWritable(base::AddressRegionOf(code_->instructions()));
+ }
+
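+  // Clobber the first byte of the compiled function; this faults if the code
+  // page is currently write-protected.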
+ void WriteToCode() { code_->instructions()[0] = 0; }
+
+ void AssertCodeEventuallyProtected() {
+ if (!code_is_protected()) {
+ // Without protection, writing to code should always work.
+ WriteToCode();
+ return;
+ }
+    // Tier-up might be running and temporarily unprotecting the code region
+    // (if using mprotect). In that case, write to the code region repeatedly
+    // until the write eventually crashes.
+ ASSERT_DEATH_IF_SUPPORTED(
+ do {
+ WriteToCode();
+ base::OS::Sleep(base::TimeDelta::FromMilliseconds(10));
+ } while (uses_mprotect()),
+ "");
+ }
+
+ bool uses_mprotect() {
+ // M1 always uses MAP_JIT.
+ if (V8_HAS_PTHREAD_JIT_WRITE_PROTECT) return false;
+ return mode_ == kMprotect ||
+ (mode_ == kPkuWithMprotectFallback && !uses_pku());
+ }
+
+ bool uses_pku() {
+ // M1 always uses MAP_JIT.
+ if (V8_HAS_PTHREAD_JIT_WRITE_PROTECT) return false;
+ bool param_has_pku = mode_ == kPku || mode_ == kPkuWithMprotectFallback;
+ return param_has_pku &&
+ GetWasmCodeManager()->HasMemoryProtectionKeySupport();
+ }
+
+ private:
+ std::shared_ptr<NativeModule> CompileNativeModule() {
+ // Define the bytes for a module with a single empty function.
+ static const byte module_bytes[] = {
+ WASM_MODULE_HEADER, SECTION(Type, ENTRY_COUNT(1), SIG_ENTRY_v_v),
+ SECTION(Function, ENTRY_COUNT(1), SIG_INDEX(0)),
+ SECTION(Code, ENTRY_COUNT(1), ADD_COUNT(0 /* locals */, kExprEnd))};
+
+ ModuleResult result =
+ DecodeWasmModule(WasmFeatures::All(), std::begin(module_bytes),
+ std::end(module_bytes), false, kWasmOrigin,
+ isolate()->counters(), isolate()->metrics_recorder(),
+ v8::metrics::Recorder::ContextId::Empty(),
+ DecodingMethod::kSync, GetWasmEngine()->allocator());
+ CHECK(result.ok());
+
+ Handle<FixedArray> export_wrappers;
+ ErrorThrower thrower(isolate(), "");
+ constexpr int kNoCompilationId = 0;
+ std::shared_ptr<NativeModule> native_module = CompileToNativeModule(
+ isolate(), WasmFeatures::All(), &thrower, std::move(result).value(),
+ ModuleWireBytes{base::ArrayVector(module_bytes)}, &export_wrappers,
+ kNoCompilationId);
+ CHECK(!thrower.error());
+ CHECK_NOT_NULL(native_module);
+
+ return native_module;
+ }
+
+ MemoryProtectionMode mode_;
+ std::shared_ptr<NativeModule> native_module_;
+ WasmCodeRefScope code_refs_;
+ WasmCode* code_;
+};
+
+class ParameterizedMemoryProtectionTest
+ : public MemoryProtectionTest,
+ public ::testing::WithParamInterface<MemoryProtectionMode> {
+ public:
+ void SetUp() override { Initialize(GetParam()); }
+};
+
+std::string PrintMemoryProtectionTestParam(
+ ::testing::TestParamInfo<MemoryProtectionMode> info) {
+ return MemoryProtectionModeToString(info.param);
+}
+
+INSTANTIATE_TEST_SUITE_P(MemoryProtection, ParameterizedMemoryProtectionTest,
+ ::testing::Values(kNoProtection, kPku, kMprotect,
+ kPkuWithMprotectFallback),
+ PrintMemoryProtectionTestParam);
+
+TEST_P(ParameterizedMemoryProtectionTest, CodeNotWritableAfterCompilation) {
+ CompileModule();
+ AssertCodeEventuallyProtected();
+}
+
+TEST_P(ParameterizedMemoryProtectionTest, CodeWritableWithinScope) {
+ CompileModule();
+ CodeSpaceWriteScope write_scope(native_module());
+ MakeCodeWritable();
+ WriteToCode();
+}
+
+TEST_P(ParameterizedMemoryProtectionTest, CodeNotWritableAfterScope) {
+ CompileModule();
+ {
+ CodeSpaceWriteScope write_scope(native_module());
+ MakeCodeWritable();
+ WriteToCode();
+ }
+ AssertCodeEventuallyProtected();
+}
+
+} // namespace wasm
+} // namespace internal
+} // namespace v8
diff --git a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
index 271d6fe1b9..7854b9d5d7 100644
--- a/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
+++ b/deps/v8/test/unittests/wasm/module-decoder-unittest.cc
@@ -838,7 +838,6 @@ TEST_F(WasmModuleVerifyTest, GlobalRttFreshSubOfCanon) {
WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- WASM_FEATURE_SCOPE(gc_experiments);
static const byte data[] = {
SECTION(Type, ENTRY_COUNT(2),
@@ -870,7 +869,6 @@ TEST_F(WasmModuleVerifyTest, GlobalRttFreshSubOfSubOfCanon) {
WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- WASM_FEATURE_SCOPE(gc_experiments);
static const byte data[] = {
SECTION(Type, ENTRY_COUNT(2),
@@ -888,7 +886,6 @@ TEST_F(WasmModuleVerifyTest, GlobalRttFreshSubOfFreshSubOfCanon) {
WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- WASM_FEATURE_SCOPE(gc_experiments);
static const byte data[] = {
SECTION(Type, ENTRY_COUNT(2),
@@ -928,7 +925,6 @@ TEST_F(WasmModuleVerifyTest, GlobalRttFreshSubOfGlobal) {
WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- WASM_FEATURE_SCOPE(gc_experiments);
static const byte data[] = {
SECTION(Type, ENTRY_COUNT(2),
@@ -974,7 +970,6 @@ TEST_F(WasmModuleVerifyTest, GlobalRttFreshSubOfGlobalTypeError) {
WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- WASM_FEATURE_SCOPE(gc_experiments);
static const byte data[] = {
SECTION(Type, ENTRY_COUNT(1),
@@ -1015,7 +1010,6 @@ TEST_F(WasmModuleVerifyTest, GlobalRttFreshSubIllegalParent) {
WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- WASM_FEATURE_SCOPE(gc_experiments);
static const byte data[] = {
SECTION(Type, ENTRY_COUNT(2),
@@ -1049,7 +1043,6 @@ TEST_F(WasmModuleVerifyTest, RttFreshSubGlobalTypeError) {
WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- WASM_FEATURE_SCOPE(gc_experiments);
static const byte data[] = {
SECTION(Type, ENTRY_COUNT(1),
@@ -1066,7 +1059,6 @@ TEST_F(WasmModuleVerifyTest, StructNewInitExpr) {
WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- WASM_FEATURE_SCOPE(gc_experiments);
static const byte basic[] = {
SECTION(Type, ENTRY_COUNT(1), // --
@@ -1116,7 +1108,6 @@ TEST_F(WasmModuleVerifyTest, ArrayInitInitExpr) {
WASM_FEATURE_SCOPE(reftypes);
WASM_FEATURE_SCOPE(typed_funcref);
WASM_FEATURE_SCOPE(gc);
- WASM_FEATURE_SCOPE(gc_experiments);
static const byte basic[] = {
SECTION(Type, ENTRY_COUNT(1), WASM_ARRAY_DEF(kI16Code, true)),
diff --git a/deps/v8/test/unittests/wasm/trap-handler-posix-unittest.cc b/deps/v8/test/unittests/wasm/trap-handler-posix-unittest.cc
index 3c8bf92c39..8ddffce695 100644
--- a/deps/v8/test/unittests/wasm/trap-handler-posix-unittest.cc
+++ b/deps/v8/test/unittests/wasm/trap-handler-posix-unittest.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "include/v8.h"
+#include "include/v8-initialization.h"
#include "src/trap-handler/trap-handler.h"
#include "testing/gtest/include/gtest/gtest.h"
diff --git a/deps/v8/test/unittests/wasm/trap-handler-simulator-unittest.cc b/deps/v8/test/unittests/wasm/trap-handler-simulator-unittest.cc
index e5b71d956f..dffa202b03 100644
--- a/deps/v8/test/unittests/wasm/trap-handler-simulator-unittest.cc
+++ b/deps/v8/test/unittests/wasm/trap-handler-simulator-unittest.cc
@@ -4,7 +4,7 @@
#include "src/trap-handler/trap-handler-simulator.h"
-#include "include/v8.h"
+#include "include/v8-initialization.h"
#include "src/codegen/macro-assembler-inl.h"
#include "src/execution/simulator.h"
#include "src/trap-handler/trap-handler.h"
diff --git a/deps/v8/test/unittests/wasm/trap-handler-win-unittest.cc b/deps/v8/test/unittests/wasm/trap-handler-win-unittest.cc
index d2efc71a6d..b90fafeee9 100644
--- a/deps/v8/test/unittests/wasm/trap-handler-win-unittest.cc
+++ b/deps/v8/test/unittests/wasm/trap-handler-win-unittest.cc
@@ -4,7 +4,8 @@
#include <windows.h>
-#include "include/v8.h"
+#include "include/v8-initialization.h"
+#include "include/v8-platform.h"
#include "src/base/page-allocator.h"
#include "src/trap-handler/trap-handler.h"
#include "src/utils/allocation.h"
diff --git a/deps/v8/test/wasm-api-tests/reflect.cc b/deps/v8/test/wasm-api-tests/reflect.cc
index 80a85aa23a..9831e693bb 100644
--- a/deps/v8/test/wasm-api-tests/reflect.cc
+++ b/deps/v8/test/wasm-api-tests/reflect.cc
@@ -43,7 +43,7 @@ TEST_F(WasmCapiTest, Reflect) {
builder()->AddExportedGlobal(kWasmF64, false, WasmInitExpr(0.0),
base::CStrVector(kGlobalName));
- builder()->AllocateIndirectFunctions(12);
+ builder()->AddTable(kWasmFuncRef, 12, 12);
builder()->AddExport(base::CStrVector(kTableName), kExternalTable, 0);
builder()->SetMinMemorySize(1);
diff --git a/deps/v8/test/wasm-api-tests/run-all-wasm-api-tests.cc b/deps/v8/test/wasm-api-tests/run-all-wasm-api-tests.cc
index 6c8b6f4aef..1371a9236b 100644
--- a/deps/v8/test/wasm-api-tests/run-all-wasm-api-tests.cc
+++ b/deps/v8/test/wasm-api-tests/run-all-wasm-api-tests.cc
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "include/v8.h"
+#include "include/v8-initialization.h"
#include "src/flags/flags.h"
#include "src/trap-handler/trap-handler.h"
#include "testing/gmock/include/gmock/gmock.h"
diff --git a/deps/v8/test/wasm-api-tests/table.cc b/deps/v8/test/wasm-api-tests/table.cc
index e96ad5f0a1..c28dcce4a7 100644
--- a/deps/v8/test/wasm-api-tests/table.cc
+++ b/deps/v8/test/wasm-api-tests/table.cc
@@ -37,9 +37,8 @@ void ExpectResult(int expected, const Func* func, int arg1, int arg2) {
} // namespace
TEST_F(WasmCapiTest, Table) {
- builder()->AllocateIndirectFunctions(2);
- builder()->SetMaxTableSize(10);
- builder()->AddExport(base::CStrVector("table"), kExternalTable, 0);
+ const uint32_t table_index = builder()->AddTable(kWasmFuncRef, 2, 10);
+ builder()->AddExport(base::CStrVector("table"), kExternalTable, table_index);
const uint32_t sig_i_i_index = builder()->AddSignature(wasm_i_i_sig());
ValueType reps[] = {kWasmI32, kWasmI32, kWasmI32};
FunctionSig call_sig(1, 2, reps);
@@ -54,7 +53,9 @@ TEST_F(WasmCapiTest, Table) {
AddExportedFunction(base::CStrVector("g"), g_code, sizeof(g_code),
wasm_i_i_sig());
// Set table[1] to {f}, which has function index 1.
- builder()->SetIndirectFunction(1, 1);
+ builder()->SetIndirectFunction(
+ table_index, 1, 1,
+ WasmModuleBuilder::WasmElemSegment::kRelativeToImports);
Instantiate(nullptr);
diff --git a/deps/v8/test/wasm-js/tests.tar.gz.sha1 b/deps/v8/test/wasm-js/tests.tar.gz.sha1
index 4efb938397..e0eefb98fe 100644
--- a/deps/v8/test/wasm-js/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-js/tests.tar.gz.sha1
@@ -1 +1 @@
-3d2b0637fc1c3230156dc219ae2734dfdd4b303b
\ No newline at end of file
+47fd6174b31ba072c87780a01537140f817989dc
\ No newline at end of file
diff --git a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1 b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
index 2eaf31c3b3..1caa818336 100644
--- a/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
+++ b/deps/v8/test/wasm-spec-tests/tests.tar.gz.sha1
@@ -1 +1 @@
-9c4a85d3db889ce4e148e7ac3933255832aa528a
\ No newline at end of file
+51142d7857b770528a57965d94c7b5365bf9c1a7
\ No newline at end of file
diff --git a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
index 6ce1d113c6..935f8a77d2 100644
--- a/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
+++ b/deps/v8/test/wasm-spec-tests/wasm-spec-tests.status
@@ -89,7 +89,7 @@
'conversions': [SKIP],
}], # '(arch == mipsel or arch == mips64el or arch == mips or arch == mips64) and not simulator_run'
-['(arch == mipsel or arch == mips64el) and simulator_run', {
+['(arch == mipsel or arch == mips64el or arch == loong64) and simulator_run', {
# These tests need larger stack size on simulator.
'skip-stack-guard-page': '--sim-stack-size=8192',
'proposals/tail-call/skip-stack-guard-page': '--sim-stack-size=8192',
diff --git a/deps/v8/test/webkit/fast/js/deep-recursion-test.js b/deps/v8/test/webkit/fast/js/deep-recursion-test.js
index c0a3b26c30..b0b7e78109 100644
--- a/deps/v8/test/webkit/fast/js/deep-recursion-test.js
+++ b/deps/v8/test/webkit/fast/js/deep-recursion-test.js
@@ -21,6 +21,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// This neuters a too-low stack size passed by the flag fuzzer.
+// Flags: --stack-size=864
+
description("This test how deep we can recurse, and that we get an exception when we do, as opposed to a stack overflow.");
function simpleRecursion(depth) {
diff --git a/deps/v8/test/webkit/fast/js/function-apply.js b/deps/v8/test/webkit/fast/js/function-apply.js
index 14a65646b3..407c101add 100644
--- a/deps/v8/test/webkit/fast/js/function-apply.js
+++ b/deps/v8/test/webkit/fast/js/function-apply.js
@@ -21,6 +21,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// This neuters a too-low stack size passed by the flag fuzzer.
+// Flags: --stack-size=864
+
description('Tests to ensure that Function.apply works correctly for Arrays, arguments and array-like objects.');
function argumentsApply1(a, b, c)
diff --git a/deps/v8/test/webkit/function-call-register-allocation.js b/deps/v8/test/webkit/function-call-register-allocation.js
index 8873bdec43..d8a6e2a14e 100644
--- a/deps/v8/test/webkit/function-call-register-allocation.js
+++ b/deps/v8/test/webkit/function-call-register-allocation.js
@@ -21,6 +21,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// This neuters a too-low stack size passed by the flag fuzzer.
+// Flags: --stack-size=864
+
description(
"This test checks for a specific regression that caused function calls to allocate too many temporary registers."
);
diff --git a/deps/v8/test/webkit/run-json-stringify.js b/deps/v8/test/webkit/run-json-stringify.js
index 51323b9b3a..41c89228e0 100644
--- a/deps/v8/test/webkit/run-json-stringify.js
+++ b/deps/v8/test/webkit/run-json-stringify.js
@@ -2,6 +2,9 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
+// This neuters a too-low stack size passed by the flag fuzzer.
+// Flags: --stack-size=864
+
var nativeJSON = this.JSON;
this.JSON = null;
d8.file.execute("test/webkit/resources/json2-es5-compat.js");
diff --git a/deps/v8/third_party/jinja2/Jinja2-2.10.1.tar.gz.md5 b/deps/v8/third_party/jinja2/Jinja2-2.10.1.tar.gz.md5
new file mode 100644
index 0000000000..479d0d13cf
--- /dev/null
+++ b/deps/v8/third_party/jinja2/Jinja2-2.10.1.tar.gz.md5
@@ -0,0 +1 @@
+0ae535be40fd215a8114a090c8b68e5a Jinja2-2.10.1.tar.gz
diff --git a/deps/v8/third_party/jinja2/Jinja2-2.10.1.tar.gz.sha512 b/deps/v8/third_party/jinja2/Jinja2-2.10.1.tar.gz.sha512
new file mode 100644
index 0000000000..217887094d
--- /dev/null
+++ b/deps/v8/third_party/jinja2/Jinja2-2.10.1.tar.gz.sha512
@@ -0,0 +1 @@
+a00153a0e07bb7d67f301b4eaf7af657726a1985e9ffc7ae2d76bdbb4c062d672efc8065e398767e1039b18a483a0092e206deac91e4047aad64920b56869623 Jinja2-2.10.1.tar.gz
diff --git a/deps/v8/third_party/jinja2/Jinja2-2.10.tar.gz.md5 b/deps/v8/third_party/jinja2/Jinja2-2.10.tar.gz.md5
deleted file mode 100644
index 9137ee129a..0000000000
--- a/deps/v8/third_party/jinja2/Jinja2-2.10.tar.gz.md5
+++ /dev/null
@@ -1 +0,0 @@
-61ef1117f945486472850819b8d1eb3d Jinja2-2.10.tar.gz
diff --git a/deps/v8/third_party/jinja2/Jinja2-2.10.tar.gz.sha512 b/deps/v8/third_party/jinja2/Jinja2-2.10.tar.gz.sha512
deleted file mode 100644
index 087d24c18e..0000000000
--- a/deps/v8/third_party/jinja2/Jinja2-2.10.tar.gz.sha512
+++ /dev/null
@@ -1 +0,0 @@
-0ea7371be67ffcf19e46dfd06523a45a0806e678a407d54f5f2f3e573982f0959cf82ec5d07b203670309928a62ef71109701ab16547a9bba2ebcdc178cb67f2 Jinja2-2.10.tar.gz
diff --git a/deps/v8/third_party/jinja2/OWNERS b/deps/v8/third_party/jinja2/OWNERS
index 05a4a96035..c4b81d802a 100644
--- a/deps/v8/third_party/jinja2/OWNERS
+++ b/deps/v8/third_party/jinja2/OWNERS
@@ -1,3 +1,4 @@
timloh@chromium.org
haraken@chromium.org
+wnwen@chromium.org
yukishiino@chromium.org
diff --git a/deps/v8/third_party/jinja2/README.chromium b/deps/v8/third_party/jinja2/README.chromium
index ecaf5a618c..37e91391be 100644
--- a/deps/v8/third_party/jinja2/README.chromium
+++ b/deps/v8/third_party/jinja2/README.chromium
@@ -1,8 +1,8 @@
Name: Jinja2 Python Template Engine
Short Name: jinja2
URL: https://jinja.palletsprojects.com/
-Version: 2.10
-CPEPrefix: cpe:/a:pocoo:jinja2:2.10
+Version: 2.10.1
+CPEPrefix: cpe:/a:pocoo:jinja2:2.10.1
License: BSD 3-Clause
License File: LICENSE
Security Critical: no
@@ -10,9 +10,9 @@ Security Critical: no
Description:
Template engine for code generation in Blink.
-Source: https://files.pythonhosted.org/packages/56/e6/332789f295cf22308386cf5bbd1f4e00ed11484299c5d7383378cf48ba47/Jinja2-2.10.tar.gz
-MD5: 61ef1117f945486472850819b8d1eb3d
-SHA-1: 34b69e5caab12ee37b9df69df9018776c008b7b8
+Source: https://files.pythonhosted.org/packages/93/ea/d884a06f8c7f9b7afbc8138b762e80479fb17aedbbe2b06515a12de9378d/Jinja2-2.10.1.tar.gz
+MD5: 0ae535be40fd215a8114a090c8b68e5a
+SHA-512: a00153a0e07bb7d67f301b4eaf7af657726a1985e9ffc7ae2d76bdbb4c062d672efc8065e398767e1039b18a483a0092e206deac91e4047aad64920b56869623
Local Modifications:
This only includes the jinja2 directory from the tarball and the LICENSE and
diff --git a/deps/v8/third_party/jinja2/__init__.py b/deps/v8/third_party/jinja2/__init__.py
index 42aa763d57..15e13b6f2e 100644
--- a/deps/v8/third_party/jinja2/__init__.py
+++ b/deps/v8/third_party/jinja2/__init__.py
@@ -27,7 +27,7 @@
:license: BSD, see LICENSE for more details.
"""
__docformat__ = 'restructuredtext en'
-__version__ = '2.10'
+__version__ = '2.10.1'
# high level interface
from jinja2.environment import Environment, Template
diff --git a/deps/v8/third_party/jinja2/get_jinja2.sh b/deps/v8/third_party/jinja2/get_jinja2.sh
index 941ba96f1f..15ed7dee7c 100755
--- a/deps/v8/third_party/jinja2/get_jinja2.sh
+++ b/deps/v8/third_party/jinja2/get_jinja2.sh
@@ -5,8 +5,8 @@
# Download page:
# https://pypi.python.org/pypi/Jinja2
PACKAGE='Jinja2'
-VERSION='2.10'
-SRC_URL='https://files.pythonhosted.org/packages/56/e6/332789f295cf22308386cf5bbd1f4e00ed11484299c5d7383378cf48ba47/Jinja2-2.10.tar.gz'
+VERSION='2.10.1'
+SRC_URL='https://files.pythonhosted.org/packages/93/ea/d884a06f8c7f9b7afbc8138b762e80479fb17aedbbe2b06515a12de9378d/Jinja2-2.10.1.tar.gz'
PACKAGE_DIR='jinja2'
CHROMIUM_FILES="README.chromium OWNERS get_jinja2.sh DIR_METADATA patches"
diff --git a/deps/v8/third_party/jinja2/lexer.py b/deps/v8/third_party/jinja2/lexer.py
index 6fd135dd5b..1f790259d6 100644
--- a/deps/v8/third_party/jinja2/lexer.py
+++ b/deps/v8/third_party/jinja2/lexer.py
@@ -45,12 +45,6 @@ else:
from jinja2 import _identifier
name_re = re.compile(r'[\w{0}]+'.format(_identifier.pattern))
check_ident = True
- # remove the pattern from memory after building the regex
- import sys
- del sys.modules['jinja2._identifier']
- import jinja2
- del jinja2._identifier
- del _identifier
float_re = re.compile(r'(?<!\.)\d+\.\d+')
newline_re = re.compile(r'(\r\n|\r|\n)')
diff --git a/deps/v8/third_party/jinja2/patches/0002-jinja2-add-_identifier-to-pydeps-for-py3.patch b/deps/v8/third_party/jinja2/patches/0002-jinja2-add-_identifier-to-pydeps-for-py3.patch
new file mode 100644
index 0000000000..a979bed788
--- /dev/null
+++ b/deps/v8/third_party/jinja2/patches/0002-jinja2-add-_identifier-to-pydeps-for-py3.patch
@@ -0,0 +1,34 @@
+From c07882fb6ea8a02869c84fd79e48855229ca5985 Mon Sep 17 00:00:00 2001
+From: Peter Wen <wnwen@chromium.org>
+Date: Wed, 18 Aug 2021 09:53:11 -0400
+Subject: [PATCH] [PATCH] jinja2: add _identifier to pydeps for py3
+
+If _identifier is deleted then build/print_python_deps.py fails to
+detect _identifier.py as a pydep for jinja, resulting in the isolate
+files missing third_party/jinja2/_identifier.py as a necessary file.
+
+Bug: 1228231
+---
+ third_party/jinja2/lexer.py | 6 ------
+ 1 file changed, 6 deletions(-)
+
+diff --git a/third_party/jinja2/lexer.py b/third_party/jinja2/lexer.py
+index 6fd135dd5b0a..1f790259d671 100644
+--- a/third_party/jinja2/lexer.py
++++ b/third_party/jinja2/lexer.py
+@@ -45,12 +45,6 @@ else:
+ from jinja2 import _identifier
+ name_re = re.compile(r'[\w{0}]+'.format(_identifier.pattern))
+ check_ident = True
+- # remove the pattern from memory after building the regex
+- import sys
+- del sys.modules['jinja2._identifier']
+- import jinja2
+- del jinja2._identifier
+- del _identifier
+
+ float_re = re.compile(r'(?<!\.)\d+\.\d+')
+ newline_re = re.compile(r'(\r\n|\r|\n)')
+--
+2.33.0.rc1.237.g0d66db33f3-goog
+
diff --git a/deps/v8/third_party/jinja2/sandbox.py b/deps/v8/third_party/jinja2/sandbox.py
index 93fb9d45f3..752e81289f 100644
--- a/deps/v8/third_party/jinja2/sandbox.py
+++ b/deps/v8/third_party/jinja2/sandbox.py
@@ -137,7 +137,7 @@ class _MagicFormatMapping(Mapping):
def inspect_format_method(callable):
if not isinstance(callable, (types.MethodType,
types.BuiltinMethodType)) or \
- callable.__name__ != 'format':
+ callable.__name__ not in ('format', 'format_map'):
return None
obj = callable.__self__
if isinstance(obj, string_types):
@@ -402,7 +402,7 @@ class SandboxedEnvironment(Environment):
obj.__class__.__name__
), name=attribute, obj=obj, exc=SecurityError)
- def format_string(self, s, args, kwargs):
+ def format_string(self, s, args, kwargs, format_func=None):
"""If a format call is detected, then this is routed through this
method so that our safety sandbox can be used for it.
"""
@@ -410,6 +410,17 @@ class SandboxedEnvironment(Environment):
formatter = SandboxedEscapeFormatter(self, s.escape)
else:
formatter = SandboxedFormatter(self)
+
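+        # format_map takes a single mapping; validate the call shape and route
+        # the mapping through the kwargs path so the sandbox checks it too.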
+ if format_func is not None and format_func.__name__ == 'format_map':
+ if len(args) != 1 or kwargs:
+ raise TypeError(
+                    'format_map() takes exactly one argument (%d given)'
+ % (len(args) + (kwargs is not None))
+ )
+
+ kwargs = args[0]
+ args = None
+
kwargs = _MagicFormatMapping(args, kwargs)
rv = formatter.vformat(s, args, kwargs)
return type(s)(rv)
@@ -418,7 +429,7 @@ class SandboxedEnvironment(Environment):
"""Call an object from sandboxed code."""
fmt = inspect_format_method(__obj)
if fmt is not None:
- return __self.format_string(fmt, args, kwargs)
+ return __self.format_string(fmt, args, kwargs, __obj)
# the double prefixes are to avoid double keyword argument
# errors when proxying the call.
diff --git a/deps/v8/third_party/jinja2/tests.py b/deps/v8/third_party/jinja2/tests.py
index b14f85ff14..0adc3d4dbc 100644
--- a/deps/v8/third_party/jinja2/tests.py
+++ b/deps/v8/third_party/jinja2/tests.py
@@ -10,7 +10,7 @@
"""
import operator
import re
-from collections.abc import Mapping
+from collections import Mapping
from jinja2.runtime import Undefined
from jinja2._compat import text_type, string_types, integer_types
import decimal
diff --git a/deps/v8/third_party/zlib/google/zip_internal.cc b/deps/v8/third_party/zlib/google/zip_internal.cc
index cea1e88d03..00e9eefe6c 100644
--- a/deps/v8/third_party/zlib/google/zip_internal.cc
+++ b/deps/v8/third_party/zlib/google/zip_internal.cc
@@ -84,7 +84,7 @@ void* ZipOpenFunc(void* opaque, const void* filename, int mode) {
}
#endif
-#if defined(OS_POSIX)
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
// Callback function for zlib that opens a file stream from a file descriptor.
// Since we do not own the file descriptor, dup it so that we can fdopen/fclose
// a file stream.
@@ -286,7 +286,7 @@ unzFile OpenForUnzipping(const std::string& file_name_utf8) {
return unzOpen2_64(file_name_utf8.c_str(), zip_func_ptrs);
}
-#if defined(OS_POSIX)
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
unzFile OpenFdForUnzipping(int zip_fd) {
zlib_filefunc64_def zip_funcs;
FillFdOpenFileFunc(&zip_funcs, zip_fd);
diff --git a/deps/v8/third_party/zlib/google/zip_internal.h b/deps/v8/third_party/zlib/google/zip_internal.h
index ef5b5d0906..c7feba692b 100644
--- a/deps/v8/third_party/zlib/google/zip_internal.h
+++ b/deps/v8/third_party/zlib/google/zip_internal.h
@@ -35,7 +35,7 @@ namespace internal {
// Windows.
unzFile OpenForUnzipping(const std::string& file_name_utf8);
-#if defined(OS_POSIX)
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
// Opens the file referred to by |zip_fd| for unzipping.
unzFile OpenFdForUnzipping(int zip_fd);
#endif
diff --git a/deps/v8/third_party/zlib/google/zip_reader.cc b/deps/v8/third_party/zlib/google/zip_reader.cc
index 8ddd8dfc25..2ad1398499 100644
--- a/deps/v8/third_party/zlib/google/zip_reader.cc
+++ b/deps/v8/third_party/zlib/google/zip_reader.cc
@@ -157,7 +157,7 @@ bool ZipReader::Open(const base::FilePath& zip_file_path) {
bool ZipReader::OpenFromPlatformFile(base::PlatformFile zip_fd) {
DCHECK(!zip_file_);
-#if defined(OS_POSIX)
+#if defined(OS_POSIX) || defined(OS_FUCHSIA)
zip_file_ = internal::OpenFdForUnzipping(zip_fd);
#elif defined(OS_WIN)
zip_file_ = internal::OpenHandleForUnzipping(zip_fd);
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/build_db.js b/deps/v8/tools/clusterfuzz/js_fuzzer/build_db.js
index 675a322c64..c00d286eb1 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/build_db.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/build_db.js
@@ -34,7 +34,6 @@ function main() {
}
const mutateDb = new db.MutateDbWriter(program.output_dir);
- const expressions = new Set();
const inputDir = path.resolve(program.input_dir);
for (const corpusName of program.args) {
@@ -53,7 +52,7 @@ function main() {
}
try{
- mutateDb.process(source, expressions);
+ mutateDb.process(source);
} catch (e) {
console.log(e);
}
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/db.js b/deps/v8/tools/clusterfuzz/js_fuzzer/db.js
index e96265b068..3fbe438023 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/db.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/db.js
@@ -11,11 +11,13 @@ const fs = require('fs');
const fsPath = require('path');
const babelGenerator = require('@babel/generator').default;
+const babelTemplate = require('@babel/template').default;
const babelTraverse = require('@babel/traverse').default;
const babelTypes = require('@babel/types');
const globals = require('globals');
const random = require('./random.js');
+const sourceHelpers = require('./source_helpers.js');
const globalIdentifiers = new Set(Object.keys(globals.builtin));
const propertyNames = new Set([
@@ -238,6 +240,29 @@ function _markSkipped(path) {
}
}
+/**
+ * Returns true if an expression can be applied, i.e. instantiating its
+ * template with placeholder dependencies does not throw; false otherwise.
+ */
+function isValid(expression) {
+ const expressionTemplate = babelTemplate(
+ expression.source,
+ sourceHelpers.BABYLON_REPLACE_VAR_OPTIONS);
+
+ const dependencies = {};
+ if (expression.dependencies) {
+ for (const dependency of expression.dependencies) {
+ dependencies[dependency] = babelTypes.identifier('__v_0');
+ }
+ }
+
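+  // Instantiating the template throws e.g. on re-parse syntax errors or
+  // unknown substitutions; such expressions must not land in the DB.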
+ try {
+ expressionTemplate(dependencies);
+ } catch (e) {
+ return false;
+ }
+ return true;
+}
+
class MutateDbWriter {
constructor(outputDir) {
this.seen = new Set();
@@ -393,6 +418,11 @@ class MutateDbWriter {
return;
}
+  // Test that the expression actually applies before writing it out.
+ if (!isValid(expression)) {
+ return;
+ }
+
// Write results.
let dirPath = fsPath.join(self.outputDir, expression.type);
if (!fs.existsSync(dirPath)) {
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/exceptions.js b/deps/v8/tools/clusterfuzz/js_fuzzer/exceptions.js
index efb1a8a649..4a571d5dd0 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/exceptions.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/exceptions.js
@@ -144,24 +144,6 @@ const DISALLOWED_DIFFERENTIAL_FUZZ_FLAGS = [
'--validate-asm',
];
-const ALLOWED_RUNTIME_FUNCTIONS = new Set([
- // List of allowed runtime functions. Others will be replaced with no-ops.
- 'ArrayBufferDetach',
- 'CompileBaseline',
- 'DeoptimizeFunction',
- 'DeoptimizeNow',
- 'EnableCodeLoggingForTesting',
- 'GetUndetectable',
- 'HeapObjectVerify',
- 'IsBeingInterpreted',
- 'NeverOptimizeFunction',
- 'OptimizeFunctionOnNextCall',
- 'OptimizeOsr',
- 'PrepareFunctionForOptimization',
- 'SetAllocationTimeout',
- 'SimulateNewspaceFull',
-]);
-
const MAX_FILE_SIZE_BYTES = 128 * 1024; // 128KB
const MEDIUM_FILE_SIZE_BYTES = 32 * 1024; // 32KB
@@ -260,13 +242,6 @@ function filterDifferentialFuzzFlags(flags) {
flag => _doesntMatch(DISALLOWED_DIFFERENTIAL_FUZZ_FLAGS, flag));
}
-function isAllowedRuntimeFunction(name) {
- if (process.env.APP_NAME != 'd8') {
- return false;
- }
-
- return ALLOWED_RUNTIME_FUNCTIONS.has(name);
-}
module.exports = {
filterDifferentialFuzzFlags: filterDifferentialFuzzFlags,
@@ -274,7 +249,6 @@ module.exports = {
getGeneratedSoftSkipped: getGeneratedSoftSkipped,
getGeneratedSloppy: getGeneratedSloppy,
getSoftSkipped: getSoftSkipped,
- isAllowedRuntimeFunction: isAllowedRuntimeFunction,
isTestSkippedAbs: isTestSkippedAbs,
isTestSkippedRel: isTestSkippedRel,
isTestSoftSkippedAbs: isTestSoftSkippedAbs,
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/mutators/crossover_mutator.js b/deps/v8/tools/clusterfuzz/js_fuzzer/mutators/crossover_mutator.js
index 7e3c4955ce..491501dc5c 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/mutators/crossover_mutator.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/mutators/crossover_mutator.js
@@ -36,12 +36,9 @@ class CrossOverMutator extends mutator.Mutator {
{canHaveSuper: canHaveSuper});
// Insert the statement.
- var templateOptions = Object.assign({}, sourceHelpers.BABYLON_OPTIONS);
- templateOptions['placeholderPattern'] = /^VAR_[0-9]+$/;
-
let toInsert = babelTemplate(
randomExpression.source,
- templateOptions);
+ sourceHelpers.BABYLON_REPLACE_VAR_OPTIONS);
const dependencies = {};
if (randomExpression.dependencies) {
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/source_helpers.js b/deps/v8/tools/clusterfuzz/js_fuzzer/source_helpers.js
index 264734607a..d7cb142f81 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/source_helpers.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/source_helpers.js
@@ -46,6 +46,9 @@ const BABYLON_OPTIONS = {
],
}
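+// Like BABYLON_OPTIONS, but treats VAR_0, VAR_1, ... as babel template
+// placeholders.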
+const BABYLON_REPLACE_VAR_OPTIONS = Object.assign({}, BABYLON_OPTIONS);
+BABYLON_REPLACE_VAR_OPTIONS['placeholderPattern'] = /^VAR_[0-9]+$/;
+
function _isV8OrSpiderMonkeyLoad(path) {
// 'load' and 'loadRelativeToScript' used by V8 and SpiderMonkey.
return (babelTypes.isIdentifier(path.node.callee) &&
@@ -323,7 +326,6 @@ function loadSource(baseDir, relPath, parseStrict=false) {
removeComments(ast);
cleanAsserts(ast);
- neuterDisallowedV8Natives(ast);
annotateWithOriginalPath(ast, relPath);
const flags = loadFlags(data);
@@ -373,28 +375,6 @@ function cleanAsserts(ast) {
}
/**
- * Filter out disallowed V8 runtime functions.
- */
-function neuterDisallowedV8Natives(ast) {
- babelTraverse(ast, {
- CallExpression(path) {
- if (!babelTypes.isIdentifier(path.node.callee) ||
- !path.node.callee.name.startsWith(V8_BUILTIN_PREFIX)) {
- return;
- }
-
- const functionName = path.node.callee.name.substr(
- V8_BUILTIN_PREFIX.length);
-
- if (!exceptions.isAllowedRuntimeFunction(functionName)) {
- path.replaceWith(babelTypes.callExpression(
- babelTypes.identifier('nop'), []));
- }
- }
- });
-}
-
-/**
* Annotate code with original file path.
*/
function annotateWithOriginalPath(ast, relPath) {
@@ -468,6 +448,7 @@ function generateCode(source, dependencies=[]) {
module.exports = {
BABYLON_OPTIONS: BABYLON_OPTIONS,
+ BABYLON_REPLACE_VAR_OPTIONS: BABYLON_REPLACE_VAR_OPTIONS,
generateCode: generateCode,
loadDependencyAbs: loadDependencyAbs,
loadResource: loadResource,
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/test/test_db.js b/deps/v8/tools/clusterfuzz/js_fuzzer/test/test_db.js
new file mode 100644
index 0000000000..1b645865b7
--- /dev/null
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/test/test_db.js
@@ -0,0 +1,33 @@
+// Copyright 2021 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+/**
+ * @fileoverview Test the script building the DB.
+ */
+
+'use strict';
+
+const assert = require('assert');
+const { execSync } = require("child_process");
+const fs = require('fs');
+const path = require('path');
+const tempy = require('tempy');
+
+function buildDb(inputDir, corpusName, outputDir) {
+ execSync(
+ `node build_db.js -i ${inputDir} -o ${outputDir} ${corpusName}`,
+ {stdio: ['pipe']});
+}
+
+describe('DB tests', () => {
+ // Test feeds an expression that does not apply.
+ it('omits erroneous expressions', () => {
+ const outPath = tempy.directory();
+ buildDb('test_data/db', 'this', outPath);
+ const indexFile = path.join(outPath, 'index.json');
+    const indexJSON = JSON.parse(fs.readFileSync(indexFile, 'utf-8'));
+ assert.deepEqual(
+ indexJSON, {"statements": [], "superStatements": [], "all": []});
+ });
+});
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/test/test_regressions.js b/deps/v8/tools/clusterfuzz/js_fuzzer/test/test_regressions.js
index a753c1c60a..62481f1f40 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/test/test_regressions.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/test/test_regressions.js
@@ -38,31 +38,6 @@ function execFile(jsFile) {
execSync("node " + jsFile, {stdio: ['pipe']});
}
-function buildDb(inputDir, corpusName, outputDir) {
- execSync(
- `node build_db.js -i ${inputDir} -o ${outputDir} ${corpusName}`,
- {stdio: ['pipe']});
-}
-
-function assertFuzzWithDbThrows(dbInputDir, corpusName, settings, regexp) {
- const outPath = tempy.directory();
- buildDb(dbInputDir, corpusName, outPath);
-
- settings['MUTATE_CROSSOVER_INSERT'] = 1.0;
- assert.throws(
- () => {
- createFuzzTest(
- outPath, settings,
- ['regress/build_db/cross_over_mutator_input.js']);
- },
- err => {
- assert(regexp.test(err));
- return true;
- },
- 'unexpected error',
- );
-}
-
describe('Regression tests', () => {
beforeEach(() => {
helpers.deterministicRandom(sandbox);
@@ -135,25 +110,4 @@ describe('Regression tests', () => {
['regress/numbers/input_indices.js']);
execFile(file);
});
-
- it('create call expression', () => {
- // TODO(machenbach): Build_db extracts a function expression without
- // parentheses, re-parsing this later fails in cross-over mutator.
- assertFuzzWithDbThrows(
- 'test_data/regress/build_db',
- 'destructuring',
- this.settings,
- SYNTAX_ERROR_RE);
- });
-
- it('create assignment expression', () => {
- // TODO(machenbach): Build_db extracts some assignment expressions with a
- // spurious dependency. This leads to an "unknown substitution" error
- // when applying the template.
- assertFuzzWithDbThrows(
- 'test_data/regress/build_db',
- 'this',
- this.settings,
- /.*Unknown substitution.*/);
- });
});
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/cross_over_mutator_class_input.js b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/cross_over_mutator_class_input.js
index f16fb2fe53..f16fb2fe53 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/cross_over_mutator_class_input.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/cross_over_mutator_class_input.js
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/this/file.js b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/db/this/file.js
index 115616da0d..115616da0d 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/this/file.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/db/this/file.js
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load.js b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load.js
index dfa4bc49ba..342c9d87a3 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load.js
@@ -3,5 +3,5 @@
// found in the LICENSE file.
var testLoad = 'test_load';
-d8.file.execute('test_data/mjsunit/test_load_1.js');
-d8.file.execute('test_load_0.js');
+load('test_data/mjsunit/test_load_1.js');
+load('test_load_0.js');
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_0.js b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_0.js
index 3959a126b4..d0e66e4a9f 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_0.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_0.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-d8.file.execute('test_data/mjsunit/test_load_1.js');
-d8.file.execute('test_load_2.js');
-d8.file.execute('test_load_3.js');
+load('test_data/mjsunit/test_load_1.js');
+load('test_load_2.js');
+load('test_load_3.js');
var testLoad0 = 'test_load_0';
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_1.js b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_1.js
index 8328dd2468..03c9166975 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_1.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_1.js
@@ -2,5 +2,5 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-d8.file.execute('test_load_2.js');
+load('test_load_2.js');
var testLoad1 = 'test_load_1';
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_self.js b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_self.js
index cd2dfb5c04..31a9f4c507 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_self.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/mjsunit/test_load_self.js
@@ -2,4 +2,4 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-d8.file.execute("test_load_self.js");
+load("test_load_self.js");
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/destructuring/input.js b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/destructuring/input.js
deleted file mode 100644
index fce0782617..0000000000
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/regress/build_db/destructuring/input.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2020 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-
-let x, y;
-(function([ x = y = 1 ]) {}([]));
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/load.js b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/load.js
index fa5ddf6086..43a776c476 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/load.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/test_data/spidermonkey/test/load.js
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-d8.file.execute('load1.js');
+load('load1.js');
loadRelativeToScript('load2.js');
console.log('load.js');
diff --git a/deps/v8/tools/clusterfuzz/js_fuzzer/test_db.js b/deps/v8/tools/clusterfuzz/js_fuzzer/test_db.js
index ff13c383c5..35f7956b76 100644
--- a/deps/v8/tools/clusterfuzz/js_fuzzer/test_db.js
+++ b/deps/v8/tools/clusterfuzz/js_fuzzer/test_db.js
@@ -29,7 +29,6 @@ function main() {
return;
}
- const loader = new sourceHelpers.V8SourceLoader();
const mutateDb = new db.MutateDb(program.input_dir);
const mutator = new crossOverMutator.CrossOverMutator(
{ MUTATE_CROSSOVER_INSERT: 1.0, testing: true }, mutateDb);
@@ -47,9 +46,9 @@ function main() {
() => { return expression; });
// Use a source that will try to insert one statement, allowing
// super.
- const source = loader.load(
+ const source = sourceHelpers.loadSource(
__dirname,
- 'test_data/regress/build_db/cross_over_mutator_class_input.js');
+ 'test_data/cross_over_mutator_class_input.js');
try {
mutator.mutate(source);
nPass++;
diff --git a/deps/v8/tools/clusterfuzz/v8_commands.py b/deps/v8/tools/clusterfuzz/v8_commands.py
index 924acbedd9..f03161c2c4 100644
--- a/deps/v8/tools/clusterfuzz/v8_commands.py
+++ b/deps/v8/tools/clusterfuzz/v8_commands.py
@@ -110,8 +110,7 @@ class Output(object):
self.pid = pid
def HasCrashed(self):
- return (self.exit_code < 0 and
- self.exit_code != -signal.SIGABRT)
+ return self.exit_code < 0
def Execute(args, cwd, timeout=None):
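
For context on the simplified HasCrashed above: on POSIX, a subprocess returncode is negative when the child dies from a signal (returncode == -signum), so `exit_code < 0` now treats SIGABRT like any other crash signal. A small sketch, POSIX-only; has_crashed is a hypothetical standalone name for the check:

    import signal
    import subprocess
    import sys

    # A child killed by a signal reports a negative returncode: -signum.
    proc = subprocess.run([sys.executable, '-c',
                           'import os, signal; os.kill(os.getpid(), signal.SIGABRT)'])
    assert proc.returncode == -signal.SIGABRT

    def has_crashed(exit_code):
        # Mirrors the simplified check above: any signal death is a crash.
        return exit_code < 0

    print(has_crashed(proc.returncode))  # True
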
diff --git a/deps/v8/tools/clusterfuzz/v8_foozzie.py b/deps/v8/tools/clusterfuzz/v8_foozzie.py
index 52b7954093..92f881df83 100755
--- a/deps/v8/tools/clusterfuzz/v8_foozzie.py
+++ b/deps/v8/tools/clusterfuzz/v8_foozzie.py
@@ -78,13 +78,6 @@ CONFIGS = dict(
'--always-opt',
'--force-slow-path',
],
- trusted=[
- '--no-untrusted-code-mitigations',
- ],
- trusted_opt=[
- '--always-opt',
- '--no-untrusted-code-mitigations',
- ],
)
BASELINE_CONFIG = 'ignition'
@@ -173,6 +166,15 @@ KNOWN_FAILURES = {
'CrashTests/5694376231632896/1033966.js': 'flaky',
}
+# Flags that are already crashy during smoke tests should not be used.
+DISALLOWED_FLAGS = [
+ '--gdbjit',
+]
+
+
+def filter_flags(flags):
+ return [flag for flag in flags if flag not in DISALLOWED_FLAGS]
+
def infer_arch(d8):
"""Infer the V8 architecture from the build configuration next to the
@@ -223,7 +225,7 @@ class ExecutionArgumentsConfig(object):
d8 = os.path.join(BASE_PATH, d8)
assert os.path.exists(d8)
- flags = CONFIGS[config] + get('config_extra_flags')
+ flags = CONFIGS[config] + filter_flags(get('config_extra_flags'))
RunOptions = namedtuple('RunOptions', ['arch', 'config', 'd8', 'flags'])
return RunOptions(infer_arch(d8), config, d8, flags)
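
filter_flags() above strips the known-crashy flags from the user-supplied extras before they are appended to a config. A standalone sketch of the behavior:

    DISALLOWED_FLAGS = ['--gdbjit']

    def filter_flags(flags):
        # Drop any flag on the disallow list; order is preserved.
        return [flag for flag in flags if flag not in DISALLOWED_FLAGS]

    print(filter_flags(['--foo', '--gdbjit', '--bar']))  # ['--foo', '--bar']
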
diff --git a/deps/v8/tools/clusterfuzz/v8_smoke_tests.js b/deps/v8/tools/clusterfuzz/v8_smoke_tests.js
index 39eb2d4e21..2c5fab338d 100644
--- a/deps/v8/tools/clusterfuzz/v8_smoke_tests.js
+++ b/deps/v8/tools/clusterfuzz/v8_smoke_tests.js
@@ -40,6 +40,6 @@ print("Sensitive runtime functions are neutered");
%OptimizeFunctionOnNextCall(foo);
foo();
print(%GetOptimizationStatus(foo));
- const fun = new Function("f", "sync", "return %GetOptimizationStatus(f);");
+ const fun = new Function("f", "return %GetOptimizationStatus(f);");
print(fun(foo));
})();
diff --git a/deps/v8/tools/cppgc/gen_cmake.py b/deps/v8/tools/cppgc/gen_cmake.py
index 0375d0fd3b..1063455b7f 100755
--- a/deps/v8/tools/cppgc/gen_cmake.py
+++ b/deps/v8/tools/cppgc/gen_cmake.py
@@ -244,7 +244,7 @@ set(CMAKE_CXX_STANDARD_REQUIRED ON)
option(CPPGC_ENABLE_OBJECT_NAMES "Enable object names in cppgc for debug purposes" OFF)
option(CPPGC_ENABLE_CAGED_HEAP "Enable heap reservation of size 4GB, only possible for 64bit archs" OFF)
-option(CPPGC_ENABLE_VERIFY_LIVE_BYTES " Enable verification of live bytes in the marking verifier" OFF)
+option(CPPGC_ENABLE_VERIFY_HEAP "Enables additional heap verification phases and checks" OFF)
option(CPPGC_CHECK_ASSIGNMENTS_IN_PREFINALIZERS " Enable assignment checks for Members/Persistents during prefinalizer invocations" OFF)
option(CPPGC_ENABLE_YOUNG_GENERATION "Enable young generation in cppgc" OFF)
set(CPPGC_TARGET_ARCH "x64" CACHE STRING "Target architecture, possible options: x64, x86, arm, arm64, ppc64, s390x, mipsel, mips64el")
@@ -409,7 +409,7 @@ else{else_cond}
deps=['Threads::Threads'],
desc='Main library'),
'sample':
- Target(name='cppgc_sample',
+ Target(name='cppgc_hello_world',
cmake='add_executable',
deps=['cppgc'],
desc='Example'),
@@ -435,8 +435,8 @@ endif()
if(CPPGC_ENABLE_CAGED_HEAP)
target_compile_definitions({target.name} PRIVATE "-DCPPGC_CAGED_HEAP")
endif()
-if(CPPGC_ENABLE_VERIFY_LIVE_BYTES)
- target_compile_definitions({target.name} PRIVATE "-DCPPGC_VERIFY_LIVE_BYTES")
+if(CPPGC_ENABLE_VERIFY_HEAP)
+ target_compile_definitions({target.name} PRIVATE "-DCPPGC_ENABLE_VERIFY_HEAP")
endif()
if(CPPGC_CHECK_ASSIGNMENTS_IN_PREFINALIZERS)
target_compile_definitions({target.name} PRIVATE "-DCPPGC_CHECK_ASSIGNMENTS_IN_PREFINALIZERS")
diff --git a/deps/v8/tools/cppgc/test_cmake.sh b/deps/v8/tools/cppgc/test_cmake.sh
index 77f551c0b5..55765ddcdd 100755
--- a/deps/v8/tools/cppgc/test_cmake.sh
+++ b/deps/v8/tools/cppgc/test_cmake.sh
@@ -50,7 +50,7 @@ cmake -GNinja $rootdir || fail "Failed to execute cmake"
# Build all targets.
ninja cppgc || fail "Failed to build cppgc"
-ninja cppgc_sample || fail "Failed to build sample"
+ninja cppgc_hello_world || fail "Failed to build sample"
ninja cppgc_unittests || fail "Failed to build unittests"
# Run unittests.
diff --git a/deps/v8/tools/dev/gm.py b/deps/v8/tools/dev/gm.py
index 3d52b70cdf..613065d5b1 100755
--- a/deps/v8/tools/dev/gm.py
+++ b/deps/v8/tools/dev/gm.py
@@ -28,6 +28,7 @@ not contain spaces.
from __future__ import print_function
import errno
import os
+import platform
import re
import subprocess
import sys
@@ -42,7 +43,7 @@ BUILD_TARGETS_ALL = ["all"]
# All arches that this script understands.
ARCHES = ["ia32", "x64", "arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
- "riscv64", "s390", "s390x", "android_arm", "android_arm64"]
+ "riscv64", "s390", "s390x", "android_arm", "android_arm64", "loong64"]
# Arches that get built/run when you don't specify any.
DEFAULT_ARCHES = ["ia32", "x64", "arm", "arm64"]
# Modes that this script understands.
@@ -250,9 +251,7 @@ def _Notify(summary, body):
print("{} - {}".format(summary, body))
def _GetMachine():
- # Once we migrate to Python3, this can use os.uname().machine.
- # The index-based access is compatible with all Python versions.
- return os.uname()[4]
+ return platform.machine()
def GetPath(arch, mode):
subdir = "%s.%s" % (arch, mode)
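
platform.machine() returns the same machine string as os.uname()[4] (the output of `uname -m`) but works on both Python 2 and 3 and on platforms without os.uname. A quick equivalence check, POSIX-only:

    import os
    import platform

    # Both report the hardware identifier, e.g. 'x86_64' or 'aarch64'.
    assert platform.machine() == os.uname()[4]
    print(platform.machine())
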
@@ -299,6 +298,10 @@ class Config(object):
cpu = "arm64"
elif self.arch == "arm" and _GetMachine() in ("aarch64", "arm64"):
cpu = "arm"
+ elif self.arch == "loong64" and _GetMachine() == "loongarch64":
+ cpu = "loong64"
+ elif self.arch == "mips64el" and _GetMachine() == "mips64":
+ cpu = "mips64el"
elif "64" in self.arch or self.arch == "s390x":
# Native x64 or simulator build.
cpu = "x64"
@@ -310,7 +313,7 @@ class Config(object):
elif self.arch == "android_arm64":
v8_cpu = "arm64"
elif self.arch in ("arm", "arm64", "mipsel", "mips64el", "ppc", "ppc64",
- "riscv64", "s390", "s390x"):
+ "riscv64", "s390", "s390x", "loong64"):
v8_cpu = self.arch
else:
return []
@@ -322,9 +325,9 @@ class Config(object):
return []
def GetSpecialCompiler(self):
- if _GetMachine() == "aarch64":
- # We have no prebuilt Clang for arm64 on Linux, so use the system Clang
- # instead.
+ if _GetMachine() in ("aarch64", "mips64", "loongarch64"):
+ # We have no prebuilt Clang for arm64, mips64 or loongarch64 on Linux,
+ # so use the system Clang instead.
return ["clang_base_path = \"/usr\"", "clang_use_chrome_plugins = false"]
return []
@@ -363,7 +366,7 @@ class Config(object):
csa_trap = re.compile("Specify option( --csa-trap-on-node=[^ ]*)")
match = csa_trap.search(output)
extra_opt = match.group(1) if match else ""
- cmdline = re.compile("python ../../tools/run.py ./mksnapshot (.*)")
+ cmdline = re.compile("python3 ../../tools/run.py ./mksnapshot (.*)")
orig_cmdline = cmdline.search(output).group(1).strip()
cmdline = PrepareMksnapshotCmdline(orig_cmdline, path) + extra_opt
_Notify("V8 build requires your attention",
@@ -503,7 +506,7 @@ def Main(argv):
return_code = 0
# If we have Goma but it is not running, start it.
if (IS_GOMA_MACHINE and
- _Call("ps -e | grep compiler_proxy > /dev/null", silent=True) != 0):
+ _Call("pgrep -x compiler_proxy > /dev/null", silent=True) != 0):
_Call("%s/goma_ctl.py ensure_start" % GOMADIR)
for c in configs:
return_code += configs[c].Build()
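
`pgrep -x compiler_proxy` replaces the `ps -e | grep` pipeline above: it matches the exact process name, needs no shell pipeline, and exits non-zero when nothing matches, which is what the `!= 0` test keys on. A Python 3 sketch of the same check; is_running is a hypothetical helper name:

    import subprocess

    def is_running(name):
        # pgrep -x matches exact process names and returns a non-zero
        # status when no matching process exists.
        return subprocess.call(['pgrep', '-x', name],
                               stdout=subprocess.DEVNULL) == 0

    if not is_running('compiler_proxy'):
        print('compiler_proxy is not running; would start goma here')
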
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index 7b3dcedc92..564c750229 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -91,6 +91,16 @@ consts_misc = [
{ 'name': 'TaggedSize', 'value': 'kTaggedSize' },
{ 'name': 'TaggedSizeLog2', 'value': 'kTaggedSizeLog2' },
+ { 'name': 'CodeKindFieldMask', 'value': 'Code::KindField::kMask' },
+ { 'name': 'CodeKindFieldShift', 'value': 'Code::KindField::kShift' },
+
+ { 'name': 'CodeKindBytecodeHandler',
+ 'value': 'static_cast<int>(CodeKind::BYTECODE_HANDLER)' },
+ { 'name': 'CodeKindInterpretedFunction',
+ 'value': 'static_cast<int>(CodeKind::INTERPRETED_FUNCTION)' },
+ { 'name': 'CodeKindBaseline',
+ 'value': 'static_cast<int>(CodeKind::BASELINE)' },
+
{ 'name': 'OddballFalse', 'value': 'Oddball::kFalse' },
{ 'name': 'OddballTrue', 'value': 'Oddball::kTrue' },
{ 'name': 'OddballTheHole', 'value': 'Oddball::kTheHole' },
@@ -189,6 +199,10 @@ consts_misc = [
'value': 'StandardFrameConstants::kFunctionOffset' },
{ 'name': 'off_fp_args',
'value': 'StandardFrameConstants::kFixedFrameSizeAboveFp' },
+ { 'name': 'off_fp_bytecode_array',
+ 'value': 'UnoptimizedFrameConstants::kBytecodeArrayFromFp' },
+ { 'name': 'off_fp_bytecode_offset',
+ 'value': 'UnoptimizedFrameConstants::kBytecodeOffsetOrFeedbackVectorFromFp' },
{ 'name': 'scopeinfo_idx_nparams',
'value': 'ScopeInfo::kParameterCount' },
@@ -250,6 +264,7 @@ extras_accessors = [
'JSObject, elements, Object, kElementsOffset',
'JSObject, internal_fields, uintptr_t, kHeaderSize',
'FixedArray, data, uintptr_t, kHeaderSize',
+ 'BytecodeArray, data, uintptr_t, kHeaderSize',
'JSArrayBuffer, backing_store, uintptr_t, kBackingStoreOffset',
'JSArrayBuffer, byte_length, size_t, kByteLengthOffset',
'JSArrayBufferView, byte_length, size_t, kByteLengthOffset',
@@ -273,6 +288,7 @@ extras_accessors = [
'UncompiledData, inferred_name, String, kInferredNameOffset',
'UncompiledData, start_position, int32_t, kStartPositionOffset',
'UncompiledData, end_position, int32_t, kEndPositionOffset',
+ 'Script, source, Object, kSourceOffset',
'Script, name, Object, kNameOffset',
'Script, line_ends, Object, kLineEndsOffset',
'SharedFunctionInfo, raw_function_token_offset, int16_t, kFunctionTokenOffsetOffset',
@@ -280,6 +296,7 @@ extras_accessors = [
'SharedFunctionInfo, flags, int, kFlagsOffset',
'SharedFunctionInfo, length, uint16_t, kLengthOffset',
'SlicedString, parent, String, kParentOffset',
+ 'Code, flags, uint32_t, kFlagsOffset',
'Code, instruction_start, uintptr_t, kHeaderSize',
'Code, instruction_size, int, kInstructionSizeOffset',
'String, length, int32_t, kLengthOffset',
diff --git a/deps/v8/tools/generate-header-include-checks.py b/deps/v8/tools/generate-header-include-checks.py
index 250b741068..42c118c9d5 100755
--- a/deps/v8/tools/generate-header-include-checks.py
+++ b/deps/v8/tools/generate-header-include-checks.py
@@ -23,7 +23,7 @@ import re
import sys
# TODO(clemensb): Extend to tests.
-DEFAULT_INPUT = ['base', 'src']
+DEFAULT_INPUT = ['base', 'include', 'src']
DEFAULT_GN_FILE = 'BUILD.gn'
MY_DIR = os.path.dirname(os.path.realpath(__file__))
V8_DIR = os.path.dirname(MY_DIR)
@@ -44,7 +44,7 @@ AUTO_EXCLUDE_PATTERNS = [
# platform-specific headers
'\\b{}\\b'.format(p) for p in
('win', 'win32', 'ia32', 'x64', 'arm', 'arm64', 'mips', 'mips64', 's390',
- 'ppc','riscv64')]
+ 'ppc', 'riscv64', 'loong64')]
args = None
def parse_args():
diff --git a/deps/v8/tools/mb/mb.py b/deps/v8/tools/mb/mb.py
index 42ed60c7ef..671773272a 100755
--- a/deps/v8/tools/mb/mb.py
+++ b/deps/v8/tools/mb/mb.py
@@ -242,8 +242,6 @@ class MetaBuildWrapper(object):
' This can be either a regular path or a '
'GN-style source-relative path like '
'//out/Default.'))
- subp.add_argument('-s', '--swarmed', action='store_true',
- help='Run under swarming with the default dimensions')
subp.add_argument('-d', '--dimension', default=[], action='append', nargs=2,
dest='dimensions', metavar='FOO bar',
help='dimension to filter on')
@@ -375,67 +373,7 @@ class MetaBuildWrapper(object):
if ret:
return ret
- if self.args.swarmed:
- return self._RunUnderSwarming(build_dir, target)
- else:
- return self._RunLocallyIsolated(build_dir, target)
-
- def _RunUnderSwarming(self, build_dir, target):
- # TODO(dpranke): Look up the information for the target in
- # the //testing/buildbot.json file, if possible, so that we
- # can determine the isolate target, command line, and additional
- # swarming parameters, if possible.
- #
- # TODO(dpranke): Also, add support for sharding and merging results.
- # TODO(liviurau): While this seems to not be used in V8 yet, we need to add
- # a switch for internal try-bots, since they need to use 'chrome-swarming'
- cas_instance = 'chromium-swarm'
- dimensions = []
- for k, v in self._DefaultDimensions() + self.args.dimensions:
- dimensions += ['-d', k, v]
-
- archive_json_path = self.ToSrcRelPath(
- '%s/%s.archive.json' % (build_dir, target))
- cmd = [
- self.PathJoin(self.chromium_src_dir, 'tools', 'luci-go',
- self.isolate_exe),
- 'archive',
- '-i',
- self.ToSrcRelPath('%s/%s.isolate' % (build_dir, target)),
- '-cas-instance', cas_instance,
- '-dump-json',
- archive_json_path,
- ]
- ret, _, _ = self.Run(cmd, force_verbose=False)
- if ret:
- return ret
-
- try:
- archive_hashes = json.loads(self.ReadFile(archive_json_path))
- except Exception:
- self.Print(
- 'Failed to read JSON file "%s"' % archive_json_path, file=sys.stderr)
- return 1
- try:
- cas_digest = archive_hashes[target]
- except Exception:
- self.Print(
- 'Cannot find hash for "%s" in "%s", file content: %s' %
- (target, archive_json_path, archive_hashes),
- file=sys.stderr)
- return 1
-
- cmd = [
- self.executable,
- self.PathJoin('tools', 'swarming_client', 'swarming.py'),
- 'run',
- '-digests', cas_digest,
- '-S', 'chromium-swarm.appspot.com',
- ] + dimensions
- if self.args.extra_args:
- cmd += ['--'] + self.args.extra_args
- ret, _, _ = self.Run(cmd, force_verbose=True, buffer_output=False)
- return ret
+ return self._RunLocallyIsolated(build_dir, target)
def _RunLocallyIsolated(self, build_dir, target):
cmd = [
diff --git a/deps/v8/tools/mb/mb_unittest.py b/deps/v8/tools/mb/mb_unittest.py
index 4c67495de4..86d9cd403b 100755
--- a/deps/v8/tools/mb/mb_unittest.py
+++ b/deps/v8/tools/mb/mb_unittest.py
@@ -523,28 +523,6 @@ class UnitTest(unittest.TestCase):
self.check(['run', '-c', 'debug_goma', '//out/Default',
'base_unittests'], files=files, ret=0)
- def test_run_swarmed(self):
- files = {
- '/fake_src/testing/buildbot/gn_isolate_map.pyl': (
- "{'base_unittests': {"
- " 'label': '//base:base_unittests',"
- " 'type': 'raw',"
- " 'args': [],"
- "}}\n"
- ),
- '/fake_src/out/Default/base_unittests.runtime_deps': (
- "base_unittests\n"
- ),
- 'out/Default/base_unittests.archive.json':
- ("{\"base_unittests\":\"fake_hash\"}"),
- }
-
- mbw = self.fake_mbw(files=files)
- self.check(['run', '-s', '-c', 'debug_goma', '//out/Default',
- 'base_unittests'], mbw=mbw, ret=0)
- self.check(['run', '-s', '-c', 'debug_goma', '-d', 'os', 'Win7',
- '//out/Default', 'base_unittests'], mbw=mbw, ret=0)
-
def test_lookup(self):
self.check(['lookup', '-c', 'debug_goma'], ret=0,
out=('\n'
diff --git a/deps/v8/tools/profile.mjs b/deps/v8/tools/profile.mjs
index 4127b34b07..526baa835e 100644
--- a/deps/v8/tools/profile.mjs
+++ b/deps/v8/tools/profile.mjs
@@ -116,8 +116,9 @@ export class Script {
sourcePosition = new SourcePosition(this, line, column,)
this._addSourcePosition(line, column, sourcePosition);
}
- if (entry.entry?.type == "Script") {
- // Mark the source position of scripts, for inline scripts which
+ if (this.sourcePosition === undefined && entry.entry?.type === "Script") {
+ // Mark the source position of scripts, for inline scripts which don't
+ // start at line 1.
this.sourcePosition = sourcePosition;
}
sourcePosition.addEntry(entry);
diff --git a/deps/v8/tools/release/PRESUBMIT.py b/deps/v8/tools/release/PRESUBMIT.py
index 3bcb26d29f..a982b2e153 100644
--- a/deps/v8/tools/release/PRESUBMIT.py
+++ b/deps/v8/tools/release/PRESUBMIT.py
@@ -2,7 +2,13 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
-def CheckChangeOnCommit(input_api, output_api):
+def _CommonChecks(input_api, output_api):
tests = input_api.canned_checks.GetUnitTestsInDirectory(
input_api, output_api, '.', files_to_check=['test_scripts.py$'])
return input_api.RunTests(tests)
+
+def CheckChangeOnUpload(input_api, output_api):
+ return _CommonChecks(input_api, output_api)
+
+def CheckChangeOnCommit(input_api, output_api):
+ return _CommonChecks(input_api, output_api)
diff --git a/deps/v8/tools/release/auto_tag.py b/deps/v8/tools/release/auto_tag.py
index fddefed61f..7e77c313d8 100755
--- a/deps/v8/tools/release/auto_tag.py
+++ b/deps/v8/tools/release/auto_tag.py
@@ -23,7 +23,7 @@ class Preparation(Step):
self.CommonPrepare()
self.PrepareBranch()
- self.GitCheckout("master")
+ self.GitCheckout("main")
self.vc.Pull()
diff --git a/deps/v8/tools/release/check_clusterfuzz.py b/deps/v8/tools/release/check_clusterfuzz.py
index 021cd55286..b1b7e084df 100755
--- a/deps/v8/tools/release/check_clusterfuzz.py
+++ b/deps/v8/tools/release/check_clusterfuzz.py
@@ -28,7 +28,7 @@ import urllib2
# Constants to git repos.
BASE_URL = "https://chromium.googlesource.com"
-DEPS_LOG = BASE_URL + "/chromium/src/+log/master/DEPS?format=JSON"
+DEPS_LOG = BASE_URL + "/chromium/src/+log/main/DEPS?format=JSON"
# Constants for retrieving v8 rolls.
CRREV = "https://cr-rev.appspot.com/_ah/api/crrev/v1/commit/%s"
diff --git a/deps/v8/tools/release/common_includes.py b/deps/v8/tools/release/common_includes.py
index 5049cc4534..b61a3e2e27 100644
--- a/deps/v8/tools/release/common_includes.py
+++ b/deps/v8/tools/release/common_includes.py
@@ -214,13 +214,13 @@ class VCInterface(object):
def GetBranches(self):
raise NotImplementedError()
- def MasterBranch(self):
+ def MainBranch(self):
raise NotImplementedError()
def CandidateBranch(self):
raise NotImplementedError()
- def RemoteMasterBranch(self):
+ def RemoteMainBranch(self):
raise NotImplementedError()
def RemoteCandidateBranch(self):
@@ -258,14 +258,14 @@ class GitInterface(VCInterface):
# Remove 'branch-heads/' prefix.
return map(lambda s: s[13:], branches)
- def MasterBranch(self):
- return "master"
+ def MainBranch(self):
+ return "main"
def CandidateBranch(self):
return "candidates"
- def RemoteMasterBranch(self):
- return "origin/master"
+ def RemoteMainBranch(self):
+ return "origin/main"
def RemoteCandidateBranch(self):
return "origin/candidates"
@@ -275,7 +275,7 @@ class GitInterface(VCInterface):
# want.
if name.startswith('refs/'):
return name
- if name in ["candidates", "master"]:
+ if name in ["candidates", "main"]:
return "refs/remotes/origin/%s" % name
try:
# Check if branch is in heads.
@@ -474,8 +474,8 @@ class Step(GitRecipesMixin):
if not self.GitIsWorkdirClean(): # pragma: no cover
self.Die("Workspace is not clean. Please commit or undo your changes.")
- # Checkout master in case the script was left on a work branch.
- self.GitCheckout('origin/master')
+ # Checkout main in case the script was left on a work branch.
+ self.GitCheckout('origin/main')
# Fetch unfetched revisions.
self.vc.Fetch()
@@ -485,7 +485,7 @@ class Step(GitRecipesMixin):
self.DeleteBranch(self._config["BRANCHNAME"])
def CommonCleanup(self):
- self.GitCheckout('origin/master')
+ self.GitCheckout('origin/main')
self.GitDeleteBranch(self._config["BRANCHNAME"])
# Clean up all temporary files.
@@ -605,13 +605,13 @@ class Step(GitRecipesMixin):
if match:
# Legacy: In the old process there's one level of indirection. The
# version is on the candidates branch and points to the real release
- # base on master through the commit message.
+ # base on main through the commit message.
return match.group("git_rev")
match = PUSH_MSG_NEW_RE.match(title)
if match:
- # This is a new-style v8 version branched from master. The commit
+ # This is a new-style v8 version branched from main. The commit
# "latest_hash" is the version-file change. Its parent is the release
- # base on master.
+ # base on main.
return self.GitLog(n=1, format="%H", git_hash="%s^" % latest_hash)
self.Die("Unknown latest release: %s" % latest_hash)
diff --git a/deps/v8/tools/release/create_release.py b/deps/v8/tools/release/create_release.py
index 20a666fb83..d1a066f00b 100755
--- a/deps/v8/tools/release/create_release.py
+++ b/deps/v8/tools/release/create_release.py
@@ -19,7 +19,7 @@ class Preparation(Step):
def RunStep(self):
self.Git("fetch origin +refs/heads/*:refs/heads/*")
- self.GitCheckout("origin/master")
+ self.GitCheckout("origin/main")
self.DeleteBranch("work-branch")
@@ -28,7 +28,7 @@ class PrepareBranchRevision(Step):
def RunStep(self):
self["push_hash"] = (self._options.revision or
- self.GitLog(n=1, format="%H", branch="origin/master"))
+ self.GitLog(n=1, format="%H", branch="origin/main"))
assert self["push_hash"]
print("Release revision %s" % self["push_hash"])
@@ -39,16 +39,16 @@ class IncrementVersion(Step):
def RunStep(self):
latest_version = self.GetLatestVersion()
- # The version file on master can be used to bump up major/minor at
+ # The version file on main can be used to bump up major/minor at
# branch time.
- self.GitCheckoutFile(VERSION_FILE, self.vc.RemoteMasterBranch())
- self.ReadAndPersistVersion("master_")
- master_version = self.ArrayToVersion("master_")
+ self.GitCheckoutFile(VERSION_FILE, self.vc.RemoteMainBranch())
+ self.ReadAndPersistVersion("main_")
+ main_version = self.ArrayToVersion("main_")
- # Use the highest version from master or from tags to determine the new
+ # Use the highest version from main or from tags to determine the new
# version.
authoritative_version = sorted(
- [master_version, latest_version], key=SortingKey)[1]
+ [main_version, latest_version], key=SortingKey)[1]
self.StoreVersion(authoritative_version, "authoritative_")
# Variables prefixed with 'new_' contain the new version numbers for the
@@ -74,7 +74,7 @@ class DetectLastRelease(Step):
MESSAGE = "Detect commit ID of last release base."
def RunStep(self):
- self["last_push_master"] = self.GetLatestReleaseBase()
+ self["last_push_main"] = self.GetLatestReleaseBase()
class DeleteBranchRef(Step):
@@ -107,7 +107,7 @@ class MakeBranch(Step):
MESSAGE = "Create the branch."
def RunStep(self):
- self.Git("reset --hard origin/master")
+ self.Git("reset --hard origin/main")
self.Git("new-branch work-branch --upstream origin/%s" % self["version"])
self.GitCheckoutFile(VERSION_FILE, self["latest_version"])
@@ -186,7 +186,7 @@ class CleanUp(Step):
print("Congratulations, you have successfully created version %s."
% self["version"])
- self.GitCheckout("origin/master")
+ self.GitCheckout("origin/main")
self.DeleteBranch("work-branch")
self.Git("gc")
diff --git a/deps/v8/tools/release/merge_to_branch.py b/deps/v8/tools/release/merge_to_branch.py
index 44f933e541..08a36125f8 100755
--- a/deps/v8/tools/release/merge_to_branch.py
+++ b/deps/v8/tools/release/merge_to_branch.py
@@ -77,7 +77,7 @@ class SearchArchitecturePorts(Step):
# Search for commits which matches the "Port XXX" pattern.
git_hashes = self.GitLog(reverse=True, format="%H",
grep="^[Pp]ort %s" % revision,
- branch=self.vc.RemoteMasterBranch())
+ branch=self.vc.RemoteMainBranch())
for git_hash in git_hashes.splitlines():
revision_title = self.GitLog(n=1, format="%s", git_hash=git_hash)
@@ -198,7 +198,7 @@ class CleanUp(Step):
class MergeToBranch(ScriptsBase):
def _Description(self):
return ("Performs the necessary steps to merge revisions from "
- "master to release branches like 4.5. This script does not "
+ "main to release branches like 4.5. This script does not "
"version the commit. See http://goo.gl/9ke2Vw for more "
"information.")
diff --git a/deps/v8/tools/release/mergeinfo.py b/deps/v8/tools/release/mergeinfo.py
index bed7441f85..8fae8ad05c 100755
--- a/deps/v8/tools/release/mergeinfo.py
+++ b/deps/v8/tools/release/mergeinfo.py
@@ -30,25 +30,25 @@ def describe_commit(git_working_dir, hash_to_search, one_line=False):
def get_followup_commits(git_working_dir, hash_to_search):
cmd = ['log', '--grep=' + hash_to_search, GIT_OPTION_HASH_ONLY,
- 'remotes/origin/master'];
+ 'remotes/origin/main'];
return git_execute(git_working_dir, cmd).strip().splitlines()
def get_merge_commits(git_working_dir, hash_to_search):
- merges = get_related_commits_not_on_master(git_working_dir, hash_to_search)
- false_merges = get_related_commits_not_on_master(
+ merges = get_related_commits_not_on_main(git_working_dir, hash_to_search)
+ false_merges = get_related_commits_not_on_main(
git_working_dir, 'Cr-Branched-From: ' + hash_to_search)
false_merges = set(false_merges)
return ([merge_commit for merge_commit in merges
if merge_commit not in false_merges])
-def get_related_commits_not_on_master(git_working_dir, grep_command):
+def get_related_commits_not_on_main(git_working_dir, grep_command):
commits = git_execute(git_working_dir, ['log',
'--all',
'--grep=' + grep_command,
GIT_OPTION_ONELINE,
'--decorate',
'--not',
- 'remotes/origin/master',
+ 'remotes/origin/main',
GIT_OPTION_HASH_ONLY])
return commits.splitlines()
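
The renamed helper greps every ref for commits mentioning the hash, then excludes everything reachable from origin/main, leaving only merges that live solely on release branches. A rough standalone equivalent of the underlying git invocation, in Python (the function name is illustrative):

    import subprocess

    def related_commits_not_on_main(git_dir, pattern):
        # Hashes of commits on any ref whose message matches `pattern`,
        # minus everything reachable from origin/main.
        out = subprocess.check_output(
            ['git', '-C', git_dir, 'log', '--all', '--grep=' + pattern,
             '--format=%H', '--not', 'remotes/origin/main'])
        return out.decode().splitlines()
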
diff --git a/deps/v8/tools/release/roll_merge.py b/deps/v8/tools/release/roll_merge.py
index 636c882980..d25f95e397 100755
--- a/deps/v8/tools/release/roll_merge.py
+++ b/deps/v8/tools/release/roll_merge.py
@@ -78,7 +78,7 @@ class SearchArchitecturePorts(Step):
# Search for commits which matches the "Port XXX" pattern.
git_hashes = self.GitLog(reverse=True, format="%H",
grep="Port %s" % revision,
- branch=self.vc.RemoteMasterBranch())
+ branch=self.vc.RemoteMainBranch())
for git_hash in git_hashes.splitlines():
revision_title = self.GitLog(n=1, format="%s", git_hash=git_hash)
@@ -226,7 +226,7 @@ class CleanUp(Step):
class RollMerge(ScriptsBase):
def _Description(self):
return ("Performs the necessary steps to merge revisions from "
- "master to other branches, including candidates and roll branches.")
+ "main to other branches, including candidates and roll branches.")
def _PrepareOptions(self, parser):
group = parser.add_mutually_exclusive_group(required=True)
diff --git a/deps/v8/tools/release/search_related_commits.py b/deps/v8/tools/release/search_related_commits.py
index e6e52d2196..48e6ae2592 100755
--- a/deps/v8/tools/release/search_related_commits.py
+++ b/deps/v8/tools/release/search_related_commits.py
@@ -200,7 +200,7 @@ if __name__ == "__main__": # pragma: no cover
"This tool analyzes the commit range between <of> and <until>. "
"It finds commits which belong together e.g. Implement/Revert pairs and "
"Implement/Port/Revert triples. All supplied hashes need to be "
- "from the same branch e.g. master.")
+ "from the same branch e.g. main.")
parser.add_argument("-g", "--git-dir", required=False, default=".",
help="The path to your git working directory.")
parser.add_argument("--verbose", action="store_true",
diff --git a/deps/v8/tools/release/test_mergeinfo.py b/deps/v8/tools/release/test_mergeinfo.py
index f8619bb2fd..9404542ef6 100755
--- a/deps/v8/tools/release/test_mergeinfo.py
+++ b/deps/v8/tools/release/test_mergeinfo.py
@@ -31,7 +31,7 @@ class TestMergeInfo(unittest.TestCase):
return output
def _update_origin(self):
- # Fetch from origin to get/update the origin/master branch
+ # Fetch from origin to get/update the origin/main branch
self._execute_git(['fetch', 'origin'])
def setUp(self):
@@ -54,10 +54,10 @@ class TestMergeInfo(unittest.TestCase):
def _assert_correct_standard_result(
self, result, all_commits, hash_of_first_commit):
- self.assertEqual(len(result), 1, "Master commit not found")
+ self.assertEqual(len(result), 1, "Main commit not found")
self.assertTrue(
result.get(hash_of_first_commit),
- "Master commit is wrong")
+ "Main commit is wrong")
self.assertEqual(
len(result[hash_of_first_commit]),
@@ -124,7 +124,7 @@ class TestMergeInfo(unittest.TestCase):
def testSearchMerges(self):
self._execute_git(['branch', 'test'])
- self._execute_git(['checkout', 'master'])
+ self._execute_git(['checkout', 'main'])
message = 'real initial commit'
self._make_empty_commit(message)
commits = self._get_commits()
@@ -142,7 +142,7 @@ class TestMergeInfo(unittest.TestCase):
message = 'Cr-Branched-From: ' + hash_of_first_commit
hash_of_ignored = self._make_empty_commit(message)
- self._execute_git(['checkout', 'master'])
+ self._execute_git(['checkout', 'main'])
followups = mergeinfo.get_followup_commits(
self.base_dir,
diff --git a/deps/v8/tools/release/test_scripts.py b/deps/v8/tools/release/test_scripts.py
index e8664cb2f1..e8757cf277 100755
--- a/deps/v8/tools/release/test_scripts.py
+++ b/deps/v8/tools/release/test_scripts.py
@@ -300,7 +300,7 @@ class ScriptTest(unittest.TestCase):
def testCommonPrepareDefault(self):
self.Expect([
Cmd("git status -s -uno", ""),
- Cmd("git checkout -f origin/master", ""),
+ Cmd("git checkout -f origin/main", ""),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
RL("Y"),
@@ -312,7 +312,7 @@ class ScriptTest(unittest.TestCase):
def testCommonPrepareNoConfirm(self):
self.Expect([
Cmd("git status -s -uno", ""),
- Cmd("git checkout -f origin/master", ""),
+ Cmd("git checkout -f origin/main", ""),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
RL("n"),
@@ -323,7 +323,7 @@ class ScriptTest(unittest.TestCase):
def testCommonPrepareDeleteBranchFailure(self):
self.Expect([
Cmd("git status -s -uno", ""),
- Cmd("git checkout -f origin/master", ""),
+ Cmd("git checkout -f origin/main", ""),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
RL("Y"),
@@ -395,13 +395,13 @@ class ScriptTest(unittest.TestCase):
test_tag
"""
- # Version as tag: 3.22.4.0. Version on master: 3.22.6.
+ # Version as tag: 3.22.4.0. Version on main: 3.22.6.
# Make sure that the latest version is 3.22.6.0.
def testIncrementVersion(self):
self.Expect([
Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
Cmd("git tag", self.TAGS),
- Cmd("git checkout -f origin/master -- include/v8-version.h",
+ Cmd("git checkout -f origin/main -- include/v8-version.h",
"", cb=lambda: self.WriteFakeVersionFile(3, 22, 6)),
])
@@ -430,7 +430,7 @@ test_tag
def testCreateRelease(self):
TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
- # The version file on master has build level 5.
+ # The version file on main has build level 5.
self.WriteFakeVersionFile(build=5)
commit_msg = """Version 3.22.5"""
@@ -449,18 +449,18 @@ test_tag
expectations = [
Cmd("git fetch origin +refs/heads/*:refs/heads/*", ""),
- Cmd("git checkout -f origin/master", "", cb=self.WriteFakeWatchlistsFile),
+ Cmd("git checkout -f origin/main", "", cb=self.WriteFakeWatchlistsFile),
Cmd("git branch", ""),
Cmd("git fetch origin +refs/tags/*:refs/tags/*", ""),
Cmd("git tag", self.TAGS),
- Cmd("git checkout -f origin/master -- include/v8-version.h",
+ Cmd("git checkout -f origin/main -- include/v8-version.h",
"", cb=self.WriteFakeVersionFile),
Cmd("git log -1 --format=%H 3.22.4", "release_hash\n"),
Cmd("git log -1 --format=%s release_hash", "Version 3.22.4\n"),
Cmd("git log -1 --format=%H release_hash^", "abc3\n"),
Cmd("git log --format=%H abc3..push_hash", "rev1\n"),
Cmd("git push origin push_hash:refs/heads/3.22.5", ""),
- Cmd("git reset --hard origin/master", ""),
+ Cmd("git reset --hard origin/main", ""),
Cmd("git new-branch work-branch --upstream origin/3.22.5", ""),
Cmd("git checkout -f 3.22.4 -- include/v8-version.h", "",
cb=self.WriteFakeVersionFile),
@@ -475,8 +475,8 @@ test_tag
"\"Version 3.22.5\" origin/3.22.5", "hsh_to_tag"),
Cmd("git tag 3.22.5 hsh_to_tag", ""),
Cmd("git push origin refs/tags/3.22.5:refs/tags/3.22.5", ""),
- Cmd("git checkout -f origin/master", ""),
- Cmd("git branch", "* master\n work-branch\n"),
+ Cmd("git checkout -f origin/main", ""),
+ Cmd("git branch", "* main\n work-branch\n"),
Cmd("git branch -D work-branch", ""),
Cmd("git gc", ""),
]
@@ -488,7 +488,7 @@ test_tag
CreateRelease(TEST_CONFIG, self).Run(args)
# Note: The version file is on build number 5 again in the end of this test
- # since the git command that merges to master is mocked out.
+ # since the git command that merges to main is mocked out.
# Check for correct content of the WATCHLISTS file
@@ -718,21 +718,21 @@ BUG=123,234,345,456,567,v8:123
self.Expect([
Cmd("git status -s -uno", ""),
- Cmd("git checkout -f origin/master", ""),
+ Cmd("git checkout -f origin/main", ""),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* branch2\n"),
Cmd("git new-branch %s --upstream refs/remotes/origin/candidates" %
TEST_CONFIG["BRANCHNAME"], ""),
Cmd(("git log --format=%H --grep=\"Port ab12345\" "
- "--reverse origin/master"),
+ "--reverse origin/main"),
"ab45678\nab23456"),
Cmd("git log -1 --format=%s ab45678", "Title1"),
Cmd("git log -1 --format=%s ab23456", "Title2"),
Cmd(("git log --format=%H --grep=\"Port ab23456\" "
- "--reverse origin/master"),
+ "--reverse origin/main"),
""),
Cmd(("git log --format=%H --grep=\"Port ab34567\" "
- "--reverse origin/master"),
+ "--reverse origin/main"),
"ab56789"),
Cmd("git log -1 --format=%s ab56789", "Title3"),
RL("Y"), # Automatically add corresponding ports (ab34567, ab56789)?
@@ -792,7 +792,7 @@ BUG=123,234,345,456,567,v8:123
"hsh_to_tag"),
Cmd("git tag 3.22.5.1 hsh_to_tag", ""),
Cmd("git push origin refs/tags/3.22.5.1:refs/tags/3.22.5.1", ""),
- Cmd("git checkout -f origin/master", ""),
+ Cmd("git checkout -f origin/main", ""),
Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
])
@@ -855,21 +855,21 @@ NOTREECHECKS=true
self.Expect([
Cmd("git status -s -uno", ""),
- Cmd("git checkout -f origin/master", ""),
+ Cmd("git checkout -f origin/main", ""),
Cmd("git fetch", ""),
Cmd("git branch", " branch1\n* branch2\n"),
Cmd("git new-branch %s --upstream refs/remotes/origin/candidates" %
TEST_CONFIG["BRANCHNAME"], ""),
Cmd(("git log --format=%H --grep=\"^[Pp]ort ab12345\" "
- "--reverse origin/master"),
+ "--reverse origin/main"),
"ab45678\nab23456"),
Cmd("git log -1 --format=%s ab45678", "Title1"),
Cmd("git log -1 --format=%s ab23456", "Title2"),
Cmd(("git log --format=%H --grep=\"^[Pp]ort ab23456\" "
- "--reverse origin/master"),
+ "--reverse origin/main"),
""),
Cmd(("git log --format=%H --grep=\"^[Pp]ort ab34567\" "
- "--reverse origin/master"),
+ "--reverse origin/main"),
"ab56789"),
Cmd("git log -1 --format=%s ab56789", "Title3"),
RL("Y"), # Automatically add corresponding ports (ab34567, ab56789)?
@@ -916,7 +916,7 @@ NOTREECHECKS=true
Cmd("git cl presubmit", "Presubmit successfull\n"),
Cmd("git cl land -f --bypass-hooks", "Closing issue\n",
cb=VerifyLand),
- Cmd("git checkout -f origin/master", ""),
+ Cmd("git checkout -f origin/main", ""),
Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
])
diff --git a/deps/v8/tools/release/test_search_related_commits.py b/deps/v8/tools/release/test_search_related_commits.py
index cf6123611f..6943915fd6 100755
--- a/deps/v8/tools/release/test_search_related_commits.py
+++ b/deps/v8/tools/release/test_search_related_commits.py
@@ -43,7 +43,7 @@ class TestSearchRelatedCommits(unittest.TestCase):
Review URL: https://codereview.chromium.org/1084243005
- Cr-Commit-Position: refs/heads/master@{#28059}"""
+ Cr-Commit-Position: refs/heads/main@{#28059}"""
self._make_empty_commit(message)
message = """[crankshaft] Do some stuff
@@ -52,7 +52,7 @@ class TestSearchRelatedCommits(unittest.TestCase):
Review URL: https://codereview.chromium.org/1084243007
- Cr-Commit-Position: refs/heads/master@{#28030}"""
+ Cr-Commit-Position: refs/heads/main@{#28030}"""
self._make_empty_commit(message)
@@ -62,10 +62,10 @@ class TestSearchRelatedCommits(unittest.TestCase):
def _assert_correct_standard_result(
self, result, all_commits, hash_of_first_commit):
- self.assertEqual(len(result), 1, "Master commit not found")
+ self.assertEqual(len(result), 1, "Main commit not found")
self.assertTrue(
result.get(hash_of_first_commit),
- "Master commit is wrong")
+ "Main commit is wrong")
self.assertEqual(
len(result[hash_of_first_commit]),
@@ -86,12 +86,12 @@ class TestSearchRelatedCommits(unittest.TestCase):
def testSearchByCommitPosition(self):
message = """Revert of some stuff.
- > Cr-Commit-Position: refs/heads/master@{#28059}
+ > Cr-Commit-Position: refs/heads/main@{#28059}
R=mstarzinger@chromium.org
Review URL: https://codereview.chromium.org/1084243005
- Cr-Commit-Position: refs/heads/master@{#28088}"""
+ Cr-Commit-Position: refs/heads/main@{#28088}"""
self._make_empty_commit(message)
@@ -106,12 +106,12 @@ class TestSearchRelatedCommits(unittest.TestCase):
def testSearchByTitle(self):
message = """Revert of some stuff.
> [turbofan] Sanitize language mode for javascript operators.
- > Cr-Commit-Position: refs/heads/master@{#289}
+ > Cr-Commit-Position: refs/heads/main@{#289}
R=mstarzinger@chromium.org
Review URL: https://codereview.chromium.org/1084243005
- Cr-Commit-Position: refs/heads/master@{#28088}"""
+ Cr-Commit-Position: refs/heads/main@{#28088}"""
self._make_empty_commit(message)
@@ -134,7 +134,7 @@ class TestSearchRelatedCommits(unittest.TestCase):
Review URL: https://codereview.chromium.org/1084243005
- Cr-Commit-Position: refs/heads/master@{#28088}"""
+ Cr-Commit-Position: refs/heads/main@{#28088}"""
self._make_empty_commit(message)
@@ -162,16 +162,16 @@ class TestSearchRelatedCommits(unittest.TestCase):
Review URL: https://codereview.chromium.org/1084243005
- Cr-Commit-Position: refs/heads/master@{#28088}"""
+ Cr-Commit-Position: refs/heads/main@{#28088}"""
self._make_empty_commit(message)
# Related commits happen before and after separator so it is a hit
- commit_pos_of_master = "27088"
- message = """Implement awesome feature: Master commit
+ commit_pos_of_main = "27088"
+ message = """Implement awesome feature: Main commit
Review URL: https://codereview.chromium.org/1084243235
- Cr-Commit-Position: refs/heads/master@{#""" + commit_pos_of_master + "}"
+ Cr-Commit-Position: refs/heads/main@{#""" + commit_pos_of_main + "}"
self._make_empty_commit(message)
# Separator commit
@@ -179,7 +179,7 @@ class TestSearchRelatedCommits(unittest.TestCase):
Review URL: https://codereview.chromium.org/1084243456
- Cr-Commit-Position: refs/heads/master@{#28173}"""
+ Cr-Commit-Position: refs/heads/main@{#28173}"""
self._make_empty_commit(message)
# Filler commit
@@ -187,11 +187,11 @@ class TestSearchRelatedCommits(unittest.TestCase):
self._make_empty_commit(message)
# Related commit after separator: a hit
- message = "Patch r" + commit_pos_of_master +""" done
+ message = "Patch r" + commit_pos_of_main +""" done
Review URL: https://codereview.chromium.org/1084243235
- Cr-Commit-Position: refs/heads/master@{#29567}"""
+ Cr-Commit-Position: refs/heads/main@{#29567}"""
self._make_empty_commit(message)
#Fetch again for an update
@@ -221,12 +221,12 @@ class TestSearchRelatedCommits(unittest.TestCase):
def testPrettyPrint(self):
message = """Revert of some stuff.
> [turbofan] Sanitize language mode for javascript operators.
- > Cr-Commit-Position: refs/heads/master@{#289}
+ > Cr-Commit-Position: refs/heads/main@{#289}
R=mstarzinger@chromium.org
Review URL: https://codereview.chromium.org/1084243005
- Cr-Commit-Position: refs/heads/master@{#28088}"""
+ Cr-Commit-Position: refs/heads/main@{#28088}"""
self._make_empty_commit(message)
@@ -248,7 +248,7 @@ class TestSearchRelatedCommits(unittest.TestCase):
output.append(current_line)
self.assertIs(len(output), 2, "Not exactly two entries written")
- self.assertTrue(output[0].startswith("+"), "Master entry not marked with +")
+ self.assertTrue(output[0].startswith("+"), "Main entry not marked with +")
self.assertTrue(output[1].startswith("| "), "Child entry not marked with |")
def testNothingFound(self):
diff --git a/deps/v8/tools/run_perf.py b/deps/v8/tools/run_perf.py
index cdbbed8176..f2e72261f0 100644
--- a/deps/v8/tools/run_perf.py
+++ b/deps/v8/tools/run_perf.py
@@ -126,6 +126,7 @@ from testrunner.local import command
from testrunner.local import utils
from testrunner.objects.output import Output, NULL_OUTPUT
+# for py2/py3 compatibility
try:
basestring # Python 2
except NameError: # Python 3
@@ -152,7 +153,7 @@ def GeometricMean(values):
The mean is calculated using log to avoid overflow.
"""
- values = map(float, values)
+ values = list(map(float, values))
return math.exp(sum(map(math.log, values)) / len(values))
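
The list() wrapper matters here because a Python 3 map() is a one-shot iterator with no len(); materializing it lets the function both sum the logs and divide by the count. The math itself computes exp((sum of log x_i) / n), which avoids overflowing the plain product. A standalone sketch with an illustrative name:

    import math

    def geometric_mean(values):
        # list() matters on Python 3: map() is a one-shot iterator
        # without len(), and the values are traversed twice below.
        values = list(map(float, values))
        return math.exp(sum(map(math.log, values)) / len(values))

    print(geometric_mean([1, 10, 100]))  # ~10.0 == (1 * 10 * 100) ** (1/3)
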
@@ -224,9 +225,9 @@ class ResultTracker(object):
def ToDict(self):
return {
- 'traces': self.traces.values(),
+ 'traces': list(self.traces.values()),
'errors': self.errors,
- 'runnables': self.runnables.values(),
+ 'runnables': list(self.runnables.values()),
}
def WriteToFile(self, file_name):
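
The list() wrappers in ToDict exist for the same Python 3 reason: dict.values() returns a view object that the json module refuses to serialize. A two-line demonstration with stand-in data:

    import json

    traces = {'t1': {'mean': 1.5}}
    # json.dumps({'traces': traces.values()}) raises TypeError on Python 3;
    # materializing the view first keeps the dict JSON-serializable.
    print(json.dumps({'traces': list(traces.values())}))
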
@@ -596,9 +597,11 @@ def find_build_directory(base_path, arch):
'Release',
]
possible_paths = [os.path.join(base_path, p) for p in possible_paths]
- actual_paths = filter(is_build, possible_paths)
+ actual_paths = list(filter(is_build, possible_paths))
assert actual_paths, 'No build directory found.'
- assert len(actual_paths) == 1, 'Found ambiguous build directories.'
+ assert len(
+ actual_paths
+  ) == 1, 'Found ambiguous build directories; use --binary-override-path.'
return actual_paths[0]
@@ -677,10 +680,10 @@ class DesktopPlatform(Platform):
if args.prioritize:
self.command_prefix += ['-n', '-20']
if args.affinitize != None:
- # schedtool expects a bit pattern when setting affinity, where each
- # bit set to '1' corresponds to a core where the process may run on.
- # First bit corresponds to CPU 0. Since the 'affinitize' parameter is
- # a core number, we need to map to said bit pattern.
+ # schedtool expects a bit pattern when setting affinity, where each
+      # bit set to '1' corresponds to a core on which the process may run.
+ # First bit corresponds to CPU 0. Since the 'affinitize' parameter is
+ # a core number, we need to map to said bit pattern.
cpu = int(args.affinitize)
core = 1 << cpu
self.command_prefix += ['-a', ('0x%x' % core)]
@@ -841,10 +844,10 @@ class CustomMachineConfiguration:
try:
with open('/sys/devices/system/cpu/present', 'r') as f:
indexes = f.readline()
- r = map(int, indexes.split('-'))
+ r = list(map(int, indexes.split('-')))
if len(r) == 1:
- return range(r[0], r[0] + 1)
- return range(r[0], r[1] + 1)
+ return list(range(r[0], r[0] + 1))
+ return list(range(r[0], r[1] + 1))
except Exception:
logging.exception('Failed to retrieve number of CPUs.')
raise
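
/sys/devices/system/cpu/present typically holds a single range such as "0-7" (or a lone index "0"); the fixed code expands it into a concrete list, since both map() and range() are lazy on Python 3. A standalone sketch under that single-range assumption; parse_cpu_range is a hypothetical name:

    def parse_cpu_range(indexes):
        # '0-7' -> [0, 1, ..., 7]; a lone '0' -> [0].
        r = list(map(int, indexes.split('-')))
        if len(r) == 1:
            return list(range(r[0], r[0] + 1))
        return list(range(r[0], r[1] + 1))

    print(parse_cpu_range('0-3'))  # [0, 1, 2, 3]
    print(parse_cpu_range('0'))    # [0]
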
@@ -1034,7 +1037,7 @@ def Main(argv):
# Ensure all arguments have absolute path before we start changing current
# directory.
- args.suite = map(os.path.abspath, args.suite)
+ args.suite = list(map(os.path.abspath, args.suite))
prev_aslr = None
prev_cpu_gov = None
diff --git a/deps/v8/tools/testrunner/base_runner.py b/deps/v8/tools/testrunner/base_runner.py
index cf5854c32c..48d3460e48 100644
--- a/deps/v8/tools/testrunner/base_runner.py
+++ b/deps/v8/tools/testrunner/base_runner.py
@@ -113,7 +113,8 @@ SLOW_ARCHS = [
"mips64el",
"s390",
"s390x",
- "riscv64"
+ "riscv64",
+ "loong64"
]
@@ -191,6 +192,7 @@ class BuildConfig(object):
self.lite_mode = build_config['v8_enable_lite_mode']
self.pointer_compression = build_config['v8_enable_pointer_compression']
self.pointer_compression_shared_cage = build_config['v8_enable_pointer_compression_shared_cage']
+ self.virtual_memory_cage = build_config['v8_enable_virtual_memory_cage']
self.third_party_heap = build_config['v8_enable_third_party_heap']
self.webassembly = build_config['v8_enable_webassembly']
# Export only for MIPS target
@@ -234,6 +236,8 @@ class BuildConfig(object):
detected_options.append('pointer_compression')
if self.pointer_compression_shared_cage:
detected_options.append('pointer_compression_shared_cage')
+ if self.virtual_memory_cage:
+ detected_options.append('virtual_memory_cage')
if self.third_party_heap:
detected_options.append('third_party_heap')
if self.webassembly:
@@ -267,6 +271,7 @@ class BaseTestRunner(object):
self.build_config = None
self.mode_options = None
self.target_os = None
+ self.infra_staging = False
@property
def framework_name(self):
@@ -279,6 +284,7 @@ class BaseTestRunner(object):
try:
parser = self._create_parser()
options, args = self._parse_args(parser, sys_args)
+ self.infra_staging = options.infra_staging
if options.swarming:
# Swarming doesn't print how isolated commands are called. Lets make
# this less cryptic by printing it ourselves.
@@ -348,6 +354,13 @@ class BaseTestRunner(object):
help="How long should fuzzer run")
parser.add_option("--swarming", default=False, action="store_true",
help="Indicates running test driver on swarming.")
+ parser.add_option('--infra-staging', help='Use new test runner features',
+ dest='infra_staging', default=None,
+ action='store_true')
+ parser.add_option('--no-infra-staging',
+ help='Opt out of new test runner features',
+ dest='infra_staging', default=None,
+ action='store_false')
parser.add_option("-j", help="The number of parallel tasks to run",
default=0, type=int)
@@ -370,9 +383,6 @@ class BaseTestRunner(object):
help="Path to a file for storing json results.")
parser.add_option('--slow-tests-cutoff', type="int", default=100,
help='Collect N slowest tests')
- parser.add_option("--junitout", help="File name of the JUnit output")
- parser.add_option("--junittestsuite", default="v8tests",
- help="The testsuite name in the JUnit output file")
parser.add_option("--exit-after-n-failures", type="int", default=100,
help="Exit after the first N failures instead of "
"running all tests. Pass 0 to disable this feature.")
@@ -666,6 +676,9 @@ class BaseTestRunner(object):
self.build_config.arch == 'mipsel':
no_simd_hardware = not simd_mips
+ if self.build_config.arch == 'loong64':
+ no_simd_hardware = True
+
# S390 hosts without VEF1 do not support Simd.
if self.build_config.arch == 's390x' and \
not self.build_config.simulator_run and \
@@ -678,6 +691,10 @@ class BaseTestRunner(object):
utils.GuessPowerProcessorVersion() < 9:
no_simd_hardware = True
+  # riscv64 does not support Simd instructions.
+ if self.build_config.arch == 'riscv64':
+ no_simd_hardware = True
+
return {
"arch": self.build_config.arch,
"asan": self.build_config.asan,
@@ -716,6 +733,7 @@ class BaseTestRunner(object):
"lite_mode": self.build_config.lite_mode,
"pointer_compression": self.build_config.pointer_compression,
"pointer_compression_shared_cage": self.build_config.pointer_compression_shared_cage,
+ "virtual_memory_cage": self.build_config.virtual_memory_cage,
}
def _runner_flags(self):
@@ -812,9 +830,6 @@ class BaseTestRunner(object):
def _create_progress_indicators(self, test_count, options):
procs = [PROGRESS_INDICATORS[options.progress]()]
- if options.junitout:
- procs.append(progress.JUnitTestProgressIndicator(options.junitout,
- options.junittestsuite))
if options.json_test_results:
procs.append(progress.JsonTestProgressIndicator(self.framework_name))
diff --git a/deps/v8/tools/testrunner/local/android.py b/deps/v8/tools/testrunner/local/android.py
index ebf04afad6..cfc4e537f5 100644
--- a/deps/v8/tools/testrunner/local/android.py
+++ b/deps/v8/tools/testrunner/local/android.py
@@ -128,12 +128,6 @@ class _Driver(object):
)
self.push_file(
shell_dir,
- 'snapshot_blob_trusted.bin',
- target_dir,
- skip_if_missing=True,
- )
- self.push_file(
- shell_dir,
'icudtl.dat',
target_dir,
skip_if_missing=True,
diff --git a/deps/v8/tools/testrunner/local/junit_output.py b/deps/v8/tools/testrunner/local/junit_output.py
deleted file mode 100644
index 52f31ec422..0000000000
--- a/deps/v8/tools/testrunner/local/junit_output.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-import xml.etree.ElementTree as xml
-
-
-class JUnitTestOutput:
- def __init__(self, test_suite_name):
- self.root = xml.Element("testsuite")
- self.root.attrib["name"] = test_suite_name
-
- def HasRunTest(self, test_name, test_cmd, test_duration, test_failure):
- testCaseElement = xml.Element("testcase")
- testCaseElement.attrib["name"] = test_name
- testCaseElement.attrib["cmd"] = test_cmd
- testCaseElement.attrib["time"] = str(round(test_duration, 3))
- if len(test_failure):
- failureElement = xml.Element("failure")
- failureElement.text = test_failure
- testCaseElement.append(failureElement)
- self.root.append(testCaseElement)
-
- def FinishAndWrite(self, f):
- xml.ElementTree(self.root).write(f, "UTF-8")
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index 48b9286959..de903752bb 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -64,7 +64,7 @@ VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "big", "little", "android",
"arm", "arm64", "ia32", "mips", "mipsel", "mips64", "mips64el",
"x64", "ppc", "ppc64", "s390", "s390x", "macos", "windows",
- "linux", "aix", "r1", "r2", "r3", "r5", "r6", "riscv64"]:
+ "linux", "aix", "r1", "r2", "r3", "r5", "r6", "riscv64", "loong64"]:
VARIABLES[var] = var
# Allow using variants as keywords.
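Each VARIABLES entry maps a bare keyword to its own name, so status-file conditions (which are evaluated against this dict) can mention it unquoted. With "loong64" registered, an entry like this hypothetical one becomes expressible; it is illustrative only, not part of this patch:

    [
    ['arch == loong64', {
      'some/slow-test': [SLOW],
    }],
    ]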
diff --git a/deps/v8/tools/testrunner/local/variants.py b/deps/v8/tools/testrunner/local/variants.py
index ba4eff451a..42bf12d464 100644
--- a/deps/v8/tools/testrunner/local/variants.py
+++ b/deps/v8/tools/testrunner/local/variants.py
@@ -13,11 +13,12 @@ ALL_VARIANT_FLAGS = {
"infra_staging": [[]],
"interpreted_regexp": [["--regexp-interpret-all"]],
"experimental_regexp": [["--default-to-experimental-regexp-engine"]],
- "concurrent_inlining": [["--concurrent-inlining"]],
"jitless": [["--jitless"]],
"sparkplug": [["--sparkplug"]],
"always_sparkplug": [[ "--always-sparkplug", "--sparkplug"]],
"minor_mc": [["--minor-mc"]],
+ "no_concurrent_inlining": [["--no-concurrent-inlining",
+ "--no-stress-concurrent-inlining"]],
"no_lfa": [["--no-lazy-feedback-allocation"]],
# No optimization means disable all optimizations. OptimizeFunctionOnNextCall
# would not force optimization too. It turns into a Nop. Please see
@@ -38,7 +39,6 @@ ALL_VARIANT_FLAGS = {
"stress_snapshot": [["--stress-snapshot"]],
# Trigger stress sampling allocation profiler with sample interval = 2^14
"stress_sampling": [["--stress-sampling-allocation-profiler=16384"]],
- "trusted": [["--no-untrusted-code-mitigations"]],
"no_wasm_traps": [["--no-wasm-trap-handler"]],
"turboprop": [["--turboprop"]],
"turboprop_as_toptier": [["--turboprop-as-toptier", "--turboprop"]],
@@ -58,13 +58,14 @@ INCOMPATIBLE_FLAGS_PER_VARIANT = {
"nooptimization": ["--always-opt"],
"slow_path": ["--no-force-slow-path"],
"stress_concurrent_allocation": ["--single-threaded-gc", "--predictable"],
- "stress_concurrent_inlining": ["--single-threaded", "--predictable", "--turboprop"],
+ "stress_concurrent_inlining": ["--single-threaded", "--predictable",
+ "--turboprop", "--lazy-feedback-allocation"],
"turboprop": ["--stress_concurrent_inlining"],
# The fast API tests initialize an embedder object that never needs to be
# serialized to the snapshot, so we don't have a
# SerializeInternalFieldsCallback for it, so they are incompatible with
# stress_snapshot.
- "stress_snapshot": ["--turbo-fast-api-calls"],
+ "stress_snapshot": ["--expose-fast-api"],
"stress": ["--always-opt", "--no-always-opt",
"--max-inlined-bytecode-size=*",
"--max-inlined-bytecode-size-cumulative=*", "--stress-inline",
diff --git a/deps/v8/tools/testrunner/num_fuzzer.py b/deps/v8/tools/testrunner/num_fuzzer.py
index d5b243ba96..ebf01078fb 100755
--- a/deps/v8/tools/testrunner/num_fuzzer.py
+++ b/deps/v8/tools/testrunner/num_fuzzer.py
@@ -20,7 +20,7 @@ from testrunner.testproc import fuzzer
from testrunner.testproc.base import TestProcProducer
from testrunner.testproc.combiner import CombinerProc
from testrunner.testproc.execution import ExecutionProc
-from testrunner.testproc.expectation import ForgiveTimeoutProc
+from testrunner.testproc.expectation import ExpectationProc
from testrunner.testproc.filter import StatusFileFilterProc, NameFilterProc
from testrunner.testproc.loader import LoadProc
from testrunner.testproc.progress import ResultsTracker
@@ -63,6 +63,11 @@ class NumFuzzer(base_runner.BaseTestRunner):
help="probability [0-10] of adding --random-gc-interval "
"flag to the test")
+ # Stress stack size
+ parser.add_option("--stress-stack-size", default=0, type="int",
+ help="probability [0-10] of adding --stack-size "
+ "flag to the test")
+
# Stress tasks
parser.add_option("--stress-delay-tasks", default=0, type="int",
help="probability [0-10] of adding --stress-delay-tasks "
@@ -119,7 +124,10 @@ class NumFuzzer(base_runner.BaseTestRunner):
def _runner_flags(self):
"""Extra default flags specific to the test runner implementation."""
- return ['--no-abort-on-contradictory-flags']
+ flags = ['--no-abort-on-contradictory-flags', '--testing-d8-test-runner']
+ if self.infra_staging:
+ flags.append('--no-fail')
+ return flags
def _get_statusfile_variables(self, options):
variables = (
@@ -133,6 +141,7 @@ class NumFuzzer(base_runner.BaseTestRunner):
options.stress_compaction,
options.stress_gc,
options.stress_delay_tasks,
+ options.stress_stack_size,
options.stress_thread_pool_size])),
})
return variables
@@ -154,7 +163,7 @@ class NumFuzzer(base_runner.BaseTestRunner):
# TODO(majeski): Improve sharding when combiner is present. Maybe select
# different random seeds for shards instead of splitting tests.
self._create_shard_proc(options),
- ForgiveTimeoutProc(),
+ ExpectationProc(self.infra_staging),
combiner,
self._create_fuzzer(fuzzer_rng, options),
sigproc,
@@ -221,6 +230,7 @@ class NumFuzzer(base_runner.BaseTestRunner):
add('marking', options.stress_marking)
add('scavenge', options.stress_scavenge)
add('gc_interval', options.stress_gc)
+ add('stack', options.stress_stack_size)
add('threads', options.stress_thread_pool_size)
add('delay', options.stress_delay_tasks)
add('deopt', options.stress_deopt, options.stress_deopt_min)
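Like the other --stress-* options, the new --stress-stack-size takes a probability on a 0-10 scale. A sketch of that scale's assumed meaning (the actual per-test sampling happens inside testproc/fuzzer.py):

    import random

    def fuzzer_fires(prob, rng):
        # prob=0 never adds the flag, prob=10 always does,
        # prob=7 fires on roughly 7 out of 10 tests.
        return rng.randint(1, 10) <= prob

    rng = random.Random(0)
    print(sum(fuzzer_fires(7, rng) for _ in range(1000)))  # roughly 700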
diff --git a/deps/v8/tools/testrunner/objects/testcase.py b/deps/v8/tools/testrunner/objects/testcase.py
index a1f1754b22..e044c20805 100644
--- a/deps/v8/tools/testrunner/objects/testcase.py
+++ b/deps/v8/tools/testrunner/objects/testcase.py
@@ -166,6 +166,15 @@ class TestCase(object):
self._expected_outcomes = (
self.expected_outcomes + [statusfile.TIMEOUT])
+ def allow_pass(self):
+ if self.expected_outcomes == outproc.OUTCOMES_TIMEOUT:
+ self._expected_outcomes = outproc.OUTCOMES_PASS_OR_TIMEOUT
+ elif self.expected_outcomes == outproc.OUTCOMES_FAIL:
+ self._expected_outcomes = outproc.OUTCOMES_FAIL_OR_PASS
+ elif statusfile.PASS not in self.expected_outcomes:
+ self._expected_outcomes = (
+ self.expected_outcomes + [statusfile.PASS])
+
@property
def expected_outcomes(self):
def is_flag(maybe_flag):
diff --git a/deps/v8/tools/testrunner/outproc/base.py b/deps/v8/tools/testrunner/outproc/base.py
index 9646b96c06..74a1d90159 100644
--- a/deps/v8/tools/testrunner/outproc/base.py
+++ b/deps/v8/tools/testrunner/outproc/base.py
@@ -12,8 +12,10 @@ from ..testproc.result import Result
OUTCOMES_PASS = [statusfile.PASS]
OUTCOMES_FAIL = [statusfile.FAIL]
+OUTCOMES_TIMEOUT = [statusfile.TIMEOUT]
OUTCOMES_PASS_OR_TIMEOUT = [statusfile.PASS, statusfile.TIMEOUT]
OUTCOMES_FAIL_OR_TIMEOUT = [statusfile.FAIL, statusfile.TIMEOUT]
+OUTCOMES_FAIL_OR_PASS = [statusfile.FAIL, statusfile.PASS]
class BaseOutProc(object):
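The two new outcome lists exist so TestCase.allow_pass (above) can widen expectations without producing duplicates. Its effect, restated as a standalone sketch with plain strings standing in for the statusfile constants:

    PASS, FAIL, TIMEOUT = 'PASS', 'FAIL', 'TIMEOUT'

    def widened(expected):
        # Mirrors TestCase.allow_pass: after widening, a PASS result
        # is never counted as unexpected output.
        if expected == [TIMEOUT]:
            return [PASS, TIMEOUT]
        if expected == [FAIL]:
            return [FAIL, PASS]
        if PASS not in expected:
            return expected + [PASS]
        return expected

    assert widened([FAIL]) == [FAIL, PASS]        # OUTCOMES_FAIL_OR_PASS
    assert widened([TIMEOUT]) == [PASS, TIMEOUT]  # OUTCOMES_PASS_OR_TIMEOUT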
diff --git a/deps/v8/tools/testrunner/standard_runner.py b/deps/v8/tools/testrunner/standard_runner.py
index 41352b34e8..50482da70e 100755
--- a/deps/v8/tools/testrunner/standard_runner.py
+++ b/deps/v8/tools/testrunner/standard_runner.py
@@ -132,13 +132,6 @@ class StandardTestRunner(base_runner.BaseTestRunner):
parser.add_option('--cfi-vptr',
help='Run tests with UBSAN cfi_vptr option.',
default=False, action='store_true')
- parser.add_option('--infra-staging', help='Use new test runner features',
- dest='infra_staging', default=None,
- action='store_true')
- parser.add_option('--no-infra-staging',
- help='Opt out of new test runner features',
- dest='infra_staging', default=None,
- action='store_false')
parser.add_option('--no-sorting', '--nosorting',
help='Don\'t sort tests according to duration of last'
' run.',
diff --git a/deps/v8/tools/testrunner/testproc/expectation.py b/deps/v8/tools/testrunner/testproc/expectation.py
index 285a599a74..df7a2c2b1a 100644
--- a/deps/v8/tools/testrunner/testproc/expectation.py
+++ b/deps/v8/tools/testrunner/testproc/expectation.py
@@ -7,14 +7,17 @@ from . import base
from testrunner.local import statusfile
from testrunner.outproc import base as outproc
-class ForgiveTimeoutProc(base.TestProcProducer):
+class ExpectationProc(base.TestProcProducer):
"""Test processor passing tests and results through and forgiving timeouts."""
- def __init__(self):
- super(ForgiveTimeoutProc, self).__init__('no-timeout')
+ def __init__(self, infra_staging):
+ super(ExpectationProc, self).__init__('no-timeout')
+ self.infra_staging = infra_staging
def _next_test(self, test):
subtest = self._create_subtest(test, 'no_timeout')
subtest.allow_timeouts()
+ if self.infra_staging:
+ subtest.allow_pass()
return self._send_test(subtest)
def _result_for(self, test, subtest, result):
diff --git a/deps/v8/tools/testrunner/testproc/fuzzer.py b/deps/v8/tools/testrunner/testproc/fuzzer.py
index 1237da56b2..67250b1c74 100644
--- a/deps/v8/tools/testrunner/testproc/fuzzer.py
+++ b/deps/v8/tools/testrunner/testproc/fuzzer.py
@@ -44,6 +44,7 @@ EXTRA_FLAGS = [
(0.1, '--regexp-tier-up-ticks=100'),
(0.1, '--stress-background-compile'),
(0.1, '--stress-concurrent-inlining'),
+ (0.1, '--stress-flush-code'),
(0.1, '--stress-lazy-source-positions'),
(0.1, '--stress-wasm-code-gc'),
(0.1, '--turbo-instruction-scheduling'),
@@ -265,6 +266,10 @@ class CompactionFuzzer(Fuzzer):
while True:
yield ['--stress-compaction-random']
+class StackSizeFuzzer(Fuzzer):
+ def create_flags_generator(self, rng, test, analysis_value):
+ while True:
+ yield ['--stack-size=%d' % rng.randint(54, 983)]
class TaskDelayFuzzer(Fuzzer):
def create_flags_generator(self, rng, test, analysis_value):
@@ -322,6 +327,7 @@ FUZZERS = {
'gc_interval': (GcIntervalAnalyzer, GcIntervalFuzzer),
'marking': (MarkingAnalyzer, MarkingFuzzer),
'scavenge': (ScavengeAnalyzer, ScavengeFuzzer),
+ 'stack': (None, StackSizeFuzzer),
'threads': (None, ThreadPoolSizeFuzzer),
}
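Every fuzzer follows the same contract: create_flags_generator yields an endless stream of flag lists, one list per test execution. The new 'stack' entry registers with no analyzer (None), so analysis_value goes unused. A minimal demonstration of the generator contract, run against a copy of the class added above:

    import random

    class StackSizeFuzzer(object):
        def create_flags_generator(self, rng, test, analysis_value):
            while True:
                yield ['--stack-size=%d' % rng.randint(54, 983)]

    gen = StackSizeFuzzer().create_flags_generator(
        random.Random(42), test=None, analysis_value=None)
    print(next(gen))  # e.g. ['--stack-size=361']; value depends on the seed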
diff --git a/deps/v8/tools/testrunner/testproc/progress.py b/deps/v8/tools/testrunner/testproc/progress.py
index ec97ab226f..c102cddec1 100644
--- a/deps/v8/tools/testrunner/testproc/progress.py
+++ b/deps/v8/tools/testrunner/testproc/progress.py
@@ -15,7 +15,6 @@ import time
from . import base
from . import util
-from ..local import junit_output
def print_failure_header(test, is_flaky=False):
@@ -362,45 +361,6 @@ class MonochromeProgressIndicator(CompactProgressIndicator):
print(("\r" + (" " * last_length) + "\r"), end='')
-class JUnitTestProgressIndicator(ProgressIndicator):
- def __init__(self, junitout, junittestsuite):
- super(JUnitTestProgressIndicator, self).__init__()
- self._requirement = base.DROP_PASS_STDOUT
-
- self.outputter = junit_output.JUnitTestOutput(junittestsuite)
- if junitout:
- self.outfile = open(junitout, "w")
- else:
- self.outfile = sys.stdout
-
- def _on_result_for(self, test, result):
- # TODO(majeski): Support for dummy/grouped results
- fail_text = ""
- output = result.output
- if result.has_unexpected_output:
- stdout = output.stdout.strip()
- if len(stdout):
- fail_text += "stdout:\n%s\n" % stdout
- stderr = output.stderr.strip()
- if len(stderr):
- fail_text += "stderr:\n%s\n" % stderr
- fail_text += "Command: %s" % result.cmd.to_string()
- if output.HasCrashed():
- fail_text += "exit code: %d\n--- CRASHED ---" % output.exit_code
- if output.HasTimedOut():
- fail_text += "--- TIMEOUT ---"
- self.outputter.HasRunTest(
- test_name=str(test),
- test_cmd=result.cmd.to_string(relative=True),
- test_duration=output.duration,
- test_failure=fail_text)
-
- def finished(self):
- self.outputter.FinishAndWrite(self.outfile)
- if self.outfile != sys.stdout:
- self.outfile.close()
-
-
class JsonTestProgressIndicator(ProgressIndicator):
def __init__(self, framework_name):
super(JsonTestProgressIndicator, self).__init__()
diff --git a/deps/v8/tools/unittests/run_tests_test.py b/deps/v8/tools/unittests/run_tests_test.py
index d9e998312e..89acacaaa3 100755
--- a/deps/v8/tools/unittests/run_tests_test.py
+++ b/deps/v8/tools/unittests/run_tests_test.py
@@ -350,7 +350,8 @@ class SystemTest(unittest.TestCase):
v8_enable_i18n_support=False, v8_target_cpu='x86',
v8_enable_verify_csa=False, v8_enable_lite_mode=False,
v8_enable_pointer_compression=False,
- v8_enable_pointer_compression_shared_cage=False)
+ v8_enable_pointer_compression_shared_cage=False,
+ v8_enable_virtual_memory_cage=False)
result = run_tests(
basedir,
'--progress=verbose',
diff --git a/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json b/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
index 04ccbb1600..837f7ef5fc 100644
--- a/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
+++ b/deps/v8/tools/unittests/testdata/testroot1/v8_build_config.json
@@ -22,6 +22,7 @@
"v8_enable_lite_mode": false,
"v8_enable_pointer_compression": true,
"v8_enable_pointer_compression_shared_cage": true,
+ "v8_enable_virtual_memory_cage": false,
"v8_control_flow_integrity": false,
"v8_enable_single_generation": false,
"v8_enable_third_party_heap": false,
diff --git a/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json b/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
index b3e36ef6de..fbe348d973 100644
--- a/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
+++ b/deps/v8/tools/unittests/testdata/testroot2/v8_build_config.json
@@ -22,6 +22,7 @@
"v8_enable_lite_mode": false,
"v8_enable_pointer_compression": false,
"v8_enable_pointer_compression_shared_cage": false,
+ "v8_enable_virtual_memory_cage": false,
"v8_control_flow_integrity": false,
"v8_enable_single_generation": false,
"v8_enable_third_party_heap": false,
diff --git a/deps/v8/tools/unittests/testdata/testroot3/v8_build_config.json b/deps/v8/tools/unittests/testdata/testroot3/v8_build_config.json
index 04ccbb1600..837f7ef5fc 100644
--- a/deps/v8/tools/unittests/testdata/testroot3/v8_build_config.json
+++ b/deps/v8/tools/unittests/testdata/testroot3/v8_build_config.json
@@ -22,6 +22,7 @@
"v8_enable_lite_mode": false,
"v8_enable_pointer_compression": true,
"v8_enable_pointer_compression_shared_cage": true,
+ "v8_enable_virtual_memory_cage": false,
"v8_control_flow_integrity": false,
"v8_enable_single_generation": false,
"v8_enable_third_party_heap": false,
diff --git a/deps/v8/tools/v8heapconst.py b/deps/v8/tools/v8heapconst.py
index 097b6a7267..5693bf147b 100644
--- a/deps/v8/tools/v8heapconst.py
+++ b/deps/v8/tools/v8heapconst.py
@@ -55,105 +55,104 @@ INSTANCE_TYPES = {
91: "ARRAY_BOILERPLATE_DESCRIPTION_TYPE",
92: "ASM_WASM_DATA_TYPE",
93: "ASYNC_GENERATOR_REQUEST_TYPE",
- 94: "BASELINE_DATA_TYPE",
- 95: "BREAK_POINT_TYPE",
- 96: "BREAK_POINT_INFO_TYPE",
- 97: "CACHED_TEMPLATE_OBJECT_TYPE",
- 98: "CALL_HANDLER_INFO_TYPE",
- 99: "CLASS_POSITIONS_TYPE",
- 100: "DEBUG_INFO_TYPE",
- 101: "ENUM_CACHE_TYPE",
- 102: "FEEDBACK_CELL_TYPE",
- 103: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
- 104: "INTERCEPTOR_INFO_TYPE",
- 105: "INTERPRETER_DATA_TYPE",
- 106: "MODULE_REQUEST_TYPE",
- 107: "PROMISE_CAPABILITY_TYPE",
- 108: "PROMISE_REACTION_TYPE",
- 109: "PROPERTY_DESCRIPTOR_OBJECT_TYPE",
- 110: "PROTOTYPE_INFO_TYPE",
- 111: "REG_EXP_BOILERPLATE_DESCRIPTION_TYPE",
- 112: "SCRIPT_TYPE",
- 113: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE",
- 114: "STACK_FRAME_INFO_TYPE",
- 115: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
- 116: "TUPLE2_TYPE",
- 117: "WASM_EXCEPTION_TAG_TYPE",
- 118: "WASM_INDIRECT_FUNCTION_TABLE_TYPE",
- 119: "FIXED_ARRAY_TYPE",
- 120: "HASH_TABLE_TYPE",
- 121: "EPHEMERON_HASH_TABLE_TYPE",
- 122: "GLOBAL_DICTIONARY_TYPE",
- 123: "NAME_DICTIONARY_TYPE",
- 124: "NUMBER_DICTIONARY_TYPE",
- 125: "ORDERED_HASH_MAP_TYPE",
- 126: "ORDERED_HASH_SET_TYPE",
- 127: "ORDERED_NAME_DICTIONARY_TYPE",
- 128: "SIMPLE_NUMBER_DICTIONARY_TYPE",
- 129: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
- 130: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
- 131: "SCRIPT_CONTEXT_TABLE_TYPE",
- 132: "BYTE_ARRAY_TYPE",
- 133: "BYTECODE_ARRAY_TYPE",
- 134: "FIXED_DOUBLE_ARRAY_TYPE",
- 135: "INTERNAL_CLASS_WITH_SMI_ELEMENTS_TYPE",
- 136: "SLOPPY_ARGUMENTS_ELEMENTS_TYPE",
- 137: "AWAIT_CONTEXT_TYPE",
- 138: "BLOCK_CONTEXT_TYPE",
- 139: "CATCH_CONTEXT_TYPE",
- 140: "DEBUG_EVALUATE_CONTEXT_TYPE",
- 141: "EVAL_CONTEXT_TYPE",
- 142: "FUNCTION_CONTEXT_TYPE",
- 143: "MODULE_CONTEXT_TYPE",
- 144: "NATIVE_CONTEXT_TYPE",
- 145: "SCRIPT_CONTEXT_TYPE",
- 146: "WITH_CONTEXT_TYPE",
- 147: "EXPORTED_SUB_CLASS_BASE_TYPE",
- 148: "EXPORTED_SUB_CLASS_TYPE",
- 149: "EXPORTED_SUB_CLASS2_TYPE",
- 150: "SMALL_ORDERED_HASH_MAP_TYPE",
- 151: "SMALL_ORDERED_HASH_SET_TYPE",
- 152: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
- 153: "DESCRIPTOR_ARRAY_TYPE",
- 154: "STRONG_DESCRIPTOR_ARRAY_TYPE",
- 155: "SOURCE_TEXT_MODULE_TYPE",
- 156: "SYNTHETIC_MODULE_TYPE",
- 157: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
- 158: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
- 159: "WEAK_FIXED_ARRAY_TYPE",
- 160: "TRANSITION_ARRAY_TYPE",
- 161: "CELL_TYPE",
- 162: "CODE_TYPE",
- 163: "CODE_DATA_CONTAINER_TYPE",
- 164: "COVERAGE_INFO_TYPE",
- 165: "EMBEDDER_DATA_ARRAY_TYPE",
- 166: "FEEDBACK_METADATA_TYPE",
- 167: "FEEDBACK_VECTOR_TYPE",
- 168: "FILLER_TYPE",
- 169: "FREE_SPACE_TYPE",
- 170: "INTERNAL_CLASS_TYPE",
- 171: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
- 172: "MAP_TYPE",
- 173: "MEGA_DOM_HANDLER_TYPE",
- 174: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE",
- 175: "PREPARSE_DATA_TYPE",
- 176: "PROPERTY_ARRAY_TYPE",
- 177: "PROPERTY_CELL_TYPE",
- 178: "SCOPE_INFO_TYPE",
- 179: "SHARED_FUNCTION_INFO_TYPE",
- 180: "SMI_BOX_TYPE",
- 181: "SMI_PAIR_TYPE",
- 182: "SORT_STATE_TYPE",
- 183: "SWISS_NAME_DICTIONARY_TYPE",
- 184: "WEAK_ARRAY_LIST_TYPE",
- 185: "WEAK_CELL_TYPE",
- 186: "WASM_ARRAY_TYPE",
- 187: "WASM_STRUCT_TYPE",
- 188: "JS_PROXY_TYPE",
+ 94: "BREAK_POINT_TYPE",
+ 95: "BREAK_POINT_INFO_TYPE",
+ 96: "CACHED_TEMPLATE_OBJECT_TYPE",
+ 97: "CALL_HANDLER_INFO_TYPE",
+ 98: "CLASS_POSITIONS_TYPE",
+ 99: "DEBUG_INFO_TYPE",
+ 100: "ENUM_CACHE_TYPE",
+ 101: "FEEDBACK_CELL_TYPE",
+ 102: "FUNCTION_TEMPLATE_RARE_DATA_TYPE",
+ 103: "INTERCEPTOR_INFO_TYPE",
+ 104: "INTERPRETER_DATA_TYPE",
+ 105: "MODULE_REQUEST_TYPE",
+ 106: "PROMISE_CAPABILITY_TYPE",
+ 107: "PROMISE_REACTION_TYPE",
+ 108: "PROPERTY_DESCRIPTOR_OBJECT_TYPE",
+ 109: "PROTOTYPE_INFO_TYPE",
+ 110: "REG_EXP_BOILERPLATE_DESCRIPTION_TYPE",
+ 111: "SCRIPT_TYPE",
+ 112: "SOURCE_TEXT_MODULE_INFO_ENTRY_TYPE",
+ 113: "STACK_FRAME_INFO_TYPE",
+ 114: "TEMPLATE_OBJECT_DESCRIPTION_TYPE",
+ 115: "TUPLE2_TYPE",
+ 116: "WASM_EXCEPTION_TAG_TYPE",
+ 117: "WASM_INDIRECT_FUNCTION_TABLE_TYPE",
+ 118: "FIXED_ARRAY_TYPE",
+ 119: "HASH_TABLE_TYPE",
+ 120: "EPHEMERON_HASH_TABLE_TYPE",
+ 121: "GLOBAL_DICTIONARY_TYPE",
+ 122: "NAME_DICTIONARY_TYPE",
+ 123: "NUMBER_DICTIONARY_TYPE",
+ 124: "ORDERED_HASH_MAP_TYPE",
+ 125: "ORDERED_HASH_SET_TYPE",
+ 126: "ORDERED_NAME_DICTIONARY_TYPE",
+ 127: "SIMPLE_NUMBER_DICTIONARY_TYPE",
+ 128: "CLOSURE_FEEDBACK_CELL_ARRAY_TYPE",
+ 129: "OBJECT_BOILERPLATE_DESCRIPTION_TYPE",
+ 130: "SCRIPT_CONTEXT_TABLE_TYPE",
+ 131: "BYTE_ARRAY_TYPE",
+ 132: "BYTECODE_ARRAY_TYPE",
+ 133: "FIXED_DOUBLE_ARRAY_TYPE",
+ 134: "INTERNAL_CLASS_WITH_SMI_ELEMENTS_TYPE",
+ 135: "SLOPPY_ARGUMENTS_ELEMENTS_TYPE",
+ 136: "AWAIT_CONTEXT_TYPE",
+ 137: "BLOCK_CONTEXT_TYPE",
+ 138: "CATCH_CONTEXT_TYPE",
+ 139: "DEBUG_EVALUATE_CONTEXT_TYPE",
+ 140: "EVAL_CONTEXT_TYPE",
+ 141: "FUNCTION_CONTEXT_TYPE",
+ 142: "MODULE_CONTEXT_TYPE",
+ 143: "NATIVE_CONTEXT_TYPE",
+ 144: "SCRIPT_CONTEXT_TYPE",
+ 145: "WITH_CONTEXT_TYPE",
+ 146: "EXPORTED_SUB_CLASS_BASE_TYPE",
+ 147: "EXPORTED_SUB_CLASS_TYPE",
+ 148: "EXPORTED_SUB_CLASS2_TYPE",
+ 149: "SMALL_ORDERED_HASH_MAP_TYPE",
+ 150: "SMALL_ORDERED_HASH_SET_TYPE",
+ 151: "SMALL_ORDERED_NAME_DICTIONARY_TYPE",
+ 152: "DESCRIPTOR_ARRAY_TYPE",
+ 153: "STRONG_DESCRIPTOR_ARRAY_TYPE",
+ 154: "SOURCE_TEXT_MODULE_TYPE",
+ 155: "SYNTHETIC_MODULE_TYPE",
+ 156: "UNCOMPILED_DATA_WITH_PREPARSE_DATA_TYPE",
+ 157: "UNCOMPILED_DATA_WITHOUT_PREPARSE_DATA_TYPE",
+ 158: "WEAK_FIXED_ARRAY_TYPE",
+ 159: "TRANSITION_ARRAY_TYPE",
+ 160: "CELL_TYPE",
+ 161: "CODE_TYPE",
+ 162: "CODE_DATA_CONTAINER_TYPE",
+ 163: "COVERAGE_INFO_TYPE",
+ 164: "EMBEDDER_DATA_ARRAY_TYPE",
+ 165: "FEEDBACK_METADATA_TYPE",
+ 166: "FEEDBACK_VECTOR_TYPE",
+ 167: "FILLER_TYPE",
+ 168: "FREE_SPACE_TYPE",
+ 169: "INTERNAL_CLASS_TYPE",
+ 170: "INTERNAL_CLASS_WITH_STRUCT_ELEMENTS_TYPE",
+ 171: "MAP_TYPE",
+ 172: "MEGA_DOM_HANDLER_TYPE",
+ 173: "ON_HEAP_BASIC_BLOCK_PROFILER_DATA_TYPE",
+ 174: "PREPARSE_DATA_TYPE",
+ 175: "PROPERTY_ARRAY_TYPE",
+ 176: "PROPERTY_CELL_TYPE",
+ 177: "SCOPE_INFO_TYPE",
+ 178: "SHARED_FUNCTION_INFO_TYPE",
+ 179: "SMI_BOX_TYPE",
+ 180: "SMI_PAIR_TYPE",
+ 181: "SORT_STATE_TYPE",
+ 182: "SWISS_NAME_DICTIONARY_TYPE",
+ 183: "WEAK_ARRAY_LIST_TYPE",
+ 184: "WEAK_CELL_TYPE",
+ 185: "WASM_ARRAY_TYPE",
+ 186: "WASM_STRUCT_TYPE",
+ 187: "JS_PROXY_TYPE",
1057: "JS_OBJECT_TYPE",
- 189: "JS_GLOBAL_OBJECT_TYPE",
- 190: "JS_GLOBAL_PROXY_TYPE",
- 191: "JS_MODULE_NAMESPACE_TYPE",
+ 188: "JS_GLOBAL_OBJECT_TYPE",
+ 189: "JS_GLOBAL_PROXY_TYPE",
+ 190: "JS_MODULE_NAMESPACE_TYPE",
1040: "JS_SPECIAL_API_OBJECT_TYPE",
1041: "JS_PRIMITIVE_WRAPPER_TYPE",
1058: "JS_API_OBJECT_TYPE",
@@ -236,81 +235,81 @@ INSTANCE_TYPES = {
# List of known V8 maps.
KNOWN_MAPS = {
- ("read_only_space", 0x02119): (172, "MetaMap"),
+ ("read_only_space", 0x02119): (171, "MetaMap"),
("read_only_space", 0x02141): (67, "NullMap"),
- ("read_only_space", 0x02169): (154, "StrongDescriptorArrayMap"),
- ("read_only_space", 0x02191): (159, "WeakFixedArrayMap"),
- ("read_only_space", 0x021d1): (101, "EnumCacheMap"),
- ("read_only_space", 0x02205): (119, "FixedArrayMap"),
+ ("read_only_space", 0x02169): (153, "StrongDescriptorArrayMap"),
+ ("read_only_space", 0x02191): (158, "WeakFixedArrayMap"),
+ ("read_only_space", 0x021d1): (100, "EnumCacheMap"),
+ ("read_only_space", 0x02205): (118, "FixedArrayMap"),
("read_only_space", 0x02251): (8, "OneByteInternalizedStringMap"),
- ("read_only_space", 0x0229d): (169, "FreeSpaceMap"),
- ("read_only_space", 0x022c5): (168, "OnePointerFillerMap"),
- ("read_only_space", 0x022ed): (168, "TwoPointerFillerMap"),
+ ("read_only_space", 0x0229d): (168, "FreeSpaceMap"),
+ ("read_only_space", 0x022c5): (167, "OnePointerFillerMap"),
+ ("read_only_space", 0x022ed): (167, "TwoPointerFillerMap"),
("read_only_space", 0x02315): (67, "UninitializedMap"),
("read_only_space", 0x0238d): (67, "UndefinedMap"),
("read_only_space", 0x023d1): (66, "HeapNumberMap"),
("read_only_space", 0x02405): (67, "TheHoleMap"),
("read_only_space", 0x02465): (67, "BooleanMap"),
- ("read_only_space", 0x02509): (132, "ByteArrayMap"),
- ("read_only_space", 0x02531): (119, "FixedCOWArrayMap"),
- ("read_only_space", 0x02559): (120, "HashTableMap"),
+ ("read_only_space", 0x02509): (131, "ByteArrayMap"),
+ ("read_only_space", 0x02531): (118, "FixedCOWArrayMap"),
+ ("read_only_space", 0x02559): (119, "HashTableMap"),
("read_only_space", 0x02581): (64, "SymbolMap"),
("read_only_space", 0x025a9): (40, "OneByteStringMap"),
- ("read_only_space", 0x025d1): (178, "ScopeInfoMap"),
- ("read_only_space", 0x025f9): (179, "SharedFunctionInfoMap"),
- ("read_only_space", 0x02621): (162, "CodeMap"),
- ("read_only_space", 0x02649): (161, "CellMap"),
- ("read_only_space", 0x02671): (177, "GlobalPropertyCellMap"),
+ ("read_only_space", 0x025d1): (177, "ScopeInfoMap"),
+ ("read_only_space", 0x025f9): (178, "SharedFunctionInfoMap"),
+ ("read_only_space", 0x02621): (161, "CodeMap"),
+ ("read_only_space", 0x02649): (160, "CellMap"),
+ ("read_only_space", 0x02671): (176, "GlobalPropertyCellMap"),
("read_only_space", 0x02699): (70, "ForeignMap"),
- ("read_only_space", 0x026c1): (160, "TransitionArrayMap"),
+ ("read_only_space", 0x026c1): (159, "TransitionArrayMap"),
("read_only_space", 0x026e9): (45, "ThinOneByteStringMap"),
- ("read_only_space", 0x02711): (167, "FeedbackVectorMap"),
+ ("read_only_space", 0x02711): (166, "FeedbackVectorMap"),
("read_only_space", 0x02749): (67, "ArgumentsMarkerMap"),
("read_only_space", 0x027a9): (67, "ExceptionMap"),
("read_only_space", 0x02805): (67, "TerminationExceptionMap"),
("read_only_space", 0x0286d): (67, "OptimizedOutMap"),
("read_only_space", 0x028cd): (67, "StaleRegisterMap"),
- ("read_only_space", 0x0292d): (131, "ScriptContextTableMap"),
- ("read_only_space", 0x02955): (129, "ClosureFeedbackCellArrayMap"),
- ("read_only_space", 0x0297d): (166, "FeedbackMetadataArrayMap"),
- ("read_only_space", 0x029a5): (119, "ArrayListMap"),
+ ("read_only_space", 0x0292d): (130, "ScriptContextTableMap"),
+ ("read_only_space", 0x02955): (128, "ClosureFeedbackCellArrayMap"),
+ ("read_only_space", 0x0297d): (165, "FeedbackMetadataArrayMap"),
+ ("read_only_space", 0x029a5): (118, "ArrayListMap"),
("read_only_space", 0x029cd): (65, "BigIntMap"),
- ("read_only_space", 0x029f5): (130, "ObjectBoilerplateDescriptionMap"),
- ("read_only_space", 0x02a1d): (133, "BytecodeArrayMap"),
- ("read_only_space", 0x02a45): (163, "CodeDataContainerMap"),
- ("read_only_space", 0x02a6d): (164, "CoverageInfoMap"),
- ("read_only_space", 0x02a95): (134, "FixedDoubleArrayMap"),
- ("read_only_space", 0x02abd): (122, "GlobalDictionaryMap"),
- ("read_only_space", 0x02ae5): (102, "ManyClosuresCellMap"),
- ("read_only_space", 0x02b0d): (173, "MegaDomHandlerMap"),
- ("read_only_space", 0x02b35): (119, "ModuleInfoMap"),
- ("read_only_space", 0x02b5d): (123, "NameDictionaryMap"),
- ("read_only_space", 0x02b85): (102, "NoClosuresCellMap"),
- ("read_only_space", 0x02bad): (124, "NumberDictionaryMap"),
- ("read_only_space", 0x02bd5): (102, "OneClosureCellMap"),
- ("read_only_space", 0x02bfd): (125, "OrderedHashMapMap"),
- ("read_only_space", 0x02c25): (126, "OrderedHashSetMap"),
- ("read_only_space", 0x02c4d): (127, "OrderedNameDictionaryMap"),
- ("read_only_space", 0x02c75): (175, "PreparseDataMap"),
- ("read_only_space", 0x02c9d): (176, "PropertyArrayMap"),
- ("read_only_space", 0x02cc5): (98, "SideEffectCallHandlerInfoMap"),
- ("read_only_space", 0x02ced): (98, "SideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x02d15): (98, "NextCallSideEffectFreeCallHandlerInfoMap"),
- ("read_only_space", 0x02d3d): (128, "SimpleNumberDictionaryMap"),
- ("read_only_space", 0x02d65): (150, "SmallOrderedHashMapMap"),
- ("read_only_space", 0x02d8d): (151, "SmallOrderedHashSetMap"),
- ("read_only_space", 0x02db5): (152, "SmallOrderedNameDictionaryMap"),
- ("read_only_space", 0x02ddd): (155, "SourceTextModuleMap"),
- ("read_only_space", 0x02e05): (183, "SwissNameDictionaryMap"),
- ("read_only_space", 0x02e2d): (156, "SyntheticModuleMap"),
+ ("read_only_space", 0x029f5): (129, "ObjectBoilerplateDescriptionMap"),
+ ("read_only_space", 0x02a1d): (132, "BytecodeArrayMap"),
+ ("read_only_space", 0x02a45): (162, "CodeDataContainerMap"),
+ ("read_only_space", 0x02a6d): (163, "CoverageInfoMap"),
+ ("read_only_space", 0x02a95): (133, "FixedDoubleArrayMap"),
+ ("read_only_space", 0x02abd): (121, "GlobalDictionaryMap"),
+ ("read_only_space", 0x02ae5): (101, "ManyClosuresCellMap"),
+ ("read_only_space", 0x02b0d): (172, "MegaDomHandlerMap"),
+ ("read_only_space", 0x02b35): (118, "ModuleInfoMap"),
+ ("read_only_space", 0x02b5d): (122, "NameDictionaryMap"),
+ ("read_only_space", 0x02b85): (101, "NoClosuresCellMap"),
+ ("read_only_space", 0x02bad): (123, "NumberDictionaryMap"),
+ ("read_only_space", 0x02bd5): (101, "OneClosureCellMap"),
+ ("read_only_space", 0x02bfd): (124, "OrderedHashMapMap"),
+ ("read_only_space", 0x02c25): (125, "OrderedHashSetMap"),
+ ("read_only_space", 0x02c4d): (126, "OrderedNameDictionaryMap"),
+ ("read_only_space", 0x02c75): (174, "PreparseDataMap"),
+ ("read_only_space", 0x02c9d): (175, "PropertyArrayMap"),
+ ("read_only_space", 0x02cc5): (97, "SideEffectCallHandlerInfoMap"),
+ ("read_only_space", 0x02ced): (97, "SideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x02d15): (97, "NextCallSideEffectFreeCallHandlerInfoMap"),
+ ("read_only_space", 0x02d3d): (127, "SimpleNumberDictionaryMap"),
+ ("read_only_space", 0x02d65): (149, "SmallOrderedHashMapMap"),
+ ("read_only_space", 0x02d8d): (150, "SmallOrderedHashSetMap"),
+ ("read_only_space", 0x02db5): (151, "SmallOrderedNameDictionaryMap"),
+ ("read_only_space", 0x02ddd): (154, "SourceTextModuleMap"),
+ ("read_only_space", 0x02e05): (182, "SwissNameDictionaryMap"),
+ ("read_only_space", 0x02e2d): (155, "SyntheticModuleMap"),
("read_only_space", 0x02e55): (72, "WasmCapiFunctionDataMap"),
("read_only_space", 0x02e7d): (73, "WasmExportedFunctionDataMap"),
("read_only_space", 0x02ea5): (74, "WasmJSFunctionDataMap"),
("read_only_space", 0x02ecd): (75, "WasmTypeInfoMap"),
- ("read_only_space", 0x02ef5): (184, "WeakArrayListMap"),
- ("read_only_space", 0x02f1d): (121, "EphemeronHashTableMap"),
- ("read_only_space", 0x02f45): (165, "EmbedderDataArrayMap"),
- ("read_only_space", 0x02f6d): (185, "WeakCellMap"),
+ ("read_only_space", 0x02ef5): (183, "WeakArrayListMap"),
+ ("read_only_space", 0x02f1d): (120, "EphemeronHashTableMap"),
+ ("read_only_space", 0x02f45): (164, "EmbedderDataArrayMap"),
+ ("read_only_space", 0x02f6d): (184, "WeakCellMap"),
("read_only_space", 0x02f95): (32, "StringMap"),
("read_only_space", 0x02fbd): (41, "ConsOneByteStringMap"),
("read_only_space", 0x02fe5): (33, "ConsStringMap"),
@@ -329,7 +328,7 @@ KNOWN_MAPS = {
("read_only_space", 0x031ed): (67, "SelfReferenceMarkerMap"),
("read_only_space", 0x03215): (67, "BasicBlockCountersMarkerMap"),
("read_only_space", 0x03259): (91, "ArrayBoilerplateDescriptionMap"),
- ("read_only_space", 0x03359): (104, "InterceptorInfoMap"),
+ ("read_only_space", 0x03359): (103, "InterceptorInfoMap"),
("read_only_space", 0x05699): (76, "PromiseFulfillReactionJobTaskMap"),
("read_only_space", 0x056c1): (77, "PromiseRejectReactionJobTaskMap"),
("read_only_space", 0x056e9): (78, "CallableTaskMap"),
@@ -344,52 +343,51 @@ KNOWN_MAPS = {
("read_only_space", 0x05851): (89, "AllocationMementoMap"),
("read_only_space", 0x05879): (92, "AsmWasmDataMap"),
("read_only_space", 0x058a1): (93, "AsyncGeneratorRequestMap"),
- ("read_only_space", 0x058c9): (94, "BaselineDataMap"),
- ("read_only_space", 0x058f1): (95, "BreakPointMap"),
- ("read_only_space", 0x05919): (96, "BreakPointInfoMap"),
- ("read_only_space", 0x05941): (97, "CachedTemplateObjectMap"),
- ("read_only_space", 0x05969): (99, "ClassPositionsMap"),
- ("read_only_space", 0x05991): (100, "DebugInfoMap"),
- ("read_only_space", 0x059b9): (103, "FunctionTemplateRareDataMap"),
- ("read_only_space", 0x059e1): (105, "InterpreterDataMap"),
- ("read_only_space", 0x05a09): (106, "ModuleRequestMap"),
- ("read_only_space", 0x05a31): (107, "PromiseCapabilityMap"),
- ("read_only_space", 0x05a59): (108, "PromiseReactionMap"),
- ("read_only_space", 0x05a81): (109, "PropertyDescriptorObjectMap"),
- ("read_only_space", 0x05aa9): (110, "PrototypeInfoMap"),
- ("read_only_space", 0x05ad1): (111, "RegExpBoilerplateDescriptionMap"),
- ("read_only_space", 0x05af9): (112, "ScriptMap"),
- ("read_only_space", 0x05b21): (113, "SourceTextModuleInfoEntryMap"),
- ("read_only_space", 0x05b49): (114, "StackFrameInfoMap"),
- ("read_only_space", 0x05b71): (115, "TemplateObjectDescriptionMap"),
- ("read_only_space", 0x05b99): (116, "Tuple2Map"),
- ("read_only_space", 0x05bc1): (117, "WasmExceptionTagMap"),
- ("read_only_space", 0x05be9): (118, "WasmIndirectFunctionTableMap"),
- ("read_only_space", 0x05c11): (136, "SloppyArgumentsElementsMap"),
- ("read_only_space", 0x05c39): (153, "DescriptorArrayMap"),
- ("read_only_space", 0x05c61): (158, "UncompiledDataWithoutPreparseDataMap"),
- ("read_only_space", 0x05c89): (157, "UncompiledDataWithPreparseDataMap"),
- ("read_only_space", 0x05cb1): (174, "OnHeapBasicBlockProfilerDataMap"),
- ("read_only_space", 0x05cd9): (170, "InternalClassMap"),
- ("read_only_space", 0x05d01): (181, "SmiPairMap"),
- ("read_only_space", 0x05d29): (180, "SmiBoxMap"),
- ("read_only_space", 0x05d51): (147, "ExportedSubClassBaseMap"),
- ("read_only_space", 0x05d79): (148, "ExportedSubClassMap"),
- ("read_only_space", 0x05da1): (68, "AbstractInternalClassSubclass1Map"),
- ("read_only_space", 0x05dc9): (69, "AbstractInternalClassSubclass2Map"),
- ("read_only_space", 0x05df1): (135, "InternalClassWithSmiElementsMap"),
- ("read_only_space", 0x05e19): (171, "InternalClassWithStructElementsMap"),
- ("read_only_space", 0x05e41): (149, "ExportedSubClass2Map"),
- ("read_only_space", 0x05e69): (182, "SortStateMap"),
- ("read_only_space", 0x05e91): (90, "AllocationSiteWithWeakNextMap"),
- ("read_only_space", 0x05eb9): (90, "AllocationSiteWithoutWeakNextMap"),
- ("read_only_space", 0x05ee1): (81, "LoadHandler1Map"),
- ("read_only_space", 0x05f09): (81, "LoadHandler2Map"),
- ("read_only_space", 0x05f31): (81, "LoadHandler3Map"),
- ("read_only_space", 0x05f59): (82, "StoreHandler0Map"),
- ("read_only_space", 0x05f81): (82, "StoreHandler1Map"),
- ("read_only_space", 0x05fa9): (82, "StoreHandler2Map"),
- ("read_only_space", 0x05fd1): (82, "StoreHandler3Map"),
+ ("read_only_space", 0x058c9): (94, "BreakPointMap"),
+ ("read_only_space", 0x058f1): (95, "BreakPointInfoMap"),
+ ("read_only_space", 0x05919): (96, "CachedTemplateObjectMap"),
+ ("read_only_space", 0x05941): (98, "ClassPositionsMap"),
+ ("read_only_space", 0x05969): (99, "DebugInfoMap"),
+ ("read_only_space", 0x05991): (102, "FunctionTemplateRareDataMap"),
+ ("read_only_space", 0x059b9): (104, "InterpreterDataMap"),
+ ("read_only_space", 0x059e1): (105, "ModuleRequestMap"),
+ ("read_only_space", 0x05a09): (106, "PromiseCapabilityMap"),
+ ("read_only_space", 0x05a31): (107, "PromiseReactionMap"),
+ ("read_only_space", 0x05a59): (108, "PropertyDescriptorObjectMap"),
+ ("read_only_space", 0x05a81): (109, "PrototypeInfoMap"),
+ ("read_only_space", 0x05aa9): (110, "RegExpBoilerplateDescriptionMap"),
+ ("read_only_space", 0x05ad1): (111, "ScriptMap"),
+ ("read_only_space", 0x05af9): (112, "SourceTextModuleInfoEntryMap"),
+ ("read_only_space", 0x05b21): (113, "StackFrameInfoMap"),
+ ("read_only_space", 0x05b49): (114, "TemplateObjectDescriptionMap"),
+ ("read_only_space", 0x05b71): (115, "Tuple2Map"),
+ ("read_only_space", 0x05b99): (116, "WasmExceptionTagMap"),
+ ("read_only_space", 0x05bc1): (117, "WasmIndirectFunctionTableMap"),
+ ("read_only_space", 0x05be9): (135, "SloppyArgumentsElementsMap"),
+ ("read_only_space", 0x05c11): (152, "DescriptorArrayMap"),
+ ("read_only_space", 0x05c39): (157, "UncompiledDataWithoutPreparseDataMap"),
+ ("read_only_space", 0x05c61): (156, "UncompiledDataWithPreparseDataMap"),
+ ("read_only_space", 0x05c89): (173, "OnHeapBasicBlockProfilerDataMap"),
+ ("read_only_space", 0x05cb1): (169, "InternalClassMap"),
+ ("read_only_space", 0x05cd9): (180, "SmiPairMap"),
+ ("read_only_space", 0x05d01): (179, "SmiBoxMap"),
+ ("read_only_space", 0x05d29): (146, "ExportedSubClassBaseMap"),
+ ("read_only_space", 0x05d51): (147, "ExportedSubClassMap"),
+ ("read_only_space", 0x05d79): (68, "AbstractInternalClassSubclass1Map"),
+ ("read_only_space", 0x05da1): (69, "AbstractInternalClassSubclass2Map"),
+ ("read_only_space", 0x05dc9): (134, "InternalClassWithSmiElementsMap"),
+ ("read_only_space", 0x05df1): (170, "InternalClassWithStructElementsMap"),
+ ("read_only_space", 0x05e19): (148, "ExportedSubClass2Map"),
+ ("read_only_space", 0x05e41): (181, "SortStateMap"),
+ ("read_only_space", 0x05e69): (90, "AllocationSiteWithWeakNextMap"),
+ ("read_only_space", 0x05e91): (90, "AllocationSiteWithoutWeakNextMap"),
+ ("read_only_space", 0x05eb9): (81, "LoadHandler1Map"),
+ ("read_only_space", 0x05ee1): (81, "LoadHandler2Map"),
+ ("read_only_space", 0x05f09): (81, "LoadHandler3Map"),
+ ("read_only_space", 0x05f31): (82, "StoreHandler0Map"),
+ ("read_only_space", 0x05f59): (82, "StoreHandler1Map"),
+ ("read_only_space", 0x05f81): (82, "StoreHandler2Map"),
+ ("read_only_space", 0x05fa9): (82, "StoreHandler3Map"),
("map_space", 0x02119): (1057, "ExternalMap"),
("map_space", 0x02141): (2113, "JSMessageObjectMap"),
}
diff --git a/deps/v8/tools/whitespace.txt b/deps/v8/tools/whitespace.txt
index 60b58be703..f890e67970 100644
--- a/deps/v8/tools/whitespace.txt
+++ b/deps/v8/tools/whitespace.txt
@@ -7,7 +7,7 @@ A Smi balks into a war and says:
The doubles heard this and started to unbox.
The Smi looked at them when a crazy v8-autoroll account showed up...
The autoroller bought a round of Himbeerbrause. Suddenly.......
-The bartender starts to shake the bottles............................
+The bartender starts to shake the bottles...........................
I can't add trailing whitespaces, so I'm adding this line............
I'm starting to think that just adding trailing whitespaces might not be bad.